Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
45 changes: 29 additions & 16 deletions lib/Conversion/TorchToArith/TorchToArith.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -134,12 +134,13 @@ class ConvertAtenUnaryOpToFloatMathOp : public OpConversionPattern<AtenOp> {
} // namespace

namespace {
class ConvertAtenDivIntOp : public OpConversionPattern<AtenDivIntOp> {
template <typename AtenOp>
class ConvertAtenDivOp : public OpConversionPattern<AtenOp> {
public:
using OpConversionPattern<AtenDivIntOp>::OpConversionPattern;
using OpConversionPattern<AtenOp>::OpConversionPattern;
using OpAdaptor = typename AtenOp::Adaptor;
LogicalResult
matchAndRewrite(AtenDivIntOp op,
typename OpConversionPattern<AtenDivIntOp>::OpAdaptor adaptor,
matchAndRewrite(AtenOp op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
Location loc = op.getLoc();
Value a = convertScalarToDtype(rewriter, loc, adaptor.getA(),
Expand Down Expand Up @@ -306,11 +307,13 @@ class ConvertAtenScalarArithOp : public OpConversionPattern<AtenOp> {
} // namespace

namespace {
class ConvertAtenAddOp : public OpConversionPattern<AtenAddOp> {
template <typename AtenOp, typename ArithFOp, typename ArithIOp>
class ConvertAtenBinaryScalarOp : public OpConversionPattern<AtenOp> {
public:
using OpConversionPattern::OpConversionPattern;
using OpConversionPattern<AtenOp>::OpConversionPattern;
using OpAdaptor = typename AtenOp::Adaptor;
LogicalResult
matchAndRewrite(AtenAddOp op, OpAdaptor adaptor,
matchAndRewrite(AtenOp op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
Location loc = op.getLoc();
Type resultType =
Expand All @@ -320,9 +323,9 @@ class ConvertAtenAddOp : public OpConversionPattern<AtenAddOp> {
Value operandB =
convertScalarToDtype(rewriter, loc, adaptor.getB(), resultType);
if (isa<mlir::FloatType>(resultType)) {
rewriter.replaceOpWithNewOp<arith::AddFOp>(op, operandA, operandB);
rewriter.replaceOpWithNewOp<ArithFOp>(op, operandA, operandB);
} else if (isa<mlir::IntegerType>(resultType)) {
rewriter.replaceOpWithNewOp<arith::AddIOp>(op, operandA, operandB);
rewriter.replaceOpWithNewOp<ArithIOp>(op, operandA, operandB);
} else {
return rewriter.notifyMatchFailure(
op, "unimplemented: only support integer or float result type");
Expand Down Expand Up @@ -497,8 +500,17 @@ class ConvertTorchToArith
patterns.add<ConvertAtenCastOp<AtenFloatScalarOp>>(typeConverter, context);
patterns.add<ConvertAtenCastOp<AtenIntScalarOp>>(typeConverter, context);

target.addIllegalOp<AtenAddOp>();
patterns.add<ConvertAtenAddOp>(typeConverter, context);
target.addIllegalOp<AtenAddOp, AtenSubOp, AtenMulOp>();
patterns.add<
ConvertAtenBinaryScalarOp<AtenAddOp, arith::AddFOp, arith::AddIOp>>(
typeConverter, context);
patterns.add<
ConvertAtenBinaryScalarOp<AtenSubOp, arith::SubFOp, arith::SubIOp>>(
typeConverter, context);
patterns.add<
ConvertAtenBinaryScalarOp<AtenMulOp, arith::MulFOp, arith::MulIOp>>(
typeConverter, context);

target.addIllegalOp<AtenNegIntOp>();
patterns.add<ConvertAtenNegIntOp>(typeConverter, context);
target.addIllegalOp<AtenAddIntOp, AtenAddFloatIntOp, AtenSubIntOp,
Expand All @@ -523,11 +535,12 @@ class ConvertTorchToArith
typeConverter, context);
patterns.add<ConvertAtenBinaryOp<AtenMulFloatOp, arith::MulFOp>>(
typeConverter, context);
target.addIllegalOp<AtenDivIntOp>();
patterns.add<ConvertAtenDivIntOp>(typeConverter, context);
target.addIllegalOp<AtenDivFloatOp>();
patterns.add<ConvertAtenBinaryOp<AtenDivFloatOp, arith::DivFOp>>(
typeConverter, context);

target.addIllegalOp<AtenDivOp, AtenDivIntOp, AtenDivFloatOp>();
patterns.add<ConvertAtenDivOp<AtenDivOp>>(typeConverter, context);
patterns.add<ConvertAtenDivOp<AtenDivIntOp>>(typeConverter, context);
patterns.add<ConvertAtenDivOp<AtenDivFloatOp>>(typeConverter, context);

target.addIllegalOp<AtenFloordivIntOp>();
patterns.add<ConvertAtenBinaryOp<AtenFloordivIntOp, arith::FloorDivSIOp>>(
typeConverter, context);
Expand Down
59 changes: 57 additions & 2 deletions test/Conversion/TorchToArith/basic.mlir
Original file line number Diff line number Diff line change
Expand Up @@ -269,8 +269,8 @@ func.func @torch.aten.mul.float_int(%arg0: !torch.float, %arg1: !torch.int) -> !
// CHECK-SAME: %[[RHS:.*]]: !torch.float) -> !torch.float {
// CHECK-DAG: %[[LHS_F64:.*]] = torch_c.to_f64 %[[LHS]]
// CHECK-DAG: %[[RHS_F64:.*]] = torch_c.to_f64 %[[RHS]]
// CHECK: %[[SUB:.*]] = arith.divf %[[LHS_F64:.*]], [[RHS_F64:.*]] : f64
// CHECK: %[[OUT:.*]] = torch_c.from_f64 %[[SUB:.*]]
// CHECK: %[[DIV:.*]] = arith.divf %[[LHS_F64]], %[[RHS_F64]] : f64
// CHECK: %[[OUT:.*]] = torch_c.from_f64 %[[DIV]]
// CHECK: return %[[OUT]] : !torch.float
func.func @torch.aten.div.float(%arg0: !torch.float, %arg1: !torch.float) -> !torch.float {
%0 = torch.aten.div.float %arg0, %arg1 : !torch.float, !torch.float -> !torch.float
Expand Down Expand Up @@ -407,3 +407,58 @@ func.func @torch.aten.Int.Scalar(%arg0: !torch.float) -> !torch.int {
%0 = torch.aten.Int.Scalar %arg0 : !torch.float -> !torch.int
return %0 : !torch.int
}

// Generic scalar `aten.add` with float operands but an int result: both
// operands are first materialized as f64, then truncated with fptosi to the
// i64 result type before the integer add is emitted.
// CHECK-LABEL: func.func @torch.aten.add(
// CHECK-SAME: %[[LHS:.*]]: !torch.float,
// CHECK-SAME: %[[RHS:.*]]: !torch.float) -> !torch.int {
// CHECK-DAG: %[[LHS_F64:.*]] = torch_c.to_f64 %[[LHS]]
// CHECK-DAG: %[[RHS_F64:.*]] = torch_c.to_f64 %[[RHS]]
// CHECK: %[[LHS_FPTOSI:.*]] = arith.fptosi %[[LHS_F64]] : f64 to i64
// CHECK: %[[RHS_FPTOSI:.*]] = arith.fptosi %[[RHS_F64]] : f64 to i64
// CHECK: %[[ADD:.*]] = arith.addi %[[LHS_FPTOSI]], %[[RHS_FPTOSI]] : i64
// CHECK: %[[OUT:.*]] = torch_c.from_i64 %[[ADD]]
// CHECK: return %[[OUT]] : !torch.int
func.func @torch.aten.add(%arg0: !torch.float, %arg1: !torch.float) -> !torch.int {
  %0 = torch.aten.add %arg0, %arg1 : !torch.float, !torch.float -> !torch.int
  return %0 : !torch.int
}

// Generic scalar `aten.sub` on two floats: lowers directly to arith.subf on
// the f64 values, with torch_c casts at the boundaries.
// CHECK-LABEL: func.func @torch.aten.sub(
// CHECK-SAME: %[[LHS:.*]]: !torch.float,
// CHECK-SAME: %[[RHS:.*]]: !torch.float) -> !torch.float {
// CHECK-DAG: %[[LHS_F64:.*]] = torch_c.to_f64 %[[LHS]]
// CHECK-DAG: %[[RHS_F64:.*]] = torch_c.to_f64 %[[RHS]]
// CHECK: %[[SUB:.*]] = arith.subf %[[LHS_F64]], %[[RHS_F64]] : f64
// CHECK: %[[OUT:.*]] = torch_c.from_f64 %[[SUB]]
// CHECK: return %[[OUT]] : !torch.float
func.func @torch.aten.sub(%arg0: !torch.float, %arg1: !torch.float) -> !torch.float {
  %0 = torch.aten.sub %arg0, %arg1 : !torch.float, !torch.float -> !torch.float
  return %0 : !torch.float
}

// Generic scalar `aten.mul` with mixed int/float operands: the int operand is
// promoted to f64 via sitofp so the multiply happens as arith.mulf in the
// float result type.
// CHECK-LABEL: func.func @torch.aten.mul(
// CHECK-SAME: %[[LHS:.*]]: !torch.int,
// CHECK-SAME: %[[RHS:.*]]: !torch.float) -> !torch.float {
// CHECK-DAG: %[[LHS_I64:.*]] = torch_c.to_i64 %[[LHS]]
// CHECK-DAG: %[[RHS_F64:.*]] = torch_c.to_f64 %[[RHS]]
// CHECK: %[[LHS_F64:.*]] = arith.sitofp %[[LHS_I64]] : i64 to f64
// CHECK: %[[MUL:.*]] = arith.mulf %[[LHS_F64]], %[[RHS_F64]] : f64
// CHECK: %[[OUT:.*]] = torch_c.from_f64 %[[MUL]]
// CHECK: return %[[OUT]] : !torch.float
func.func @torch.aten.mul(%arg0: !torch.int, %arg1: !torch.float) -> !torch.float {
  %0 = torch.aten.mul %arg0, %arg1 : !torch.int, !torch.float -> !torch.float
  return %0 : !torch.float
}

// Generic scalar `aten.div` on two floats: lowers to arith.divf on the f64
// values, mirroring the existing aten.div.float lowering.
// CHECK-LABEL: func.func @torch.aten.div(
// CHECK-SAME: %[[LHS:.*]]: !torch.float,
// CHECK-SAME: %[[RHS:.*]]: !torch.float) -> !torch.float {
// CHECK-DAG: %[[LHS_F64:.*]] = torch_c.to_f64 %[[LHS]]
// CHECK-DAG: %[[RHS_F64:.*]] = torch_c.to_f64 %[[RHS]]
// CHECK: %[[DIV:.*]] = arith.divf %[[LHS_F64]], %[[RHS_F64]] : f64
// CHECK: %[[OUT:.*]] = torch_c.from_f64 %[[DIV]]
// CHECK: return %[[OUT]] : !torch.float
func.func @torch.aten.div(%arg0: !torch.float, %arg1: !torch.float) -> !torch.float {
  %0 = torch.aten.div %arg0, %arg1 : !torch.float, !torch.float -> !torch.float
  return %0 : !torch.float
}
Loading