@@ -505,6 +505,26 @@ func.func @test_conv_with_strides_padding(%arg0: !torch.vtensor<[1,1,7,5],f32>,
505505 return %0 : !torch.vtensor <[1 ,1 ,4 ,3 ],f32 >
506506}
507507
// CHECK-LABEL: @test_conv_with_bias_strides_padding
func.func @test_conv_with_bias_strides_padding(%arg0: !torch.vtensor<[?,?,224,224],f32>, %arg1: !torch.vtensor<[64,3,7,7],f32>, %arg2: !torch.vtensor<[64],f32>) -> !torch.vtensor<[?,64,112,112],f32> attributes {torch.onnx_meta.ir_version = 6 : si64, torch.onnx_meta.opset_version = 11 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
  // CHECK: %[[C3:.*]] = torch.constant.int 3
  // CHECK: %[[C3_0:.*]] = torch.constant.int 3
  // CHECK: %[[C1:.*]] = torch.constant.int 1
  // CHECK: %[[C1_0:.*]] = torch.constant.int 1
  // CHECK: %[[C2:.*]] = torch.constant.int 2
  // CHECK: %[[C2_0:.*]] = torch.constant.int 2
  // CHECK: %[[C0:.*]] = torch.constant.int 0
  // CHECK: %[[PADDING:.*]] = torch.prim.ListConstruct %[[C3]], %[[C3_0]] : (!torch.int, !torch.int) -> !torch.list<int>
  // CHECK: %[[DILATIONS:.*]] = torch.prim.ListConstruct %[[C1]], %[[C1_0]] : (!torch.int, !torch.int) -> !torch.list<int>
  // CHECK: %[[STRIDE:.*]] = torch.prim.ListConstruct %[[C2]], %[[C2_0]] : (!torch.int, !torch.int) -> !torch.list<int>
  // CHECK: %[[OUTPUT_PADDING:.*]] = torch.prim.ListConstruct %[[C0]], %[[C0]] : (!torch.int, !torch.int) -> !torch.list<int>
  // CHECK: %[[TRANSPOSED:.*]] = torch.constant.bool false
  // CHECK: %[[GROUPS:.*]] = torch.constant.int 1
  // CHECK: torch.aten.convolution %arg0, %arg1, %arg2, %[[STRIDE]], %[[PADDING]], %[[DILATIONS]], %[[TRANSPOSED]], %[[OUTPUT_PADDING]], %[[GROUPS]] : !torch.vtensor<[?,?,224,224],f32>, !torch.vtensor<[64,3,7,7],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[?,64,112,112],f32>
  %0 = torch.operator "onnx.Conv"(%arg0, %arg1, %arg2) {torch.onnx.dilations = [1 : si64, 1 : si64], torch.onnx.group = 1 : si64, torch.onnx.kernel_shape = [7 : si64, 7 : si64], torch.onnx.pads = [3 : si64, 3 : si64, 3 : si64, 3 : si64], torch.onnx.strides = [2 : si64, 2 : si64]} : (!torch.vtensor<[?,?,224,224],f32>, !torch.vtensor<[64,3,7,7],f32>, !torch.vtensor<[64],f32>) -> !torch.vtensor<[?,64,112,112],f32>
  return %0 : !torch.vtensor<[?,64,112,112],f32>
}
527+
508528// CHECK-LABEL: @test_convtranspose_dilations
509529func.func @test_convtranspose_dilations (%arg0: !torch.vtensor <[1 ,1 ,3 ,3 ],f32 >, %arg1: !torch.vtensor <[1 ,1 ,2 ,2 ],f32 >) -> !torch.vtensor <[1 ,1 ,5 ,5 ],f32 > attributes {torch.onnx_meta.ir_version = 6 : si64 , torch.onnx_meta.opset_version = 11 : si64 , torch.onnx_meta.producer_name = " backend-test" , torch.onnx_meta.producer_version = " " } {
510530 // CHECK: %[[C0:.*]] = torch.constant.int 0
0 commit comments