diff --git a/e2e_testing/torchscript/xfail_sets.py b/e2e_testing/torchscript/xfail_sets.py
index f26b061ca..ddff294ec 100644
--- a/e2e_testing/torchscript/xfail_sets.py
+++ b/e2e_testing/torchscript/xfail_sets.py
@@ -28,7 +28,6 @@ TOSA_PASS_SET = {
     "ElementwiseBinaryModule_basic",
     "ElementwiseSigmoidModule_basic",
     "ElementwiseExpModule_basic",
-    "ElementwiseReluModule_basic",
     "ElementwiseFloorModule_basic",
     "ElementwiseLogModule_basic",
     "ElementwiseBinaryStaticShapeModule_basic",
@@ -103,7 +102,6 @@ TOSA_PASS_SET = {
     "ElementwiseFlattenBroadcastModule_basic",
     "SquareModule_basic",
     "MaxPool2dStaticModule_basic",
-    "ResNet18StaticModule_basic",
    "NativeLayerNormModule4D_basic",
     "LayerNormNormalizeOverAllDimsModule_basic",
     "PermuteModule_basic",
diff --git a/externals/llvm-external-projects/torch-mlir-dialects/lib/Dialect/TMTensor/IR/TMTensorOps.cpp b/externals/llvm-external-projects/torch-mlir-dialects/lib/Dialect/TMTensor/IR/TMTensorOps.cpp
index 0fa16f1df..f10c59e5b 100644
--- a/externals/llvm-external-projects/torch-mlir-dialects/lib/Dialect/TMTensor/IR/TMTensorOps.cpp
+++ b/externals/llvm-external-projects/torch-mlir-dialects/lib/Dialect/TMTensor/IR/TMTensorOps.cpp
@@ -516,7 +516,7 @@ struct FoldTensorCastOp : public OpInterfaceRewritePattern<TMTensorOp> {
     for (OpOperand *opOperand : op.getInputOperands()) {
       auto tensorCastOp = opOperand->get().getDefiningOp<tensor::CastOp>();
       newOperands.push_back(canFoldIntoConsumerOp(tensorCastOp)
-                                ? tensorCastOp.source()
+                                ? tensorCastOp.getSource()
                                 : opOperand->get());
     }
     // Init tensors may fold, in which case the resultType must also change.
diff --git a/externals/llvm-project b/externals/llvm-project
index 061e0189a..2dde4ba63 160000
--- a/externals/llvm-project
+++ b/externals/llvm-project
@@ -1 +1 @@
-Subproject commit 061e0189a3dab6b1831a80d489ff1b15ad93aafb
+Subproject commit 2dde4ba63974daf59f8ce5c346505f194f920131
diff --git a/externals/mlir-hlo b/externals/mlir-hlo
index 0430519b7..9c49473d8 160000
--- a/externals/mlir-hlo
+++ b/externals/mlir-hlo
@@ -1 +1 @@
-Subproject commit 0430519b7ebf11a3f44c469fce8b579561fa6052
+Subproject commit 9c49473d80a8667e94232ddb5ed60a1a9d8ad266
diff --git a/lib/Conversion/TorchToLinalg/DataMovement.cpp b/lib/Conversion/TorchToLinalg/DataMovement.cpp
index 6de17c254..5283774f0 100644
--- a/lib/Conversion/TorchToLinalg/DataMovement.cpp
+++ b/lib/Conversion/TorchToLinalg/DataMovement.cpp
@@ -576,7 +576,7 @@ public:
           rewriter
              .create<tensor::CollapseShapeOp>(loc, intermediateResultType, castedInput, inputAssociations)
-              .result();
+              .getResult();
     }
 
     if (llvm::any_of(outputAssociations, [](ReassociationIndices indices) {
@@ -588,7 +588,7 @@ public:
               expandedInput.has_value() ? expandedInput.value() : castedInput,
               outputAssociations)
-              .result();
+              .getResult();
     }
 
     Value result = collapsedInput.has_value() ? collapsedInput.value()
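The TMTensorOps.cpp and DataMovement.cpp hunks above are one mechanical migration: the bumped LLVM generates ODS accessors with a `get` prefix, so spellings like `source()` and `result()` become `getSource()` and `getResult()`. A minimal standalone sketch of the new spelling (illustrative, not code from this patch):

```cpp
#include "mlir/Dialect/Tensor/IR/Tensor.h"

// Pre-bump spelling: castOp.source(). Semantics are unchanged; only the
// generated accessor name gained the "get" prefix.
mlir::Value castSource(mlir::tensor::CastOp castOp) {
  return castOp.getSource();
}
```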
diff --git a/lib/Conversion/TorchToLinalg/Reduction.cpp b/lib/Conversion/TorchToLinalg/Reduction.cpp
index af5268a06..728c53bf2 100644
--- a/lib/Conversion/TorchToLinalg/Reduction.cpp
+++ b/lib/Conversion/TorchToLinalg/Reduction.cpp
@@ -239,7 +239,7 @@ static Value createLinalgPayloadForReduceOp(OpBuilder &b, Location loc,
       Value elem = payloadArgs[0];
       Value result = payloadArgs[1];
       Value self = convertScalarToDtype(b, loc, elem, resultElementType);
-      auto abs = b.create<math::AbsOp>(loc, self);
+      auto abs = b.create<math::AbsFOp>(loc, self);
       AtenLinalgVectorNormOp::Adaptor adaptor(operands);
       Value ord = convertScalarToDtype(b, loc, adaptor.ord(), resultElementType);
       auto pow = b.create<math::PowFOp>(loc, abs, ord);
diff --git a/lib/Conversion/TorchToLinalg/Uncategorized.cpp b/lib/Conversion/TorchToLinalg/Uncategorized.cpp
index 1e5150d46..d1f473990 100644
--- a/lib/Conversion/TorchToLinalg/Uncategorized.cpp
+++ b/lib/Conversion/TorchToLinalg/Uncategorized.cpp
@@ -210,7 +210,7 @@ static Value createLinalgPayloadCalculationForElementwiseOp(
     return b.create<arith::OrIOp>(loc, lhsTest, rhsTest);
   }
   if (isa<AtenAbsOp>(op))
-    return b.create<math::AbsOp>(loc, payloadArgs[0]);
+    return b.create<math::AbsFOp>(loc, payloadArgs[0]);
   if (isa<AtenSigmoidOp>(op)) {
     auto negate = createCalculationForMathOpWithDtypeConversion<arith::NegFOp>(
         b, converter, payloadArgs[0], op);
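Both TorchToLinalg hunks above track the same upstream rename: the bumped LLVM splits `math.abs` by element type, and the floating-point variant is now `math::AbsFOp` ("math.absf"). A minimal sketch, assuming only that rename:

```cpp
#include "mlir/Dialect/Math/IR/Math.h"
#include "mlir/IR/Builders.h"

// Builds |self| for a floating-point payload value; before the bump this
// was spelled b.create<math::AbsOp>(loc, self).
mlir::Value buildFloatAbs(mlir::OpBuilder &b, mlir::Location loc,
                          mlir::Value self) {
  return b.create<mlir::math::AbsFOp>(loc, self);
}
```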
diff --git a/lib/Conversion/TorchToMhlo/Basic.cpp b/lib/Conversion/TorchToMhlo/Basic.cpp
index ef084d2dc..a8f77c112 100644
--- a/lib/Conversion/TorchToMhlo/Basic.cpp
+++ b/lib/Conversion/TorchToMhlo/Basic.cpp
@@ -1063,9 +1063,6 @@ LogicalResult ConvertAtenOp<AtenUniformOp>::matchAndRewrite(
       op.getLoc(),
       rewriter.getFloatAttr(inputTy.getElementType(), toDoubleValue));
-  auto outType = getTypeConverter()
-                     ->convertType(op.getType())
-                     .template dyn_cast<TensorType>();
   rewriter.replaceOpWithNewOp<mhlo::RngOp>(
       op, inputTy, fromTensor, toTensor, mhloShape,
       mhlo::RngDistribution::UNIFORM);
   return success();
diff --git a/lib/Conversion/TorchToMhlo/Linear.cpp b/lib/Conversion/TorchToMhlo/Linear.cpp
index 42dbd1798..0bb2e388b 100644
--- a/lib/Conversion/TorchToMhlo/Linear.cpp
+++ b/lib/Conversion/TorchToMhlo/Linear.cpp
@@ -531,7 +531,7 @@ public:
     std::copy(outputPadding.begin(), outputPadding.end(),
               edgePaddingHighVec.begin() + 2);
     Value paddingValue =
-        mhlo::getConstTensor<float>(rewriter, op, {0.0}, {}).getValue();
+        mhlo::getConstTensor<float>(rewriter, op, {0.0}, {}).value();
     paddingValue = mhlo::promoteType(rewriter, paddingValue, inputTy);
     mlir::DenseIntElementsAttr edgePaddingLow =
         rewriter.getI64VectorAttr(edgePaddingLowVec);
diff --git a/lib/Conversion/TorchToMhlo/Reduction.cpp b/lib/Conversion/TorchToMhlo/Reduction.cpp
index 7ae2c58be..65927cc4c 100644
--- a/lib/Conversion/TorchToMhlo/Reduction.cpp
+++ b/lib/Conversion/TorchToMhlo/Reduction.cpp
@@ -87,11 +87,9 @@ getMaxInDim(ConversionPatternRewriter &rewriter, Operation *op, Value &input,
   if (!initValue)
     return llvm::None;
   Value initIndex;
   if (mlir::mhlo::kMhloDimSizeBits == 32) {
-    initIndex =
-        mhlo::getConstTensor<int32_t>(rewriter, op, {0}, {}).getValue();
+    initIndex = mhlo::getConstTensor<int32_t>(rewriter, op, {0}, {}).value();
   } else {
-    initIndex =
-        mhlo::getConstTensor<int64_t>(rewriter, op, {0}, {}).getValue();
+    initIndex = mhlo::getConstTensor<int64_t>(rewriter, op, {0}, {}).value();
   }
 
   DenseIntElementsAttr dimensions = DenseIntElementsAttr::get(
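The three TorchToMhlo hunks above touch helpers such as `mhlo::getConstTensor`, which return `llvm::Optional<Value>`; the bumped LLVM deprecates `Optional::getValue()` in favor of the `std::optional`-style `value()` (the Basic.cpp hunk additionally drops an unused `outType` local). A minimal standalone sketch of the accessor migration:

```cpp
#include "llvm/ADT/Optional.h"

// Pre-bump spelling: maybe.getValue(). Behavior is identical; value() is
// the std::optional-compatible name.
int unwrap(llvm::Optional<int> maybe) { return maybe.value(); }
```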
diff --git a/lib/Dialect/Torch/Transforms/InlineGlobalSlots.cpp b/lib/Dialect/Torch/Transforms/InlineGlobalSlots.cpp
index 654e90b53..29e435b3d 100644
--- a/lib/Dialect/Torch/Transforms/InlineGlobalSlots.cpp
+++ b/lib/Dialect/Torch/Transforms/InlineGlobalSlots.cpp
@@ -94,7 +94,9 @@ static bool isUseTreatedWithValueSemantics(OpOperand &use) {
 /// unsafe
 class InlineGlobalSlotsAnalysisState : public AnalysisState {
 public:
-  InlineGlobalSlotsAnalysisState(ProgramPoint point) : AnalysisState(point) {}
+  InlineGlobalSlotsAnalysisState(ProgramPoint point) : AnalysisState(point) {
+    setSafe();
+  }
 
   bool isUninitialized() const override {
     // We are an optimistic analysis, so we are always default initialized to
@@ -102,11 +104,6 @@ public:
     return false;
   }
 
-  ChangeResult defaultInitialize() override {
-    // We are an optimistic analysis, so the default state is always "safe".
-    return setSafe();
-  }
-
   void print(raw_ostream &os) const override {
     os << "InlineGlobalSlotsAnalysisState(" << (isSafe ? "safe" : "unsafe")
        << ")";
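The hunk above adapts to a dataflow-framework change in the bumped LLVM: `AnalysisState::defaultInitialize()` is gone, so an optimistic analysis now establishes its default in the state constructor (here via `setSafe()`). A hedged sketch of the pattern with illustrative names, not the torch-mlir class:

```cpp
#include "mlir/Analysis/DataFlowFramework.h"
#include "llvm/Support/raw_ostream.h"

class OptimisticState : public mlir::AnalysisState {
public:
  OptimisticState(mlir::ProgramPoint point) : AnalysisState(point) {
    isSafe = true; // previously established in the removed defaultInitialize()
  }
  // Optimistic analyses are always considered initialized.
  bool isUninitialized() const override { return false; }
  void print(llvm::raw_ostream &os) const override {
    os << (isSafe ? "safe" : "unsafe");
  }

private:
  bool isSafe = false;
};
```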
diff --git a/test/Conversion/TorchToMhlo/basic.mlir b/test/Conversion/TorchToMhlo/basic.mlir
index 7f327357b..ae505146d 100644
--- a/test/Conversion/TorchToMhlo/basic.mlir
+++ b/test/Conversion/TorchToMhlo/basic.mlir
@@ -7,7 +7,7 @@
 // CHECK-SAME: %[[VAL_0:.*]]: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> {
 // CHECK: %[[VAL_1:.*]] = torch_c.to_builtin_tensor %[[VAL_0]] : !torch.vtensor<[?,?],f32> -> tensor<?x?xf32>
 // CHECK: %[[VAL_2:.*]] = torch.constant.none
-// CHECK: %[[VAL_3:.*]] = "mhlo.copy"(%[[VAL_1]]) : (tensor<?x?xf32>) -> tensor<?x?xf32>
+// CHECK: %[[VAL_3:.*]] = mhlo.copy %[[VAL_1]] : (tensor<?x?xf32>) -> tensor<?x?xf32>
 // CHECK: %[[VAL_4:.*]] = torch_c.from_builtin_tensor %[[VAL_3]] : tensor<?x?xf32> -> !torch.vtensor<[?,?],f32>
 // CHECK: return %[[VAL_4]] : !torch.vtensor<[?,?],f32>
 func.func @torch.aten.clone$basic(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,?],f32> {
@@ -47,7 +47,7 @@ func.func @torch.vtensor.literal$signed() -> !torch.vtensor<[2],si64> {
 // CHECK: %[[T0:.*]] = torch_c.to_i64 %[[INT1]]
 // CHECK: %[[T1:.*]] = tensor.from_elements %[[T0]] : tensor<1xi64>
 // CHECK: %[[T2:.*]] = mhlo.convert %[[T1]] : tensor<1xi64>
-// CHECK: %[[T3:.*]] = "mhlo.reshape"(%[[T2]]) : (tensor<1xi64>) -> tensor<i64>
+// CHECK: %[[T3:.*]] = mhlo.reshape %[[T2]] : (tensor<1xi64>) -> tensor<i64>
 // CHECK: %[[T4:.*]] = torch_c.from_builtin_tensor %[[T3]] : tensor<i64> -> !torch.vtensor<[],si64>
 // CHECK: return %[[T4]] : !torch.vtensor<[],si64>
 func.func @torch.prim.NumToTensor.Scalar$basic() -> !torch.vtensor<[], si64> {
@@ -229,16 +229,16 @@ func.func @torch.aten.batch_norm$no_bias_weight(%arg0: !torch.vtensor<[?,3,?,?],
 // CHECK: %true = torch.constant.bool true
 // CHECK: %[[VAL_4:.*]] = torch.prim.ListConstruct %int4, %int5 : (!torch.int, !torch.int) -> !torch.list<int>
 // CHECK: %[[VAL_5:.*]] = mhlo.constant dense<[1, 21, 20]> : tensor<3xi64>
-// CHECK: %[[VAL_6:.*]] = "mhlo.dynamic_reshape"(%[[VAL_1]], %[[VAL_5]]) : (tensor<3x7x4x5xf32>, tensor<3xi64>) -> tensor<1x21x20xf32>
+// CHECK: %[[VAL_6:.*]] = mhlo.dynamic_reshape %[[VAL_1]], %[[VAL_5]] : (tensor<3x7x4x5xf32>, tensor<3xi64>) -> tensor<1x21x20xf32>
 // CHECK: %[[VAL_7:.*]] = mhlo.constant dense<1.000000e+00> : tensor<21xf32>
 // CHECK: %[[VAL_8:.*]] = mhlo.constant dense<0.000000e+00> : tensor<21xf32>
 // CHECK: %[[VAL_9:.*]], %[[VAL_10:.*]], %[[VAL_11:.*]] = "mhlo.batch_norm_training"(%[[VAL_6]], %[[VAL_7]], %[[VAL_8]]) {epsilon = 9.99999974E-6 : f32, feature_index = 1 : i64} : (tensor<1x21x20xf32>, tensor<21xf32>, tensor<21xf32>) -> (tensor<1x21x20xf32>, tensor<21xf32>, tensor<21xf32>)
 // CHECK: %[[VAL_12:.*]] = mhlo.constant dense<[3, 7, 4, 5]> : tensor<4xi64>
-// CHECK: %[[VAL_13:.*]] = "mhlo.dynamic_reshape"(%[[VAL_9]], %[[VAL_12]]) : (tensor<1x21x20xf32>, tensor<4xi64>) -> tensor<3x7x4x5xf32>
+// CHECK: %[[VAL_13:.*]] = mhlo.dynamic_reshape %[[VAL_9]], %[[VAL_12]] : (tensor<1x21x20xf32>, tensor<4xi64>) -> tensor<3x7x4x5xf32>
 // CHECK: %[[VAL_14:.*]] = mhlo.constant dense<[3, 7, 1, 1]> : tensor<4xi64>
-// CHECK: %[[VAL_15:.*]] = "mhlo.dynamic_reshape"(%[[VAL_10]], %[[VAL_14]]) : (tensor<21xf32>, tensor<4xi64>) -> tensor<3x7x1x1xf32>
+// CHECK: %[[VAL_15:.*]] = mhlo.dynamic_reshape %[[VAL_10]], %[[VAL_14]] : (tensor<21xf32>, tensor<4xi64>) -> tensor<3x7x1x1xf32>
 // CHECK: %[[VAL_16:.*]] = mhlo.constant dense<[3, 7, 1, 1]> : tensor<4xi64>
-// CHECK: %[[VAL_17:.*]] = "mhlo.dynamic_reshape"(%[[VAL_11]], %[[VAL_16]]) : (tensor<21xf32>, tensor<4xi64>) -> tensor<3x7x1x1xf32>
+// CHECK: %[[VAL_17:.*]] = mhlo.dynamic_reshape %[[VAL_11]], %[[VAL_16]] : (tensor<21xf32>, tensor<4xi64>) -> tensor<3x7x1x1xf32>
 // CHECK: %[[VAL_18:.*]] = "mhlo.broadcast_in_dim"(%[[VAL_3]]) {broadcast_dimensions = dense<[2, 3]> : tensor<2xi64>} : (tensor<4x5xf32>) -> tensor<3x7x4x5xf32>
 // CHECK: %[[VAL_19:.*]] = "mhlo.broadcast_in_dim"(%[[VAL_2]]) {broadcast_dimensions = dense<[2, 3]> : tensor<2xi64>} : (tensor<4x5xf32>) -> tensor<3x7x4x5xf32>
 // CHECK: %[[VAL_20:.*]] = mhlo.multiply %[[VAL_13]], %[[VAL_18]] : tensor<3x7x4x5xf32>
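The CHECK-line churn in basic.mlir above, and in the remaining test files below, is all one change: the bumped mlir-hlo gives ops such as `mhlo.copy`, `mhlo.reshape`, `mhlo.dynamic_reshape`, `mhlo.return`, and `mhlo.real_dynamic_slice` custom assembly formats, so they no longer print in the quoted generic form. The ops themselves are unchanged; only FileCheck patterns needed respelling. A hedged sketch that round-trips one op (the mlir-hlo include path and dialect registration are assumptions about that tree):

```cpp
#include "mlir-hlo/Dialect/mhlo/IR/hlo_ops.h" // assumed mlir-hlo include path
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Parser/Parser.h"
#include "llvm/Support/raw_ostream.h"

// Parses the old generic spelling; printing then emits the new custom form,
// e.g. %0 = mhlo.reshape %arg0 : (tensor<1xf32>) -> tensor<f32>
void roundTripReshape() {
  mlir::MLIRContext ctx;
  ctx.loadDialect<mlir::func::FuncDialect, mlir::mhlo::MhloDialect>();
  mlir::OwningOpRef<mlir::ModuleOp> module =
      mlir::parseSourceString<mlir::ModuleOp>(
          R"(func.func @f(%arg0: tensor<1xf32>) -> tensor<f32> {
               %0 = "mhlo.reshape"(%arg0) : (tensor<1xf32>) -> tensor<f32>
               return %0 : tensor<f32>
             })",
          &ctx);
  module->print(llvm::outs());
}
```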
diff --git a/test/Conversion/TorchToMhlo/dropout.mlir b/test/Conversion/TorchToMhlo/dropout.mlir
index b61a61b3b..e595b7ecc 100644
--- a/test/Conversion/TorchToMhlo/dropout.mlir
+++ b/test/Conversion/TorchToMhlo/dropout.mlir
@@ -10,7 +10,7 @@
 // CHECK: %[[CST_2:.*]] = arith.constant 1.000000e+00 : f64
 // CHECK: %[[CST_3:.*]] = arith.subf %[[CST_2]], %[[ARG1]] : f64
 // CHECK: %[[T3:.*]] = tensor.from_elements %[[CST_3]] : tensor<1xf64>
-// CHECK: %[[T4:.*]] = "mhlo.reshape"(%[[T3]]) : (tensor<1xf64>) -> tensor<f64>
+// CHECK: %[[T4:.*]] = mhlo.reshape %[[T3]] : (tensor<1xf64>) -> tensor<f64>
 // CHECK: %[[T5:.*]] = mhlo.convert(%[[ARG0]]) : (tensor<?x?xf32>) -> tensor<?x?xf64>
 // CHECK: %[[DIM_0:.*]] = tensor.dim %[[T5]], %[[CST_1]] : tensor<?x?xf64>
 // CHECK: %[[CST_I64_0:.*]] = arith.index_cast %[[DIM_0]] : index to i64
@@ -33,7 +33,7 @@
 // CHECK: shape.assuming_yield %[[T19]] : tensor<?x?xf32>
 // CHECK: }
 // CHECK: %[[T20:.*]] = mhlo.convert(%[[T3]]) : (tensor<1xf64>) -> tensor<1xf32>
-// CHECK: %[[T21:.*]] = "mhlo.reshape"(%[[T20]]) : (tensor<1xf32>) -> tensor<f32>
+// CHECK: %[[T21:.*]] = mhlo.reshape %[[T20]] : (tensor<1xf32>) -> tensor<f32>
 // CHECK: %[[T22:.*]] = shape.shape_of %[[T15]] : tensor<?x?xf32> -> tensor<2xindex>
 // CHECK: %[[T23:.*]] = "mhlo.dynamic_broadcast_in_dim"(%[[T21]], %[[T22]]) {broadcast_dimensions = dense<> : tensor<0xi64>} : (tensor<f32>, tensor<2xindex>) -> tensor<?x?xf32>
 // CHECK: %[[T24:.*]] = mhlo.multiply %[[T15]], %[[T23]] : tensor<?x?xf32>
@@ -44,4 +44,4 @@ func.func @torch.aten.native_dropout.train(%arg0: !torch.vtensor<[?,?],f32>, %ar
   %bool_true = torch.constant.bool true
   %result0, %result1 = torch.aten.native_dropout %arg0, %arg1, %bool_true: !torch.vtensor<[?,?],f32>, !torch.float, !torch.bool -> !torch.vtensor<[?,?],f32>, !torch.vtensor<[?,?],i1>
   return %result0, %result1 : !torch.vtensor<[?,?],f32>, !torch.vtensor<[?,?],i1>
-}
\ No newline at end of file
+}
diff --git a/test/Conversion/TorchToMhlo/elementwise.mlir b/test/Conversion/TorchToMhlo/elementwise.mlir
index 77aaea093..655f62a89 100644
--- a/test/Conversion/TorchToMhlo/elementwise.mlir
+++ b/test/Conversion/TorchToMhlo/elementwise.mlir
@@ -83,7 +83,7 @@ func.func @torch.aten.neg$basic(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vten
 // CHECK: %[[INT1:.*]] = torch.constant.int 1
 // CHECK: %[[T2:.*]] = tensor.from_elements %[[T1]] : tensor<1xi64>
 // CHECK: %[[T3:.*]] = mhlo.convert(%[[T2]]) : (tensor<1xi64>) -> tensor<1xf32>
-// CHECK: %[[T4:.*]] = "mhlo.reshape"(%[[T3]]) : (tensor<1xf32>) -> tensor<f32>
+// CHECK: %[[T4:.*]] = mhlo.reshape %[[T3]] : (tensor<1xf32>) -> tensor<f32>
 // CHECK: %[[T5:.*]] = chlo.broadcast_add %[[T0]], %[[T4]] : (tensor<?x?xf32>, tensor<f32>) -> tensor<?x?xf32>
 // CHECK: %[[T6:.*]] = torch_c.from_builtin_tensor %[[T5]] : tensor<?x?xf32> -> !torch.vtensor<[?,?],f32>
 // CHECK: return %[[T6]] : !torch.vtensor<[?,?],f32>
@@ -105,10 +105,10 @@ func.func @torch.aten.addscalar$basic(%arg0: !torch.vtensor<[?,?],f32>) -> !torc
 // CHECK: %[[T2:.*]] = torch_c.to_i64 %[[INT2]]
 // CHECK: %[[T3:.*]] = tensor.from_elements %[[T1]] : tensor<1xi64>
 // CHECK: %[[T4:.*]] = mhlo.convert(%[[T3]]) : (tensor<1xi64>) -> tensor<1xf32>
-// CHECK: %[[T5:.*]] = "mhlo.reshape"(%[[T4]]) : (tensor<1xf32>) -> tensor<f32>
+// CHECK: %[[T5:.*]] = mhlo.reshape %[[T4]] : (tensor<1xf32>) -> tensor<f32>
 // CHECK: %[[T6:.*]] = tensor.from_elements %[[T2]] : tensor<1xi64>
 // CHECK: %[[T7:.*]] = mhlo.convert(%[[T6]]) : (tensor<1xi64>) -> tensor<1xf32>
-// CHECK: %[[T8:.*]] = "mhlo.reshape"(%[[T7]]) : (tensor<1xf32>) -> tensor<f32>
+// CHECK: %[[T8:.*]] = mhlo.reshape %[[T7]] : (tensor<1xf32>) -> tensor<f32>
 // CHECK: %[[T9:.*]] = chlo.broadcast_multiply %[[T5]], %[[T8]] : (tensor<f32>, tensor<f32>) -> tensor<f32>
 // CHECK: %[[T10:.*]] = chlo.broadcast_add %[[T0]], %[[T9]] : (tensor<?x?xf32>, tensor<f32>) -> tensor<?x?xf32>
 // CHECK: %[[T11:.*]] = torch_c.from_builtin_tensor %[[T10]] : tensor<?x?xf32> -> !torch.vtensor<[?,?],f32>
@@ -146,7 +146,7 @@ func.func @torch.aten.addtensor$basic(%arg0: !torch.vtensor<[?,?],f32>, %arg1: !
 // CHECK: %[[T2:.*]] = torch_c.to_i64 %[[INT2]]
 // CHECK: %[[T3:.*]] = tensor.from_elements %[[T2]] : tensor<1xi64>
 // CHECK: %[[T4:.*]] = mhlo.convert(%[[T3]]) : (tensor<1xi64>) -> tensor<1xf32>
-// CHECK: %[[T5:.*]] = "mhlo.reshape"(%[[T4]]) : (tensor<1xf32>) -> tensor<f32>
+// CHECK: %[[T5:.*]] = mhlo.reshape %[[T4]] : (tensor<1xf32>) -> tensor<f32>
 // CHECK: %[[T6:.*]] = chlo.broadcast_multiply %[[T1]], %[[T5]] : (tensor<?x?xf32>, tensor<f32>) -> tensor<?x?xf32>
 // CHECK: %[[T7:.*]] = chlo.broadcast_add %[[T0]], %[[T6]] : (tensor<?x?xf32>, tensor<?x?xf32>) -> tensor<?x?xf32>
 // CHECK: %[[T8:.*]] = torch_c.from_builtin_tensor %[[T7]] : tensor<?x?xf32> -> !torch.vtensor<[?,?],f32>
@@ -184,7 +184,7 @@ func.func @torch.aten.addtensor$promote(%arg0: !torch.vtensor<[?,?],si32>, %arg1
 // CHECK: %[[INT1:.*]] = torch.constant.int 1
 // CHECK: %[[T2:.*]] = tensor.from_elements %[[T1]] : tensor<1xi64>
 // CHECK: %[[T3:.*]] = mhlo.convert(%[[T2]]) : (tensor<1xi64>) -> tensor<1xf32>
-// CHECK: %[[T4:.*]] = "mhlo.reshape"(%[[T3]]) : (tensor<1xf32>) -> tensor<f32>
+// CHECK: %[[T4:.*]] = mhlo.reshape %[[T3]] : (tensor<1xf32>) -> tensor<f32>
 // CHECK: %[[T5:.*]] = chlo.broadcast_subtract %[[T0]], %[[T4]] : (tensor<?x?xf32>, tensor<f32>) -> tensor<?x?xf32>
 // CHECK: %[[T6:.*]] = torch_c.from_builtin_tensor %[[T5]] : tensor<?x?xf32> -> !torch.vtensor<[?,?],f32>
 // CHECK: return %[[T6]] : !torch.vtensor<[?,?],f32>
@@ -206,10 +206,10 @@ func.func @torch.aten.subscalar$basic(%arg0: !torch.vtensor<[?,?],f32>) -> !torc
 // CHECK: %[[T2:.*]] = torch_c.to_i64 %[[INT2]]
 // CHECK: %[[T3:.*]] = tensor.from_elements %[[T1]] : tensor<1xi64>
 // CHECK: %[[T4:.*]] = mhlo.convert(%[[T3]]) : (tensor<1xi64>) -> tensor<1xf32>
-// CHECK: %[[T5:.*]] = "mhlo.reshape"(%[[T4]]) : (tensor<1xf32>) -> tensor<f32>
+// CHECK: %[[T5:.*]] = mhlo.reshape %[[T4]] : (tensor<1xf32>) -> tensor<f32>
 // CHECK: %[[T6:.*]] = tensor.from_elements %[[T2]] : tensor<1xi64>
 // CHECK: %[[T7:.*]] = mhlo.convert(%[[T6]]) : (tensor<1xi64>) -> tensor<1xf32>
-// CHECK: %[[T8:.*]] = "mhlo.reshape"(%[[T7]]) : (tensor<1xf32>) -> tensor<f32>
+// CHECK: %[[T8:.*]] = mhlo.reshape %[[T7]] : (tensor<1xf32>) -> tensor<f32>
 // CHECK: %[[T9:.*]] = chlo.broadcast_multiply %[[T5]], %[[T8]] : (tensor<f32>, tensor<f32>) -> tensor<f32>
 // CHECK: %[[T10:.*]] = chlo.broadcast_subtract %[[T0]], %[[T9]] : (tensor<?x?xf32>, tensor<f32>) -> tensor<?x?xf32>
 // CHECK: %[[T11:.*]] = torch_c.from_builtin_tensor %[[T10]] : tensor<?x?xf32> -> !torch.vtensor<[?,?],f32>
@@ -247,7 +247,7 @@ func.func @torch.aten.subtensor$basic(%arg0: !torch.vtensor<[?,?],f32>, %arg1: !
 // CHECK: %[[T2:.*]] = torch_c.to_i64 %[[INT2]]
 // CHECK: %[[T3:.*]] = tensor.from_elements %[[T2]] : tensor<1xi64>
 // CHECK: %[[T4:.*]] = mhlo.convert(%[[T3]]) : (tensor<1xi64>) -> tensor<1xf32>
-// CHECK: %[[T5:.*]] = "mhlo.reshape"(%[[T4]]) : (tensor<1xf32>) -> tensor<f32>
+// CHECK: %[[T5:.*]] = mhlo.reshape %[[T4]] : (tensor<1xf32>) -> tensor<f32>
 // CHECK: %[[T6:.*]] = chlo.broadcast_multiply %[[T1]], %[[T5]] : (tensor<?x?xf32>, tensor<f32>) -> tensor<?x?xf32>
 // CHECK: %[[T7:.*]] = chlo.broadcast_subtract %[[T0]], %[[T6]] : (tensor<?x?xf32>, tensor<?x?xf32>) -> tensor<?x?xf32>
 // CHECK: %[[T8:.*]] = torch_c.from_builtin_tensor %[[T7]] : tensor<?x?xf32> -> !torch.vtensor<[?,?],f32>
@@ -284,7 +284,7 @@ func.func @torch.aten.subtensor$promote(%arg0: !torch.vtensor<[?,?],si32>, %arg1
 // CHECK: %[[T1:.*]] = torch_c.to_i64 %[[INT9]]
 // CHECK: %[[T2:.*]] = tensor.from_elements %[[T1]] : tensor<1xi64>
 // CHECK: %[[T3:.*]] = mhlo.convert(%[[T2]]) : (tensor<1xi64>) -> tensor<1xf32>
-// CHECK: %[[T4:.*]] = "mhlo.reshape"(%[[T3]]) : (tensor<1xf32>) -> tensor<f32>
+// CHECK: %[[T4:.*]] = mhlo.reshape %[[T3]] : (tensor<1xf32>) -> tensor<f32>
 // CHECK: %[[T5:.*]] = chlo.broadcast_multiply %[[T0]], %[[T4]] : (tensor<?x?xf32>, tensor<f32>) -> tensor<?x?xf32>
 // CHECK: %[[T6:.*]] = torch_c.from_builtin_tensor %[[T5]] : tensor<?x?xf32> -> !torch.vtensor<[?,?],f32>
 // CHECK: return %[[T6]] : !torch.vtensor<[?,?],f32>
@@ -317,7 +317,7 @@ func.func @torch.aten.multensor$basic(%arg0: !torch.vtensor<[?,?],f32>, %arg1: !
 // CHECK: %[[T1:.*]] = torch_c.to_i64 %[[INT9]]
 // CHECK: %[[T2:.*]] = tensor.from_elements %[[T1]] : tensor<1xi64>
 // CHECK: %[[T3:.*]] = mhlo.convert(%[[T2]]) : (tensor<1xi64>) -> tensor<1xf32>
-// CHECK: %[[T4:.*]] = "mhlo.reshape"(%[[T3]]) : (tensor<1xf32>) -> tensor<f32>
+// CHECK: %[[T4:.*]] = mhlo.reshape %[[T3]] : (tensor<1xf32>) -> tensor<f32>
 // CHECK: %[[T5:.*]] = chlo.broadcast_divide %[[T0]], %[[T4]] : (tensor<?x?xf32>, tensor<f32>) -> tensor<?x?xf32>
 // CHECK: %[[T6:.*]] = torch_c.from_builtin_tensor %[[T5]] : tensor<?x?xf32> -> !torch.vtensor<[?,?],f32>
 // CHECK: return %[[T6]] : !torch.vtensor<[?,?],f32>
@@ -350,7 +350,7 @@ func.func @torch.aten.divtensor$basic(%arg0: !torch.vtensor<[?,?],f32>, %arg1: !
 // CHECK: %[[T1:.*]] = torch_c.to_i64 %[[INT3]]
 // CHECK: %[[T2:.*]] = tensor.from_elements %[[T1]] : tensor<1xi64>
 // CHECK: %[[T3:.*]] = mhlo.convert(%[[T2]]) : (tensor<1xi64>) -> tensor<1xf32>
-// CHECK: %[[T4:.*]] = "mhlo.reshape"(%[[T3]]) : (tensor<1xf32>) -> tensor<f32>
+// CHECK: %[[T4:.*]] = mhlo.reshape %[[T3]] : (tensor<1xf32>) -> tensor<f32>
 // CHECK: %[[T5:.*]] = chlo.broadcast_compare %[[T0]], %[[T4]] {compare_type = #mhlo<comparison_type FLOAT>, comparison_direction = #mhlo<comparison_direction GT>} : (tensor<?x?xf32>, tensor<f32>) -> tensor<?x?xi1>
 // CHECK: %[[T6:.*]] = torch_c.from_builtin_tensor %[[T5]] : tensor<?x?xi1> -> !torch.vtensor<[?,?],i1>
 // CHECK: return %[[T6]] : !torch.vtensor<[?,?],i1>
@@ -457,10 +457,10 @@ func.func @torch.aten.relu(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[
 // CHECK: %[[T1:.*]] = torch_c.to_f64 %[[ARG1]]
 // CHECK: %[[T2:.*]] = tensor.from_elements %[[T1]] : tensor<1xf64>
 // CHECK: %[[T3:.*]] = mhlo.convert(%[[T2]]) : (tensor<1xf64>) -> tensor<1xf32>
-// CHECK: %[[T4:.*]] = "mhlo.reshape"(%[[T3]]) : (tensor<1xf32>) -> tensor<f32>
+// CHECK: %[[T4:.*]] = mhlo.reshape %[[T3]] : (tensor<1xf32>) -> tensor<f32>
 // CHECK: %[[T5:.*]] = tensor.from_elements %[[T1]] : tensor<1xf64>
 // CHECK: %[[T6:.*]] = mhlo.convert(%[[T5]]) : (tensor<1xf64>) -> tensor<1xf32>
-// CHECK: %[[T7:.*]] = "mhlo.reshape"(%[[T6]]) : (tensor<1xf32>) -> tensor<f32>
+// CHECK: %[[T7:.*]] = mhlo.reshape %[[T6]] : (tensor<1xf32>) -> tensor<f32>
 // CHECK: %[[T8:.*]] = chlo.broadcast_multiply %[[T4]], %[[T7]] : (tensor<f32>, tensor<f32>) -> tensor<f32>
 // CHECK: %[[T9:.*]] = chlo.broadcast_add %[[T0]], %[[T8]] : (tensor<?x?xf32>, tensor<f32>) -> tensor<?x?xf32>
 // CHECK: %[[T10:.*]] = torch_c.from_builtin_tensor %[[T9]] : tensor<?x?xf32> -> !torch.vtensor<[?,?],f32>
@@ -479,7 +479,7 @@ func.func @torch.aten.addscalar$variable(%arg0: !torch.vtensor<[?,?],f32>, %arg1
 // CHECK: %[[T2:.*]] = torch_c.to_f64 %[[ARG2]]
 // CHECK: %[[T3:.*]] = tensor.from_elements %[[T2]] : tensor<1xf64>
 // CHECK: %[[T4:.*]] = mhlo.convert(%[[T3]]) : (tensor<1xf64>) -> tensor<1xf32>
-// CHECK: %[[T5:.*]] = "mhlo.reshape"(%[[T4]]) : (tensor<1xf32>) -> tensor<f32>
+// CHECK: %[[T5:.*]] = mhlo.reshape %[[T4]] : (tensor<1xf32>) -> tensor<f32>
 // CHECK: %[[T6:.*]] = chlo.broadcast_multiply %[[T1]], %[[T5]] : (tensor<?x?xf32>, tensor<f32>) -> tensor<?x?xf32>
 // CHECK: %[[T7:.*]] = chlo.broadcast_add %[[T0]], %[[T6]] : (tensor<?x?xf32>, tensor<?x?xf32>) -> tensor<?x?xf32>
 // CHECK: %[[T8:.*]] = torch_c.from_builtin_tensor %[[T7]] : tensor<?x?xf32> -> !torch.vtensor<[?,?],f32>
@@ -497,7 +497,7 @@ func.func @torch.aten.addtensor$variable(%arg0: !torch.vtensor<[?,?],f32>, %arg1
 // CHECK: %[[T1:.*]] = torch_c.to_i64 %[[ARG1]]
 // CHECK: %[[T2:.*]] = tensor.from_elements %[[T1]] : tensor<1xi64>
 // CHECK: %[[T3:.*]] = mhlo.convert(%[[T2]]) : (tensor<1xi64>) -> tensor<1xf32>
-// CHECK: %[[T4:.*]] = "mhlo.reshape"(%[[T3]]) : (tensor<1xf32>) -> tensor<f32>
+// CHECK: %[[T4:.*]] = mhlo.reshape %[[T3]] : (tensor<1xf32>) -> tensor<f32>
 // CHECK: %[[T5:.*]] = chlo.broadcast_multiply %[[T0]], %[[T4]] : (tensor<?x?xf32>, tensor<f32>) -> tensor<?x?xf32>
 // CHECK: %[[T6:.*]] = torch_c.from_builtin_tensor %[[T5]] : tensor<?x?xf32> -> !torch.vtensor<[?,?],f32>
 // CHECK: return %[[T6]] : !torch.vtensor<[?,?],f32>
@@ -514,7 +514,7 @@ func.func @torch.aten.mulscalar$variable(%arg0: !torch.vtensor<[?,?],f32>, %arg1
 // CHECK: %[[T1:.*]] = torch_c.to_i64 %[[ARG1]]
 // CHECK: %[[T2:.*]] = tensor.from_elements %[[T1]] : tensor<1xi64>
 // CHECK: %[[T3:.*]] = mhlo.convert(%[[T2]]) : (tensor<1xi64>) -> tensor<1xf32>
-// CHECK: %[[T4:.*]] = "mhlo.reshape"(%[[T3]]) : (tensor<1xf32>) -> tensor<f32>
+// CHECK: %[[T4:.*]] = mhlo.reshape %[[T3]] : (tensor<1xf32>) -> tensor<f32>
 // CHECK: %[[T5:.*]] = chlo.broadcast_divide %[[T0]], %[[T4]] : (tensor<?x?xf32>, tensor<f32>) -> tensor<?x?xf32>
 // CHECK: %[[T6:.*]] = torch_c.from_builtin_tensor %[[T5]] : tensor<?x?xf32> -> !torch.vtensor<[?,?],f32>
 // CHECK: return %[[T6]] : !torch.vtensor<[?,?],f32>
@@ -531,7 +531,7 @@ func.func @torch.aten.divscalar$variable(%arg0: !torch.vtensor<[?,?],f32>, %arg1
 // CHECK: %[[T1:.*]] = torch_c.to_i64 %[[ARG1]]
 // CHECK: %[[T2:.*]] = tensor.from_elements %[[T1]] : tensor<1xi64>
 // CHECK: %[[T3:.*]] = mhlo.convert(%[[T2]]) : (tensor<1xi64>) -> tensor<1xf32>
-// CHECK: %[[T4:.*]] = "mhlo.reshape"(%[[T3]]) : (tensor<1xf32>) -> tensor<f32>
+// CHECK: %[[T4:.*]] = mhlo.reshape %[[T3]] : (tensor<1xf32>) -> tensor<f32>
 // CHECK: %[[T5:.*]] = chlo.broadcast_compare %[[T0]], %[[T4]] {compare_type = #mhlo<comparison_type FLOAT>, comparison_direction = #mhlo<comparison_direction GT>} : (tensor<?x?xf32>, tensor<f32>) -> tensor<?x?xi1>
 // CHECK: %[[T6:.*]] = torch_c.from_builtin_tensor %[[T5]] : tensor<?x?xi1> -> !torch.vtensor<[?,?],i1>
 // CHECK: return %[[T6]] : !torch.vtensor<[?,?],i1>
diff --git a/test/Conversion/TorchToMhlo/linear.mlir b/test/Conversion/TorchToMhlo/linear.mlir
index 6c42f7af0..bad66a84d 100644
--- a/test/Conversion/TorchToMhlo/linear.mlir
+++ b/test/Conversion/TorchToMhlo/linear.mlir
@@ -328,7 +328,7 @@ func.func @torch.aten.convolution(%arg0: !torch.vtensor<[?,?,?,?],f32>, %arg1: !
 // CHECK: %[[T_10:.*]] = arith.index_cast %[[T_9]] : index to i64
 // CHECK: %[[VAL_0:.*]] = arith.constant 1 : i64
 // CHECK: %[[T_11:.*]] = tensor.from_elements %[[T_10]], %[[VAL_0]], %[[VAL_0]] : tensor<3xi64>
-// CHECK: %[[T_12:.*]] = "mhlo.dynamic_reshape"(%[[T_2]], %[[T_11]]) : (tensor<?xf32>, tensor<3xi64>) -> tensor<?x1x1xf32>
+// CHECK: %[[T_12:.*]] = mhlo.dynamic_reshape %[[T_2]], %[[T_11]] : (tensor<?xf32>, tensor<3xi64>) -> tensor<?x1x1xf32>
 // CHECK: %[[T_13:.*]] = chlo.broadcast_add %[[T_8]], %[[T_12]] : (tensor<?x?x?x?xf32>, tensor<?x1x1xf32>) -> tensor<?x?x?x?xf32>
 // CHECK: %[[T_14:.*]] = torch_c.from_builtin_tensor %[[T_13]] : tensor<?x?x?x?xf32> -> !torch.vtensor<[?,?,?,?],f32>
 // CHECK: return %[[T_14]] : !torch.vtensor<[?,?,?,?],f32>
@@ -479,10 +479,10 @@ func.func @torch.aten.convolution$transposed_outputpadding(%arg0: !torch.vtensor
 // CHECK: %[[T_15:.*]] = arith.divsi %[[T_8]], %[[T_24]] : i64
 // CHECK: %[[T_16:.*]] = arith.muli %[[T_10]], %[[T_24]] : i64
 // CHECK: %[[T_17:.*]] = tensor.from_elements %[[T_24]], %[[T_15]], %[[T_10]], %[[T_12]], %[[T_14]] : tensor<5xi64>
-// CHECK: %[[T_18:.*]] = "mhlo.dynamic_reshape"(%[[T_6]], %[[T_17]]) : (tensor<2x2x3x3xf32>, tensor<5xi64>) -> tensor<2x1x2x3x3xf32>
+// CHECK: %[[T_18:.*]] = mhlo.dynamic_reshape %[[T_6]], %[[T_17]] : (tensor<2x2x3x3xf32>, tensor<5xi64>) -> tensor<2x1x2x3x3xf32>
 // CHECK: %[[T_19:.*]] = "mhlo.transpose"(%[[T_18]]) {permutation = dense<[1, 0, 2, 3, 4]> : tensor<5xi64>} : (tensor<2x1x2x3x3xf32>) -> tensor<1x2x2x3x3xf32>
 // CHECK: %[[T_20:.*]] = tensor.from_elements %[[T_15]], %[[T_16]], %[[T_12]], %[[T_14]] : tensor<4xi64>
-// CHECK: %[[T_21:.*]] = "mhlo.dynamic_reshape"(%[[T_19]], %[[T_20]]) : (tensor<1x2x2x3x3xf32>, tensor<4xi64>) -> tensor<1x4x3x3xf32>
+// CHECK: %[[T_21:.*]] = mhlo.dynamic_reshape %[[T_19]], %[[T_20]] : (tensor<1x2x2x3x3xf32>, tensor<4xi64>) -> tensor<1x4x3x3xf32>
 // CHECK: %[[T_22:.*]] = mhlo.convolution(%[[T_0]], %[[T_21]])
 // CHECK{LITERAL}: dim_numbers = [b, f, 0, 1]x[i, o, 0, 1]->[b, f, 0, 1], window = {stride = [1, 1], pad = [[2, 2], [2, 2]], lhs_dilate = [2, 2], rhs_dilate = [1, 1]} {batch_group_count = 1 : i64, feature_group_count = 2 : i64} : (tensor<1x2x7x7xf32>, tensor<1x4x3x3xf32>) -> tensor<1x4x15x15xf32>
 // CHECK: %[[T_23:.*]] = torch_c.from_builtin_tensor %[[T_22]] : tensor<1x4x15x15xf32> -> !torch.vtensor<[1,4,15,15],f32>
@@ -498,4 +498,4 @@ func.func @torch.aten.convolution$transposed_groups(%arg0: !torch.vtensor<[1,2,7
   %2 = torch.prim.ListConstruct %int2, %int2 : (!torch.int, !torch.int) -> !torch.list<int>
   %3 = torch.aten.convolution %arg0, %arg1, %none, %2, %0, %1, %true, %0, %int2 : !torch.vtensor<[1,2,7,7],f32>, !torch.vtensor<[2,2,3,3],f32>, !torch.none, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,4,15,15],f32>
   return %3 : !torch.vtensor<[1,4,15,15],f32>
-}
\ No newline at end of file
+}
diff --git a/test/Conversion/TorchToMhlo/pooling.mlir b/test/Conversion/TorchToMhlo/pooling.mlir
index b976e473e..00c918af5 100644
--- a/test/Conversion/TorchToMhlo/pooling.mlir
+++ b/test/Conversion/TorchToMhlo/pooling.mlir
@@ -17,7 +17,7 @@
 // CHECK: %[[VAL_7:.*]] = "mhlo.reduce_window"(%[[VAL_1]], %[[VAL_6]]) ({
 // CHECK: ^bb0(%[[VAL_8:.*]]: tensor<f32>, %[[VAL_9:.*]]: tensor<f32>):
 // CHECK: %[[VAL_10:.*]] = mhlo.maximum %[[VAL_8]], %[[VAL_9]] : tensor<f32>
-// CHECK: "mhlo.return"(%[[VAL_10]]) : (tensor<f32>) -> ()
+// CHECK: mhlo.return %[[VAL_10]] : tensor<f32>
 // CHECK: }) {padding = dense<0> : tensor<4x2xi64>, window_dilations = dense<[1, 1, 2, 1]> : tensor<4xi64>, window_dimensions = dense<[1, 1, 2, 2]> : tensor<4xi64>, window_strides = dense<1> : tensor<4xi64>} : (tensor<?x?x?x?xf32>, tensor<f32>) -> tensor<?x?x?x?xf32>
 // CHECK: %[[VAL_11:.*]] = torch_c.from_builtin_tensor %[[VAL_7]] : tensor<?x?x?x?xf32> -> !torch.vtensor<[?,?,?,?],f32>
 // CHECK: return %[[VAL_11]] : !torch.vtensor<[?,?,?,?],f32>
@@ -49,7 +49,7 @@ func.func @torch.aten.max_pool2d(%arg0: !torch.vtensor<[?,?,?,?],f32>) -> !torch
 // CHECK: %[[VAL_6:.*]] = "mhlo.reduce_window"(%[[VAL_1]], %[[VAL_5]]) ({
 // CHECK: ^bb0(%[[VAL_8:.*]]: tensor<f32>, %[[VAL_9:.*]]: tensor<f32>):
 // CHECK: %[[VAL_10:.*]] = mhlo.maximum %[[VAL_8]], %[[VAL_9]] : tensor<f32>
-// CHECK: "mhlo.return"(%[[VAL_10]]) : (tensor<f32>) -> ()
+// CHECK: mhlo.return %[[VAL_10]] : tensor<f32>
 // CHECK: })
 // CHECK-SAME{LITERAL}: {padding = dense<[[0, 0], [0, 0], [2, 2], [1, 1]]> : tensor<4x2xi64>, window_dilations = dense<[1, 1, 2, 1]> : tensor<4xi64>, window_dimensions = dense<[1, 1, 2, 2]> : tensor<4xi64>, window_strides = dense<1> : tensor<4xi64>} : (tensor<?x?x?x?xf32>, tensor<f32>) -> tensor<?x?x?x?xf32>
 // CHECK: %[[VAL_7:.*]] = torch_c.from_builtin_tensor %[[VAL_6]] : tensor<?x?x?x?xf32> -> !torch.vtensor<[?,?,?,?],f32>
@@ -94,7 +94,7 @@ func.func @torch.aten.max_pool2d$padding(%arg0: !torch.vtensor<[?,?,?,?],f32>) -
 // CHECK: %[[VAL_14:.*]] = arith.muli %[[VAL_12]], %[[VAL_10]] : i64
 // CHECK: %[[VAL_15:.*]] = tensor.from_elements %[[VAL_8]], %[[VAL_14]] : tensor<2xi64>
 // CHECK: %[[VAL_16:.*]] = "mhlo.dynamic_iota"(%[[VAL_15]]) {iota_dimension = 1 : i64} : (tensor<2xi64>) -> tensor<?x?xi64>
-// CHECK: %[[VAL_17:.*]] = "mhlo.dynamic_reshape"(%[[VAL_16]], %[[VAL_13]]) : (tensor<?x?xi64>, tensor<3xi64>) -> tensor<?x?x?xi64>
+// CHECK: %[[VAL_17:.*]] = mhlo.dynamic_reshape %[[VAL_16]], %[[VAL_13]] : (tensor<?x?xi64>, tensor<3xi64>) -> tensor<?x?x?xi64>
 // CHECK: %[[VAL_18:.*]] = mhlo.constant dense<0> : tensor<i64>
 // CHECK: %[[VAL_19:.*]]:2 = "mhlo.reduce_window"(%[[VAL_1]], %[[VAL_17]], %[[VAL_6]], %[[VAL_18]]) ({
 // CHECK: ^bb0(%[[IVAL_0:.*]]: tensor<f32>, %[[IVAL_1:.*]]: tensor<i64>, %[[IVAL_2:.*]]: tensor<f32>, %[[IVAL_3:.*]]: tensor<i64>):
@@ -104,7 +104,7 @@ func.func @torch.aten.max_pool2d$padding(%arg0: !torch.vtensor<[?,?,?,?],f32>) -
 // CHECK: %[[IVAL_7:.*]] = mhlo.minimum %[[IVAL_1]], %[[IVAL_3]] : tensor<i64>
 // CHECK: %[[IVAL_8:.*]] = "mhlo.select"(%[[IVAL_4]], %[[IVAL_1]], %[[IVAL_3]]) : (tensor<i1>, tensor<i64>, tensor<i64>) -> tensor<i64>
 // CHECK: %[[IVAL_9:.*]] = "mhlo.select"(%[[IVAL_6]], %[[IVAL_7]], %[[IVAL_8]]) : (tensor<i1>, tensor<i64>, tensor<i64>) -> tensor<i64>
-// CHECK: "mhlo.return"(%[[IVAL_5]], %[[IVAL_9]]) : (tensor<f32>, tensor<i64>) -> ()
+// CHECK: mhlo.return %[[IVAL_5]], %[[IVAL_9]] : tensor<f32>, tensor<i64>
 // CHECK{LITERAL}: }) {padding = dense<0> : tensor<3x2xi64>, window_dilations = dense<1> : tensor<3xi64>, window_dimensions = dense<[1, 3, 3]> : tensor<3xi64>, window_strides = dense<[1, 2, 2]> : tensor<3xi64>} : (tensor<?x?x?xf32>, tensor<?x?x?xi64>, tensor<f32>, tensor<i64>) -> (tensor<?x?x?xf32>, tensor<?x?x?xi64>)
 // CHECK: %[[VAL_20:.*]] = torch_c.from_builtin_tensor %[[VAL_19]]#0 : tensor<?x?x?xf32> -> !torch.vtensor<[?,?,?],f32>
 // CHECK: %[[VAL_21:.*]] = torch_c.from_builtin_tensor %[[VAL_19]]#1 : tensor<?x?x?xi64> -> !torch.vtensor<[?,?,?],si64>
@@ -140,7 +140,7 @@ func.func @torch.aten.max_pool2d_with_indices(%arg0: !torch.vtensor<[?,?,?],f32>
 // CHECK: %[[VAL_6:.*]] = "mhlo.reduce_window"(%[[VAL_1]], %[[VAL_5]]) ({
 // CHECK: ^bb0(%[[IVAL_0:.*]]: tensor<f32>, %[[IVAL_1:.*]]: tensor<f32>):
 // CHECK: %[[IVAL_2:.*]] = mhlo.add %[[IVAL_0]], %[[IVAL_1]] : tensor<f32>
-// CHECK: "mhlo.return"(%[[IVAL_2]]) : (tensor<f32>) -> ()
+// CHECK: mhlo.return %[[IVAL_2]] : tensor<f32>
 // CHECK{LITERAL}: }) {padding = dense<[[0, 0], [0, 0], [1, 1], [1, 1]]> : tensor<4x2xi64>, window_dilations = dense<1> : tensor<4xi64>, window_dimensions = dense<[1, 1, 3, 3]> : tensor<4xi64>, window_strides = dense<[1, 1, 2, 2]> : tensor<4xi64>} : (tensor<?x?x?x?xf32>, tensor<f32>) -> tensor<?x?x?x?xf32>
 // CHECK: %[[VAL_7:.*]] = mhlo.constant dense<1.000000e+00> : tensor<f32>
 // CHECK: %[[IDX_0:.*]] = arith.constant 0 : index
@@ -161,7 +161,7 @@ func.func @torch.aten.max_pool2d_with_indices(%arg0: !torch.vtensor<[?,?,?],f32>
 // CHECK: %[[VAL_19:.*]] = "mhlo.reduce_window"(%[[VAL_17]], %[[VAL_18]]) ({
 // CHECK: ^bb0(%[[IVAL_3:.*]]: tensor<f32>, %[[IVAL_4:.*]]: tensor<f32>):
 // CHECK: %[[IVAL_5:.*]] = mhlo.add %[[IVAL_3]], %[[IVAL_4]] : tensor<f32>
-// CHECK: "mhlo.return"(%[[IVAL_5]]) : (tensor<f32>) -> ()
+// CHECK: mhlo.return %[[IVAL_5]] : tensor<f32>
 // CHECK{LITERAL}: }) {padding = dense<[[0, 0], [0, 0], [1, 1], [1, 1]]> : tensor<4x2xi64>, window_dilations = dense<1> : tensor<4xi64>, window_dimensions = dense<[1, 1, 3, 3]> : tensor<4xi64>, window_strides = dense<[1, 1, 2, 2]> : tensor<4xi64>} : (tensor<?x?x?x?xf32>, tensor<f32>) -> tensor<?x?x?x?xf32>
 // CHECK: %[[VAL_20:.*]] = mhlo.divide %[[VAL_6]], %[[VAL_19]] : tensor<?x?x?x?xf32>
 // CHECK: %[[VAL_21:.*]] = torch_c.from_builtin_tensor %[[VAL_20]] : tensor<?x?x?x?xf32> -> !torch.vtensor<[?,?,?,?],f32>
@@ -196,7 +196,7 @@ func.func @torch.aten.avg_pool2d(%arg0: !torch.vtensor<[?,?,?,?],f32>) -> !torch
 // CHECK: %[[VAL_6:.*]] = "mhlo.reduce_window"(%[[VAL_1]], %[[VAL_5]]) ({
 // CHECK: ^bb0(%[[IVAL_0:.*]]: tensor<f32>, %[[IVAL_1:.*]]: tensor<f32>):
 // CHECK: %[[IVAL_2:.*]] = mhlo.add %[[IVAL_0]], %[[IVAL_1]] : tensor<f32>
-// CHECK: "mhlo.return"(%[[IVAL_2]]) : (tensor<f32>) -> ()
+// CHECK: mhlo.return %[[IVAL_2]] : tensor<f32>
 // CHECK{LITERAL}: }) {padding = dense<[[0, 0], [0, 0], [1, 1], [1, 1]]> : tensor<4x2xi64>, window_dilations = dense<1> : tensor<4xi64>, window_dimensions = dense<[1, 1, 3, 3]> : tensor<4xi64>, window_strides = dense<[1, 1, 2, 2]> : tensor<4xi64>} : (tensor<?x?x?x?xf32>, tensor<f32>) -> tensor<?x?x?x?xf32>
 // CHECK: %[[VAL_7:.*]] = mhlo.constant dense<9> : tensor<i64>
 // CHECK: %[[VAL_8:.*]] = mhlo.convert(%[[VAL_7]]) : (tensor<i64>) -> tensor<f32>
diff --git a/test/Conversion/TorchToMhlo/reduction.mlir b/test/Conversion/TorchToMhlo/reduction.mlir
index 21f50e677..fb8545b78 100644
--- a/test/Conversion/TorchToMhlo/reduction.mlir
+++ b/test/Conversion/TorchToMhlo/reduction.mlir
@@ -23,12 +23,12 @@
 // CHECK: %[[VAL_18:.*]] = mhlo.minimum %[[VAL_12]], %[[VAL_14]] : tensor<i64>
 // CHECK: %[[VAL_19:.*]] = "mhlo.select"(%[[VAL_15]], %[[VAL_12]], %[[VAL_14]]) : (tensor<i1>, tensor<i64>, tensor<i64>) -> tensor<i64>
 // CHECK: %[[VAL_20:.*]] = "mhlo.select"(%[[VAL_17]], %[[VAL_18]], %[[VAL_19]]) : (tensor<i1>, tensor<i64>, tensor<i64>) -> tensor<i64>
-// CHECK: "mhlo.return"(%[[VAL_16]], %[[VAL_20]]) : (tensor<f32>, tensor<i64>) -> ()
+// CHECK: mhlo.return %[[VAL_16]], %[[VAL_20]] : tensor<f32>, tensor<i64>
 // CHECK: }
 // CHECK: %[[VAL_21:.*]] = arith.constant 1 : i64
 // CHECK: %[[VAL_22:.*]] = tensor.from_elements %[[VAL_3]], %[[VAL_21]] : tensor<2xi64>
-// CHECK: %[[VAL_23:.*]] = "mhlo.dynamic_reshape"(%[[VAL_10]]#0, %[[VAL_22]]) : (tensor<?xf32>, tensor<2xi64>) -> tensor<?x1xf32>
-// CHECK: %[[VAL_24:.*]] = "mhlo.dynamic_reshape"(%[[VAL_10]]#1, %[[VAL_22]]) : (tensor<?xi64>, tensor<2xi64>) -> tensor<?x1xi64>
+// CHECK: %[[VAL_23:.*]] = mhlo.dynamic_reshape %[[VAL_10]]#0, %[[VAL_22]] : (tensor<?xf32>, tensor<2xi64>) -> tensor<?x1xf32>
+// CHECK: %[[VAL_24:.*]] = mhlo.dynamic_reshape %[[VAL_10]]#1, %[[VAL_22]] : (tensor<?xi64>, tensor<2xi64>) -> tensor<?x1xi64>
 // CHECK: %[[VAL_25:.*]] = torch_c.from_builtin_tensor %[[VAL_23]] : tensor<?x1xf32> -> !torch.vtensor<[?,1],f32>
 // CHECK: %[[VAL_26:.*]] = torch_c.from_builtin_tensor %[[VAL_24]] : tensor<?x1xi64> -> !torch.vtensor<[?,1],si64>
 // CHECK: return %[[VAL_25]], %[[VAL_26]] : !torch.vtensor<[?,1],f32>, !torch.vtensor<[?,1],si64>
@@ -64,7 +64,7 @@ func.func @torch.aten.max.dim$keepdim(%arg0: !torch.vtensor<[?,?],f32>) -> (!tor
 // CHECK: %[[VAL_18:.*]] = mhlo.minimum %[[VAL_12]], %[[VAL_14]] : tensor<i64>
 // CHECK: %[[VAL_19:.*]] = "mhlo.select"(%[[VAL_15]], %[[VAL_12]], %[[VAL_14]]) : (tensor<i1>, tensor<i64>, tensor<i64>) -> tensor<i64>
 // CHECK: %[[VAL_20:.*]] = "mhlo.select"(%[[VAL_17]], %[[VAL_18]], %[[VAL_19]]) : (tensor<i1>, tensor<i64>, tensor<i64>) -> tensor<i64>
-// CHECK: "mhlo.return"(%[[VAL_16]], %[[VAL_20]]) : (tensor<f32>, tensor<i64>) -> ()
+// CHECK: mhlo.return %[[VAL_16]], %[[VAL_20]] : tensor<f32>, tensor<i64>
 // CHECK: }
 // CHECK: %[[VAL_21:.*]] = torch_c.from_builtin_tensor %[[VAL_10]]#0 : tensor<?xf32> -> !torch.vtensor<[?],f32>
 // CHECK: %[[VAL_22:.*]] = torch_c.from_builtin_tensor %[[VAL_10]]#1 : tensor<?xi64> -> !torch.vtensor<[?],si64>
@@ -101,11 +101,11 @@ func.func @torch.aten.max.dim(%arg0: !torch.vtensor<[?,?],f32>) -> (!torch.vtens
 // CHECK: %[[VAL_18:.*]] = mhlo.minimum %[[VAL_12]], %[[VAL_14]] : tensor<i64>
 // CHECK: %[[VAL_19:.*]] = "mhlo.select"(%[[VAL_15]], %[[VAL_12]], %[[VAL_14]]) : (tensor<i1>, tensor<i64>, tensor<i64>) -> tensor<i64>
 // CHECK: %[[VAL_20:.*]] = "mhlo.select"(%[[VAL_17]], %[[VAL_18]], %[[VAL_19]]) : (tensor<i1>, tensor<i64>, tensor<i64>) -> tensor<i64>
-// CHECK: "mhlo.return"(%[[VAL_16]], %[[VAL_20]]) : (tensor<f32>, tensor<i64>) -> ()
+// CHECK: mhlo.return %[[VAL_16]], %[[VAL_20]] : tensor<f32>, tensor<i64>
 // CHECK: }
 // CHECK: %[[VAL_21:.*]] = arith.constant 1 : i64
 // CHECK: %[[VAL_22:.*]] = tensor.from_elements %[[VAL_3]], %[[VAL_21]] : tensor<2xi64>
-// CHECK: %[[VAL_23:.*]] = "mhlo.dynamic_reshape"(%[[VAL_10]]#1, %[[VAL_22]]) : (tensor<?xi64>, tensor<2xi64>) -> tensor<?x1xi64>
+// CHECK: %[[VAL_23:.*]] = mhlo.dynamic_reshape %[[VAL_10]]#1, %[[VAL_22]] : (tensor<?xi64>, tensor<2xi64>) -> tensor<?x1xi64>
 // CHECK: %[[VAL_24:.*]] = torch_c.from_builtin_tensor %[[VAL_23]] : tensor<?x1xi64> -> !torch.vtensor<[?,1],si64>
 // CHECK: return %[[VAL_24]] : !torch.vtensor<[?,1],si64>
 func.func @torch.aten.argmax$keepdim(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor<[?,1],si64> {
@@ -140,7 +140,7 @@ func.func @torch.aten.argmax$keepdim(%arg0: !torch.vtensor<[?,?],f32>) -> !torch
 // CHECK: %[[VAL_18:.*]] = mhlo.minimum %[[VAL_12]], %[[VAL_14]] : tensor<i64>
 // CHECK: %[[VAL_19:.*]] = "mhlo.select"(%[[VAL_15]], %[[VAL_12]], %[[VAL_14]]) : (tensor<i1>, tensor<i64>, tensor<i64>) -> tensor<i64>
 // CHECK: %[[VAL_20:.*]] = "mhlo.select"(%[[VAL_17]], %[[VAL_18]], %[[VAL_19]]) : (tensor<i1>, tensor<i64>, tensor<i64>) -> tensor<i64>
-// CHECK: "mhlo.return"(%[[VAL_16]], %[[VAL_20]]) : (tensor<f32>, tensor<i64>) -> ()
+// CHECK: mhlo.return %[[VAL_16]], %[[VAL_20]] : tensor<f32>, tensor<i64>
 // CHECK: }
 // CHECK: %[[VAL_11:.*]] = torch_c.from_builtin_tensor %[[VAL_10]]#1 : tensor<?xi64> -> !torch.vtensor<[?],si64>
 // CHECK: return %[[VAL_11]] : !torch.vtensor<[?],si64>
@@ -174,7 +174,7 @@ func.func @torch.aten.argmax(%arg0: !torch.vtensor<[?,?],f32>) -> !torch.vtensor
 // CHECK: %[[VAL_10:.*]] = arith.index_cast %[[VAL_9]] : index to i64
 // CHECK: %[[ONE_0:.*]] = arith.constant 1 : i64
 // CHECK: %[[VAL_11:.*]] = tensor.from_elements %[[ONE_0]], %[[ONE_0]], %[[VAL_10]] : tensor<3xi64>
-// CHECK: %[[VAL_12:.*]] = "mhlo.dynamic_reshape"(%[[VAL_4]], %[[VAL_11]]) : (tensor<?xf32>, tensor<3xi64>) -> tensor<1x1x?xf32>
+// CHECK: %[[VAL_12:.*]] = mhlo.dynamic_reshape %[[VAL_4]], %[[VAL_11]] : (tensor<?xf32>, tensor<3xi64>) -> tensor<1x1x?xf32>
 // CHECK: %[[VAL_13:.*]] = torch_c.from_builtin_tensor %[[VAL_12]] : tensor<1x1x?xf32> -> !torch.vtensor<[1,1,?],f32>
 // CHECK: return %[[VAL_13]] : !torch.vtensor<[1,1,?],f32>
 func.func @torch.aten.sum.dim_Intlist$keepdim(%arg0: !torch.vtensor<[?,?,?],f32>) -> !torch.vtensor<[1,1,?],f32> {
diff --git a/test/Conversion/TorchToMhlo/view_like.mlir b/test/Conversion/TorchToMhlo/view_like.mlir
index ce9b6f947..37d38d9ae 100644
--- a/test/Conversion/TorchToMhlo/view_like.mlir
+++ b/test/Conversion/TorchToMhlo/view_like.mlir
@@ -42,7 +42,7 @@
 // CHECK: %[[T26:.*]] = tensor.from_elements %[[T11]], %[[C0_I64_2]], %[[C0_I64_2]] : tensor<3xi64>
 // CHECK: %[[T27:.*]] = tensor.from_elements %[[T25]], %[[T21]], %[[T23]] : tensor<3xi64>
 // CHECK: %[[T28:.*]] = tensor.from_elements %[[T2]], %[[C1_I64]], %[[C1_I64]] : tensor<3xi64>
-// CHECK: %[[T29:.*]] = "mhlo.real_dynamic_slice"(%[[T0]], %[[T26]], %[[T27]], %[[T28]]) : (tensor<?x?x?xf32>, tensor<3xi64>, tensor<3xi64>, tensor<3xi64>) -> tensor<?x?x?xf32>
+// CHECK: %[[T29:.*]] = mhlo.real_dynamic_slice %[[T0]], %[[T26]], %[[T27]], %[[T28]] : (tensor<?x?x?xf32>, tensor<3xi64>, tensor<3xi64>, tensor<3xi64>) -> tensor<?x?x?xf32>
 // CHECK: %[[T30:.*]] = mhlo.convert %[[T29]] : tensor<?x?x?xf32>
 // CHECK: %[[T31:.*]] = torch_c.from_builtin_tensor %[[T30]] : tensor<?x?x?xf32> -> !torch.vtensor<[?,?,?],f32>
 // CHECK: return %[[T31]] : !torch.vtensor<[?,?,?],f32>
@@ -96,7 +96,7 @@ func.func @torch.aten.slice.strided$slice_like(%arg0: !torch.vtensor<[?,?,?],f32
 // CHECK: %[[T26:.*]] = tensor.from_elements %[[T11]], %[[C0_I64_2]], %[[C0_I64_2]] : tensor<3xi64>
 // CHECK: %[[T27:.*]] = tensor.from_elements %[[T25]], %[[T21]], %[[T23]] : tensor<3xi64>
 // CHECK: %[[T28:.*]] = tensor.from_elements %[[T2]], %[[C1_I64]], %[[C1_I64]] : tensor<3xi64>
-// CHECK: %[[T29:.*]] = "mhlo.real_dynamic_slice"(%[[T0]], %[[T26]], %[[T27]], %[[T28]]) : (tensor<4x65x256xf32>, tensor<3xi64>, tensor<3xi64>, tensor<3xi64>) -> tensor<?x65x256xf32>
+// CHECK: %[[T29:.*]] = mhlo.real_dynamic_slice %[[T0]], %[[T26]], %[[T27]], %[[T28]] : (tensor<4x65x256xf32>, tensor<3xi64>, tensor<3xi64>, tensor<3xi64>) -> tensor<?x65x256xf32>
 // CHECK: %[[T30:.*]] = mhlo.convert(%[[T29]]) : (tensor<?x65x256xf32>) -> tensor<2x65x256xf32>
 // CHECK: %[[T31:.*]] = torch_c.from_builtin_tensor %[[T30]] : tensor<2x65x256xf32> -> !torch.vtensor<[2,65,256],f32>
 // CHECK: return %[[T31]] : !torch.vtensor<[2,65,256],f32>
@@ -151,7 +151,7 @@ func.func @torch.aten.slice.strided.static$slice_like(%arg0: !torch.vtensor<[4,6
 // CHECK: %[[T26:.*]] = tensor.from_elements %[[C0_I64_2]], %[[T11]], %[[C0_I64_2]] : tensor<3xi64>
 // CHECK: %[[T27:.*]] = tensor.from_elements %[[T19]], %[[T25]], %[[T23]] : tensor<3xi64>
 // CHECK: %[[T28:.*]] = tensor.from_elements %[[C1_I64]], %[[T2]], %[[C1_I64]] : tensor<3xi64>
-// CHECK: %[[T29:.*]] = "mhlo.real_dynamic_slice"(%[[T0]], %[[T26]], %[[T27]], %[[T28]]) : (tensor<?x?x?xf32>, tensor<3xi64>, tensor<3xi64>, tensor<3xi64>) -> tensor<?x?x?xf32>
+// CHECK: %[[T29:.*]] = mhlo.real_dynamic_slice %[[T0]], %[[T26]], %[[T27]], %[[T28]] : (tensor<?x?x?xf32>, tensor<3xi64>, tensor<3xi64>, tensor<3xi64>) -> tensor<?x?x?xf32>
 // CHECK: %[[T30:.*]] = mhlo.convert(%[[T29]]) : (tensor<?x?x?xf32>) -> tensor<?x1x?xf32>
 // CHECK: %[[T31:.*]] = torch_c.from_builtin_tensor %[[T30]] : tensor<?x1x?xf32> -> !torch.vtensor<[?,1,?],f32>
 // CHECK: return %[[T31]] : !torch.vtensor<[?,1,?],f32>
@@ -206,7 +206,7 @@ func.func @torch.aten.slice.last$slice_like(%arg0: !torch.vtensor<[?,?,?],f32>)
 // CHECK: %[[T26:.*]] = tensor.from_elements %[[C0_I64_2]], %[[T11]], %[[C0_I64_2]] : tensor<3xi64>
 // CHECK: %[[T27:.*]] = tensor.from_elements %[[T19]], %[[T25]], %[[T23]] : tensor<3xi64>
 // CHECK: %[[T28:.*]] = tensor.from_elements %[[C1_I64]], %[[T2]], %[[C1_I64]] : tensor<3xi64>
-// CHECK: %[[T29:.*]] = "mhlo.real_dynamic_slice"(%[[T0]], %[[T26]], %[[T27]], %[[T28]]) : (tensor<4x65x256xf32>, tensor<3xi64>, tensor<3xi64>, tensor<3xi64>) -> tensor<4x?x256xf32>
+// CHECK: %[[T29:.*]] = mhlo.real_dynamic_slice %[[T0]], %[[T26]], %[[T27]], %[[T28]] : (tensor<4x65x256xf32>, tensor<3xi64>, tensor<3xi64>, tensor<3xi64>) -> tensor<4x?x256xf32>
 // CHECK: %[[T30:.*]] = mhlo.convert(%[[T29]]) : (tensor<4x?x256xf32>) -> tensor<4x1x256xf32>
 // CHECK: %[[T31:.*]] = torch_c.from_builtin_tensor %[[T30]] : tensor<4x1x256xf32> -> !torch.vtensor<[4,1,256],f32>
 // CHECK: return %[[T31]] : !torch.vtensor<[4,1,256],f32>
@@ -246,7 +246,7 @@ func.func @torch.aten.slice.last.static$slice_like(%arg0: !torch.vtensor<[4,65,2
 // CHECK: %[[T12:.*]] = tensor.from_elements %[[C0_I64_1]], %[[C0_I64]], %[[C0_I64_1]] : tensor<3xi64>
 // CHECK: %[[T13:.*]] = tensor.from_elements %[[T5]], %[[T11]], %[[T9]] : tensor<3xi64>
 // CHECK: %[[T14:.*]] = tensor.from_elements %[[C1_I64]], %[[T1]], %[[C1_I64]] : tensor<3xi64>
-// CHECK: %[[T15:.*]] = "mhlo.real_dynamic_slice"(%[[T0]], %[[T12]], %[[T13]], %[[T14]]) : (tensor<?x?x?xf32>, tensor<3xi64>, tensor<3xi64>, tensor<3xi64>) -> tensor<?x?x?xf32>
+// CHECK: %[[T15:.*]] = mhlo.real_dynamic_slice %[[T0]], %[[T12]], %[[T13]], %[[T14]] : (tensor<?x?x?xf32>, tensor<3xi64>, tensor<3xi64>, tensor<3xi64>) -> tensor<?x?x?xf32>
 // CHECK: %[[T16:.*]] = mhlo.convert %[[T15]] : tensor<?x?x?xf32>
 // CHECK: %[[T17:.*]] = torch_c.from_builtin_tensor %[[T16]] : tensor<?x?x?xf32> -> !torch.vtensor<[?,?,?],f32>
 // CHECK: return %[[T17]] : !torch.vtensor<[?,?,?],f32>
@@ -285,7 +285,7 @@ func.func @torch.aten.slice.none$slice_like(%arg0: !torch.vtensor<[?,?,?],f32>)
 // CHECK: %[[T12:.*]] = tensor.from_elements %[[C0_I64_1]], %[[C0_I64]], %[[C0_I64_1]] : tensor<3xi64>
 // CHECK: %[[T13:.*]] = tensor.from_elements %[[T5]], %[[T11]], %[[T9]] : tensor<3xi64>
 // CHECK: %[[T14:.*]] = tensor.from_elements %[[C1_I64]], %[[T1]], %[[C1_I64]] : tensor<3xi64>
-// CHECK: %[[T15:.*]] = "mhlo.real_dynamic_slice"(%[[T0]], %[[T12]], %[[T13]], %[[T14]]) : (tensor<4x65x256xf32>, tensor<3xi64>, tensor<3xi64>, tensor<3xi64>) -> tensor<4x?x256xf32>
+// CHECK: %[[T15:.*]] = mhlo.real_dynamic_slice %[[T0]], %[[T12]], %[[T13]], %[[T14]] : (tensor<4x65x256xf32>, tensor<3xi64>, tensor<3xi64>, tensor<3xi64>) -> tensor<4x?x256xf32>
 // CHECK: %[[T16:.*]] = mhlo.convert(%[[T15]]) : (tensor<4x?x256xf32>) -> tensor<4x33x256xf32>
 // CHECK: %[[T17:.*]] = torch_c.from_builtin_tensor %[[T16]] : tensor<4x33x256xf32> -> !torch.vtensor<[4,33,256],f32>
 // CHECK: return %[[T17]] : !torch.vtensor<[4,33,256],f32>
@@ -311,7 +311,7 @@ func.func @torch.aten.slice.none.static$slice_like(%arg0: !torch.vtensor<[4,65,2
 // CHECK: %[[T6:.*]] = arith.index_cast %[[T5]] : i64 to index
 // CHECK: %[[T7:.*]] = tensor.from_elements %[[T2]], %[[T3]] : tensor<2xi64>
 // CHECK: %[[T8:.*]] = mhlo.compute_reshape_shape %[[T6]], %[[T7]] : index, tensor<2xi64> -> tensor<2xi64>
-// CHECK: %[[T9:.*]] = "mhlo.dynamic_reshape"(%[[T0]], %[[T8]]) : (tensor<?x?x?x?xf32>, tensor<2xi64>) -> tensor<?x224xf32>
+// CHECK: %[[T9:.*]] = mhlo.dynamic_reshape %[[T0]], %[[T8]] : (tensor<?x?x?x?xf32>, tensor<2xi64>) -> tensor<?x224xf32>
 // CHECK: %[[T10:.*]] = torch_c.from_builtin_tensor %[[T9]] : tensor<?x224xf32> -> !torch.vtensor<[?,224],f32>
 // CHECK: return %[[T10]] : !torch.vtensor<[?,224],f32>
 func.func @torch.aten.view$basic(%arg0: !torch.vtensor<[?,?,?,?],f32>) -> !torch.vtensor<[?,224],f32> {
@@ -342,7 +342,7 @@ func.func @torch.aten.view$basic(%arg0: !torch.vtensor<[?,?,?,?],f32>) -> !torch
 // CHECK: %[[T10:.*]] = arith.index_cast %[[T9]] : i64 to index
 // CHECK: %[[T11:.*]] = tensor.from_elements %[[T2]], %[[T3]], %[[T4]], %[[T5]] : tensor<4xi64>
 // CHECK: %[[T12:.*]] = mhlo.compute_reshape_shape %[[T10]], %[[T11]] : index, tensor<4xi64> -> tensor<4xi64>
-// CHECK: %[[T13:.*]] = "mhlo.dynamic_reshape"(%[[T0]], %[[T12]]) : (tensor<?x?x?x?x?xf32>, tensor<4xi64>) -> tensor<?x120x4x64xf32>
+// CHECK: %[[T13:.*]] = mhlo.dynamic_reshape %[[T0]], %[[T12]] : (tensor<?x?x?x?x?xf32>, tensor<4xi64>) -> tensor<?x120x4x64xf32>
 // CHECK: %[[T14:.*]] = torch_c.from_builtin_tensor %[[T13]] : tensor<?x120x4x64xf32> -> !torch.vtensor<[?,120,4,64],f32>
 // CHECK: return %[[T14]] : !torch.vtensor<[?,120,4,64],f32>
 func.func @torch.aten.reshape$basic(%arg0: !torch.vtensor<[?,?,?,?,?],f32>) -> !torch.vtensor<[?,120,4,64],f32> {
@@ -382,7 +382,7 @@ func.func @torch.aten.reshape$basic(%arg0: !torch.vtensor<[?,?,?,?,?],f32>) -> !
 // CHECK: %[[T10:.*]] = arith.index_cast %[[T9]] : i64 to index
 // CHECK: %[[T11:.*]] = tensor.from_elements %[[T4]], %[[T5]], %[[T6]] : tensor<3xi64>
 // CHECK: %[[T12:.*]] = mhlo.compute_reshape_shape %[[T10]], %[[T11]] : index, tensor<3xi64> -> tensor<3xi64>
-// CHECK: %[[T13:.*]] = "mhlo.dynamic_reshape"(%[[T0]], %[[T12]]) : (tensor<2x3x?x?xf32>, tensor<3xi64>) -> tensor<2x3x?xf32>
+// CHECK: %[[T13:.*]] = mhlo.dynamic_reshape %[[T0]], %[[T12]] : (tensor<2x3x?x?xf32>, tensor<3xi64>) -> tensor<2x3x?xf32>
 // CHECK: %[[T14:.*]] = torch_c.from_builtin_tensor %[[T13]] : tensor<2x3x?xf32> -> !torch.vtensor<[2,3,?],f32>
 // CHECK: return %[[T14]] : !torch.vtensor<[2,3,?],f32>
 func.func @torch.aten.view$minus1(%arg0: !torch.vtensor<[2,3,?,?],f32>) -> !torch.vtensor<[2,3,?],f32> {
@@ -401,7 +401,7 @@ func.func @torch.aten.view$minus1(%arg0: !torch.vtensor<[2,3,?,?],f32>) -> !torc
 // CHECK: %[[T0:.*]] = torch_c.to_builtin_tensor %[[ARG0]] : !torch.vtensor<[],f32> -> tensor<f32>
 // CHECK: %[[INT1:.*]] = torch.constant.int 1
 // CHECK: %[[T1:.*]] = torch.prim.ListConstruct %[[INT1]] : (!torch.int) -> !torch.list<int>
-// CHECK: %[[T2:.*]] = "mhlo.reshape"(%[[T0]]) : (tensor<f32>) -> tensor<1xf32>
+// CHECK: %[[T2:.*]] = mhlo.reshape %[[T0]] : (tensor<f32>) -> tensor<1xf32>
 // CHECK: %[[T3:.*]] = torch_c.from_builtin_tensor %[[T2]] : tensor<1xf32> -> !torch.vtensor<[1],f32>
 // CHECK: return %[[T3]] : !torch.vtensor<[1],f32>
 func.func @torch.aten.view$to_rank1(%arg0: !torch.vtensor<[],f32>) -> !torch.vtensor<[1],f32> {
@@ -414,7 +414,7 @@ func.func @torch.aten.view$to_rank1(%arg0: !torch.vtensor<[],f32>) -> !torch.vte
 // CHECK-SAME: %[[ARG0:.*]]: !torch.vtensor<[1],f32>) -> !torch.vtensor<[],f32> {
 // CHECK: %[[T0:.*]] = torch_c.to_builtin_tensor %[[ARG0]] : !torch.vtensor<[1],f32> -> tensor<1xf32>
 // CHECK: %[[T1:.*]] = torch.prim.ListConstruct : () -> !torch.list<int>
-// CHECK: %[[T2:.*]] = "mhlo.reshape"(%[[T0]]) : (tensor<1xf32>) -> tensor<f32>
+// CHECK: %[[T2:.*]] = mhlo.reshape %[[T0]] : (tensor<1xf32>) -> tensor<f32>
 // CHECK: %[[T3:.*]] = torch_c.from_builtin_tensor %[[T2]] : tensor<f32> -> !torch.vtensor<[],f32>
 // CHECK: return %[[T3]] : !torch.vtensor<[],f32>
 func.func @torch.aten.view$to_rank0(%arg0: !torch.vtensor<[1],f32>) -> !torch.vtensor<[],f32> {
@@ -451,7 +451,7 @@ func.func @torch.aten.squeeze.dim$0$static(%arg0: !torch.vtensor<[2,1,2,1,2],f32
 // CHECK: %[[T7:.*]] = tensor.dim %[[T0]], %[[C4]] : tensor<?x1x?x1x?xf32>
 // CHECK: %[[T8:.*]] = arith.index_cast %[[T7]] : index to i64
 // CHECK: %[[T9:.*]] = tensor.from_elements %[[T2]], %[[T4]], %[[T6]], %[[T8]] : tensor<4xi64>
-// CHECK: %[[T10:.*]] = "mhlo.dynamic_reshape"(%[[T0]], %[[T9]]) : (tensor<?x1x?x1x?xf32>, tensor<4xi64>) -> tensor<?x?x1x?xf32>
+// CHECK: %[[T10:.*]] = mhlo.dynamic_reshape %[[T0]], %[[T9]] : (tensor<?x1x?x1x?xf32>, tensor<4xi64>) -> tensor<?x?x1x?xf32>
 // CHECK: %[[T11:.*]] = torch_c.from_builtin_tensor %[[T10]] : tensor<?x?x1x?xf32> -> !torch.vtensor<[?,?,1,?],f32>
 // CHECK: return %[[T11]] : !torch.vtensor<[?,?,1,?],f32>
 func.func @torch.aten.squeeze.dim$1(%arg0: !torch.vtensor<[?,1,?,1,?],f32>) -> !torch.vtensor<[?,?,1,?],f32> {
@@ -477,7 +477,7 @@ func.func @torch.aten.squeeze.dim$1(%arg0: !torch.vtensor<[?,1,?,1,?],f32>) -> !
 // CHECK: %[[T7:.*]] = tensor.dim %[[T0]], %[[C4]] : tensor<?x1x?x1x?xf32>
 // CHECK: %[[T8:.*]] = arith.index_cast %[[T7]] : index to i64
 // CHECK: %[[T9:.*]] = tensor.from_elements %[[T2]], %[[T4]], %[[T6]], %[[T8]] : tensor<4xi64>
-// CHECK: %[[T10:.*]] = "mhlo.dynamic_reshape"(%[[T0]], %[[T9]]) : (tensor<?x1x?x1x?xf32>, tensor<4xi64>) -> tensor<?x1x?x?xf32>
+// CHECK: %[[T10:.*]] = mhlo.dynamic_reshape %[[T0]], %[[T9]] : (tensor<?x1x?x1x?xf32>, tensor<4xi64>) -> tensor<?x1x?x?xf32>
 // CHECK: %[[T11:.*]] = torch_c.from_builtin_tensor %[[T10]] : tensor<?x1x?x?xf32> -> !torch.vtensor<[?,1,?,?],f32>
 // CHECK: return %[[T11]] : !torch.vtensor<[?,1,?,?],f32>
 func.func @torch.aten.squeeze.dim$from_end(%arg0: !torch.vtensor<[?,1,?,1,?],f32>) -> !torch.vtensor<[?,1,?,?],f32> {
@@ -499,7 +499,7 @@ func.func @torch.aten.squeeze.dim$from_end(%arg0: !torch.vtensor<[?,1,?,1,?],f32
 // CHECK: %[[T5:.*]] = tensor.dim %[[T0]], %[[C4]] : tensor<2x1x2x1x2xf32>
 // CHECK: %[[T6:.*]] = arith.index_cast %[[T5]] : index to i64
 // CHECK: %[[T7:.*]] = tensor.from_elements %[[T2]], %[[T4]], %[[T6]] : tensor<3xi64>
-// CHECK: %[[T8:.*]] = "mhlo.dynamic_reshape"(%[[T0]], %[[T7]]) : (tensor<2x1x2x1x2xf32>, tensor<3xi64>) -> tensor<2x2x2xf32>
+// CHECK: %[[T8:.*]] = mhlo.dynamic_reshape %[[T0]], %[[T7]] : (tensor<2x1x2x1x2xf32>, tensor<3xi64>) -> tensor<2x2x2xf32>
 // CHECK: %[[T9:.*]] = torch_c.from_builtin_tensor %[[T8]] : tensor<2x2x2xf32> -> !torch.vtensor<[2,2,2],f32>
 // CHECK: return %[[T9]] : !torch.vtensor<[2,2,2],f32>
 func.func @torch.aten.squeeze$static(%arg0: !torch.vtensor<[2,1,2,1,2],f32>) -> !torch.vtensor<[2,2,2],f32> {
@@ -525,7 +525,7 @@ func.func @torch.aten.squeeze$static(%arg0: !torch.vtensor<[2,1,2,1,2],f32>) ->
 // CHECK: %[[T8:.*]] = arith.index_cast %[[T7]] : index to i64
 // CHECK: %[[C1_I64:.*]] = arith.constant 1 : i64
 // CHECK: %[[T9:.*]] = tensor.from_elements %[[C1_I64]], %[[T2]], %[[T4]], %[[T6]], %[[T8]] : tensor<5xi64>
-// CHECK: %[[T10:.*]] = "mhlo.dynamic_reshape"(%[[T0]], %[[T9]]) : (tensor<?x?x?x?xf32>, tensor<5xi64>) -> tensor<1x?x?x?x?xf32>
+// CHECK: %[[T10:.*]] = mhlo.dynamic_reshape %[[T0]], %[[T9]] : (tensor<?x?x?x?xf32>, tensor<5xi64>) -> tensor<1x?x?x?x?xf32>
 // CHECK: %[[T11:.*]] = torch_c.from_builtin_tensor %[[T10]] : tensor<1x?x?x?x?xf32> -> !torch.vtensor<[1,?,?,?,?],f32>
 // CHECK: return %[[T11]] : !torch.vtensor<[1,?,?,?,?],f32>
 func.func @torch.aten.unsqueeze$dim$0(%arg0: !torch.vtensor<[?,?,?,?],f32>) -> !torch.vtensor<[1,?,?,?,?],f32> {
@@ -552,7 +552,7 @@ func.func @torch.aten.unsqueeze$dim$0(%arg0: !torch.vtensor<[?,?,?,?],f32>) -> !
 // CHECK: %[[T8:.*]] = arith.index_cast %[[T7]] : index to i64
 // CHECK: %[[C1_I64:.*]] = arith.constant 1 : i64
 // CHECK: %[[T9:.*]] = tensor.from_elements %[[T2]], %[[C1_I64]], %[[T4]], %[[T6]], %[[T8]] : tensor<5xi64>
-// CHECK: %[[T10:.*]] = "mhlo.dynamic_reshape"(%[[T0]], %[[T9]]) : (tensor<?x?x?x?xf32>, tensor<5xi64>) -> tensor<?x1x?x?x?xf32>
+// CHECK: %[[T10:.*]] = mhlo.dynamic_reshape %[[T0]], %[[T9]] : (tensor<?x?x?x?xf32>, tensor<5xi64>) -> tensor<?x1x?x?x?xf32>
 // CHECK: %[[T11:.*]] = torch_c.from_builtin_tensor %[[T10]] : tensor<?x1x?x?x?xf32> -> !torch.vtensor<[?,1,?,?,?],f32>
 // CHECK: return %[[T11]] : !torch.vtensor<[?,1,?,?,?],f32>
 func.func @torch.aten.unsqueeze$dim$1(%arg0: !torch.vtensor<[?,?,?,?],f32>) -> !torch.vtensor<[?,1,?,?,?],f32> {
@@ -579,7 +579,7 @@ func.func @torch.aten.unsqueeze$dim$1(%arg0: !torch.vtensor<[?,?,?,?],f32>) -> !
 // CHECK: %[[T8:.*]] = arith.index_cast %[[T7]] : index to i64
 // CHECK: %[[C1_I64:.*]] = arith.constant 1 : i64
 // CHECK: %[[T9:.*]] = tensor.from_elements %[[T2]], %[[T4]], %[[T6]], %[[C1_I64]], %[[T8]] : tensor<5xi64>
-// CHECK: %[[T10:.*]] = "mhlo.dynamic_reshape"(%[[T0]], %[[T9]]) : (tensor<?x?x?x?xf32>, tensor<5xi64>) -> tensor<?x?x?x1x?xf32>
+// CHECK: %[[T10:.*]] = mhlo.dynamic_reshape %[[T0]], %[[T9]] : (tensor<?x?x?x?xf32>, tensor<5xi64>) -> tensor<?x?x?x1x?xf32>
 // CHECK: %[[T11:.*]] = torch_c.from_builtin_tensor %[[T10]] : tensor<?x?x?x1x?xf32> -> !torch.vtensor<[?,?,?,1,?],f32>
 // CHECK: return %[[T11]] : !torch.vtensor<[?,?,?,1,?],f32>
 func.func @torch.aten.unsqueeze$from_end(%arg0: !torch.vtensor<[?,?,?,?],f32>) -> !torch.vtensor<[?,?,?,1,?],f32> {