[NFC reformat] Run pre-commit on all files and format misc.

This is part 1 of ~3, formatting all miscellaneous text files and CPP files matched by a first run of pre-commit. These files see low change traffic, so the reformatting is unlikely to be disruptive.

Subsequent patches will format Python files and remaining CPP files.
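For reference, a formatting pass like this one can typically be reproduced by running pre-commit over the whole tree (assuming the repository's pre-commit configuration defines the formatting hooks applied here):

    # install the git hook once, then run every configured hook on all files in place
    pre-commit install
    pre-commit run --all-files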
pull/3244/head
Stella Laurenzo 2024-04-27 14:08:09 -07:00
parent 6679728c56
commit 5d4b803914
40 changed files with 99 additions and 113 deletions


@@ -247,4 +247,4 @@ add_subdirectory(projects)
# Finish with top-level Python bindings so it can handle additional deps.
if(MLIR_ENABLE_BINDINGS_PYTHON)
  add_subdirectory(python)
endif()


@@ -30,7 +30,7 @@ echo "::endgroup::"
case $torch_version in
  nightly)
    # Failing with: NotImplementedError:
    # Could not run 'aten::empty.memory_format' with arguments from the 'Lazy' backend.
    # As of 2024-01-07
    # echo "::group::Run Lazy Tensor Core e2e integration tests"


@@ -282,7 +282,7 @@ function _check_file_not_changed_by() {
function test_in_tree() {
  local torch_version="$1"

  echo ":::: Test in-tree"
  cmake --build /main_checkout/torch-mlir/build --target check-torch-mlir-all


@@ -140,4 +140,3 @@ torch-mlir's representation:
* `ConstantOfShape`: Mapped to `torch.vtensor.literal` with
  a corresponding `value` attribute.


@@ -277,4 +277,3 @@ directly provided a way to plug into this.
Additionally, we can leverage the [`pytorch-jit-paritybench`](https://github.com/jansel/pytorch-jit-paritybench)
to verify our end-to-end correctness on real models.


@@ -1,2 +1,2 @@
add_subdirectory(torch-mlir)
add_subdirectory(torch-mlir-dialects)


@@ -756,12 +756,12 @@ def Torch_ConstantNumberOp : Torch_Op<"constant.number",
    [ConstantLike, Pure]> {
  let summary = "Materialize a constant `number` value.";
  let description = [{
    This op is used as a workaround to the fact that the constant
    materialization in MLIR must materialize a constant with a single op.
    To materialize ops with a static `!torch.number` type, we must use this op,
    even though we statically know if it is an integer or a float.

    Note: This op unconditionally canonicalizes to
    `torch.constant.{float,int}` + `torch.derefine`
  }];
  let arguments = (ins
@@ -846,7 +846,7 @@ def Torch_OperatorOp : Torch_Op<"operator", [
  let regions = (region VariadicRegion<AnyRegion>:$regions);

  let assemblyFormat = [{
    $name `(` $operands `)` attr-dict `:` functional-type($operands, $results) $regions
  }];
}
@@ -1146,10 +1146,10 @@ def Torch_PromoteDtypesOp: Torch_Op<"promote_dtypes", [
  let assemblyFormat = "$ranks `,` $dtypes attr-dict `:` functional-type(operands, results)";
}

// To handle runtime assertions, torchscript provides us `torch._assert` operation.
// But TS compiler introduces control flow for `torch._assert` operation. The
// `torch._assert` would introduce control flow like:
//
// %cond = "torch.aten.Bool.Tensor"(%0) : (!torch.tensor) -> !torch.bool
// "torch.prim.If"(%cond) ({
//   "torch.prim.If.yield"() : () -> ()


@@ -369,7 +369,7 @@ def LowerToBackendContract
    to the backend contract. This pass does not do any global program
    restructuring -- it works entirely within a single semantic model
    of a `builtin.module` with `torch.global_slot` ops and `func.func` ops.

    This pass runs a set of simplifications within that semantic model until
    the backend contract is satisfied, and fails if it cannot be satisfied.

    In particular, the backend contract consists of:


@@ -628,42 +628,39 @@ void mlir::torch::onnx_c::populateDefaultDomainGtoP(
            binder.op, resultType, operand);
        return success();
      });
  patterns.onOp(
      "Not", 1, [](OpBinder binder, ConversionPatternRewriter &rewriter) {
        Torch::ValueTensorType resultType;
        Value operand;
        if (binder.tensorOperand(operand) ||
            binder.tensorResultType(resultType)) {
          return failure();
        }

        auto loc = binder.getLoc();
        auto operandTy = cast<Torch::ValueTensorType>(operand.getType());
        auto eTy = operandTy.getDtype();

        if (!eTy.isInteger(1)) {
          auto i1ty = rewriter.getI1Type();
          auto ty = rewriter.getType<Torch::ValueTensorType>(
              operandTy.getSizes(), i1ty);
          auto torchqTy = Torch::getScalarTypeForType(i1ty);
          Value tyConst = rewriter.create<Torch::ConstantIntOp>(
              binder.getLoc(), rewriter.getType<Torch::IntType>(),
              rewriter.getIntegerAttr(rewriter.getIntegerType(64),
                                      static_cast<int64_t>(torchqTy)));
          Value none = rewriter.create<Torch::ConstantNoneOp>(loc);
          Value cstFalse = rewriter.create<Torch::ConstantBoolOp>(loc, false);
          operand = rewriter.create<Torch::AtenToDtypeOp>(
              loc, ty, operand, tyConst,
              /*non_blocking=*/cstFalse, /*copy=*/cstFalse,
              /*memory_format=*/none);
        }
        rewriter.replaceOpWithNewOp<Torch::AtenBitwiseNotOp>(
            binder.op, resultType, operand);
        return success();
      });
  patterns.onOp("Or", 1,
                [](OpBinder binder, ConversionPatternRewriter &rewriter) {
                  Torch::ValueTensorType resultType;


@@ -189,9 +189,8 @@ Value promoteAndBroadcast(ConversionPatternRewriter &rewriter, Value input,
      do_bcast = true;
    } else {
      op->emitError("The size of tensor a (")
          << inDim << ")" << "must match the size of tensor b (" << outDim
          << ")" << "at non-singleton dimension " << inPos;
    }
  }
  std::reverse(bcastDims.begin(), bcastDims.end());


@@ -287,19 +287,19 @@ static LogicalResult checkValidityOfCast(Type src, Type dest) {
      (src.isInteger(1) && dest.isInteger(64)) ||
      (src.isInteger(1) && dest.isF32()) ||
      // f64 -> *
      (src.isF64() && dest.isF32()) ||
      (src.isF64() && dest.isBF16()) ||
      // f32 -> *
      (src.isF32() && dest.isF64()) ||
      (src.isF32() && dest.isBF16()) ||
      (src.isF32() && dest.isF16()) ||
      (src.isF32() && dest.isInteger(8)) ||
      (src.isF32() && dest.isInteger(64)) ||
      (src.isF32() && dest.isInteger(1)) ||
      // bf16 -> *
      (src.isBF16() && dest.isInteger(8)) ||
      (src.isBF16() && dest.isInteger(16)) ||
      (src.isBF16() && dest.isInteger(32)) ||
      (src.isBF16() && dest.isF32())) {
    return success();
  }


@@ -22,4 +22,4 @@ add_mlir_library(TorchMLIRTMTensorPasses
  MLIRTransforms
)

torch_mlir_target_includes(TorchMLIRTMTensorPasses)


@@ -305,8 +305,7 @@ public:
      return signalPassFailure();
    } while (!satisfiesBackendContract(module, target));
    LLVM_DEBUG({
      llvm::dbgs() << "LowerToBackendContractPass: " << "succeeded after " << i
                   << " iterations of the simplification pipeline\n";
    });
  }


@@ -21,7 +21,7 @@ endif()
add_mlir_library(TorchMLIRTorchConversionPasses
  BackendTypeConversion.cpp
  BackendTypeConversionPasses.cpp
  Passes.cpp
  ConvertCustomQuantOp.cpp
  UnpackQuantTensor.cpp


@@ -44,16 +44,16 @@ if(TORCH_MLIR_ENABLE_JIT_IR_IMPORTER OR TORCH_MLIR_ENABLE_LTC)
      message(FATAL_ERROR "Without TORCH_MLIR_USE_INSTALLED_PYTORCH, expected to find Torch configuration at ${Torch_DIR}, which does not exist")
    endif()
  endif()
  find_package(Torch 1.11 REQUIRED)

  set(TORCHGEN_DIR ${Torch_ROOT}/../../../torchgen)

  include_directories(BEFORE
    ${TORCH_INCLUDE_DIRS}
    ${Python3_INCLUDE_DIRS}
  )
  link_directories("${TORCH_INSTALL_PREFIX}/lib")
  message(STATUS "TORCH_CXXFLAGS is = ${TORCH_CXXFLAGS}")
  if(${CMAKE_SYSTEM_NAME} STREQUAL "Linux" AND NOT TORCH_CXXFLAGS)
    message(WARNING


@@ -713,4 +713,4 @@ at::Tensor &LazyNativeFunctions::logsumexp_out(const at::Tensor &self,
void InitializeAtenBindings() {}

} // namespace lazy
} // namespace torch


@@ -34,4 +34,4 @@ public:
};

} // namespace lazy
} // namespace torch


@@ -56,7 +56,7 @@ endif()
# Can we build the JIT IR importer with `declare_mlir_python_extension`?
# Then it would "just work".
if(TORCH_MLIR_ENABLE_JIT_IR_IMPORTER)
  add_dependencies(TorchMLIRPythonTorchExtensionsSources
    TorchMLIRJITIRImporter
    TorchMLIRJITIRImporterPybind
    TorchMLIRE2ETestPythonModules
@@ -65,7 +65,7 @@ endif()
if(TORCH_MLIR_ENABLE_LTC)
  # Add Torch-MLIR LTC backend as dependency
  add_dependencies(TorchMLIRPythonTorchExtensionsSources
    torch_mlir_ltc_backend
    reference_lazy_backend
  )


@@ -28,4 +28,3 @@ set_target_properties(torch_mlir_custom_op_example PROPERTIES
)
torch_mlir_python_target_compile_options(torch_mlir_custom_op_example)
mlir_check_all_link_libraries(torch_mlir_custom_op_example)


@@ -13,7 +13,7 @@ configure_lit_site_cfg(
set(TORCH_MLIR_TEST_DEPENDS
  FileCheck count not
  TorchMLIRPythonModules
  torch-mlir-opt
  torch-mlir-capi-torch-test
)


@@ -1 +1 @@
config.suffixes.add('.c')


@@ -36,7 +36,7 @@ static void testTensor(MlirContext ctx, intptr_t numSizes, int64_t *sizes,
  fprintf(stderr, #TTT "Type %s rank: %zu\n", testName, \
          torchMlirTorch##TTT##TypeGetRank(TTT##Type)); \
  int64_t *TTT##Sizes = malloc(sizeof(int64_t) * numSizes); \
  torchMlirTorch##TTT##TypeGetSizes(TTT##Type, TTT##Sizes); \
  for (int i = 0; i < numSizes; ++i) { \
    fprintf(stderr, #TTT "Type %s pos %d size: %ld\n", testName, i, \
            TTT##Sizes[i]); \
@@ -157,22 +157,26 @@ static void testTypeMetaDataAccessors(MlirContext ctx) {
  MlirType dictType1 = torchMlirTorchDictTypeGet(strType, floatType);

  fprintf(stderr, "dict keyType: ");
  mlirTypePrint(torchMlirTorchDictTypeGetKeyType(dictType1), printToStderr,
                NULL);
  fprintf(stderr, "\n");
  // CHECK: dict keyType: !torch.str
  fprintf(stderr, "dict valueType: ");
  mlirTypePrint(torchMlirTorchDictTypeGetValueType(dictType1), printToStderr,
                NULL);
  fprintf(stderr, "\n");
  // CHECK: dict valueType: !torch.float

  MlirType dictType2 = torchMlirTorchDictTypeGet(floatType, strType);
  fprintf(stderr, "dict keyType: ");
  mlirTypePrint(torchMlirTorchDictTypeGetKeyType(dictType2), printToStderr,
                NULL);
  fprintf(stderr, "\n");
  // CHECK: dict keyType: !torch.float
  fprintf(stderr, "dict valueType: ");
  mlirTypePrint(torchMlirTorchDictTypeGetValueType(dictType2), printToStderr,
                NULL);
  fprintf(stderr, "\n");
  // CHECK: dict valueType: !torch.str
}


@@ -14,7 +14,7 @@ configure_lit_site_cfg(
set(TORCH_MLIR_TEST_DEPENDS
  FileCheck count not
  TorchMLIRPythonModules
  torch-mlir-opt
  torch-mlir-capi-torch-test
)


@@ -86,7 +86,7 @@ func.func @test_argmax_negative_axis_keepdims_random_select_last_index(%arg0: !t
  // CHECK: %[[C1:.*]] = torch.constant.int 1
  // CHECK: %[[SUB:.*]] = torch.aten.sub.Scalar %[[ARGMAX]], %[[C3]], %[[C1]] : !torch.vtensor<[2,3,1],si64>, !torch.int, !torch.int -> !torch.vtensor<[2,3,1],si64>
  // CHECK: %[[ABS:.*]] = torch.aten.abs %[[SUB]] : !torch.vtensor<[2,3,1],si64> -> !torch.vtensor<[2,3,1],si64>
  %0 = torch.operator "onnx.ArgMax"(%arg0) {torch.onnx.axis = -1 : si64, torch.onnx.keepdims = 1 : si64, torch.onnx.select_last_index = 1 : si64} : (!torch.vtensor<[2,3,4],f32>) -> !torch.vtensor<[2,3,1],si64>
  return %0 : !torch.vtensor<[2,3,1],si64>
}
@@ -115,7 +115,7 @@ func.func @test_argmax_no_keepdims_random_select_last_index(%arg0: !torch.vtenso
  // CHECK: %[[C1_1:.*]] = torch.constant.int 1
  // CHECK: %[[SUB:.*]] = torch.aten.sub.Scalar %[[ARGMAX]], %[[C2]], %[[C1_1]] : !torch.vtensor<[2,4],si64>, !torch.int, !torch.int -> !torch.vtensor<[2,4],si64>
  // CHECK: %[[ABS:.*]] = torch.aten.abs %[[SUB]] : !torch.vtensor<[2,4],si64> -> !torch.vtensor<[2,4],si64>
  %0 = torch.operator "onnx.ArgMax"(%arg0) {torch.onnx.axis = 1 : si64, torch.onnx.keepdims = 0 : si64, torch.onnx.select_last_index = 1 : si64} : (!torch.vtensor<[2,3,4],f32>) -> !torch.vtensor<[2,4],si64>
  return %0 : !torch.vtensor<[2,4],si64>
}
@@ -155,7 +155,7 @@ func.func @test_argmin_negative_axis_keepdims_random_select_last_index(%arg0: !t
  // CHECK: %[[C1:.*]] = torch.constant.int 1
  // CHECK: %[[SUB:.*]] = torch.aten.sub.Scalar %[[ARGMIN]], %[[C3]], %[[C1]] : !torch.vtensor<[2,3,1],si64>, !torch.int, !torch.int -> !torch.vtensor<[2,3,1],si64>
  // CHECK: %[[ABS:.*]] = torch.aten.abs %[[SUB]] : !torch.vtensor<[2,3,1],si64> -> !torch.vtensor<[2,3,1],si64>
  %0 = torch.operator "onnx.ArgMin"(%arg0) {torch.onnx.axis = -1 : si64, torch.onnx.keepdims = 1 : si64, torch.onnx.select_last_index = 1 : si64} : (!torch.vtensor<[2,3,4],f32>) -> !torch.vtensor<[2,3,1],si64>
  return %0 : !torch.vtensor<[2,3,1],si64>
}
@@ -851,7 +851,7 @@ func.func @test_dynamicquantizelinear(%arg0: !torch.vtensor<[3,4,5],f32>) -> (!t
  // CHECK: %[[SCALE:.*]] = torch.aten.item %[[SCALE_T]] : !torch.vtensor<[],f32> -> !torch.float
  // CHECK: %[[QUANT:.*]] = torch.aten.quantize_per_tensor %arg0, %[[SCALE]], %[[ZP]], %[[CI13]] : !torch.vtensor<[3,4,5],f32>, !torch.float, !torch.int, !torch.int -> !torch.vtensor<[3,4,5],!torch.quint8>
  // CHECK: %[[INTQUANT:.*]] = torch.aten.int_repr %[[QUANT]] : !torch.vtensor<[3,4,5],!torch.quint8> -> !torch.vtensor<[3,4,5],ui8>
  %0:3 = torch.operator "onnx.DynamicQuantizeLinear"(%arg0) : (!torch.vtensor<[3,4,5],f32>) -> (!torch.vtensor<[3,4,5],ui8>, !torch.vtensor<[],f32>, !torch.vtensor<[],ui8>)
  // CHECK: return %[[INTQUANT]], %[[SCALE_T]], %[[ZP_T]] : !torch.vtensor<[3,4,5],ui8>, !torch.vtensor<[],f32>, !torch.vtensor<[],ui8>
  return %0#0, %0#1, %0#2 : !torch.vtensor<[3,4,5],ui8>, !torch.vtensor<[],f32>, !torch.vtensor<[],ui8>
}
@@ -1035,7 +1035,7 @@ func.func @test_convinteger_without_padding(%arg0: !torch.vtensor<[1,1,3,3],ui8>
  // CHECK: %[[WEIGHT:.*]] = torch.aten._make_per_tensor_quantized_tensor %arg1, %[[SCALE]], %[[WEIGHT_ZP]] : !torch.vtensor<[1,1,2,2],ui8>, !torch.float, !torch.int -> !torch.vtensor<[1,1,2,2],!torch.quint8>
  // CHECK: torch.aten.convolution %[[INPUT]], %[[WEIGHT]], %[[BIAS]], %[[STRIDE]], %[[PADDING]], %[[DILATIONS]], %[[TRANSPOSED]], %[[OUTPUT_PADDING]], %[[GROUPS]] : !torch.vtensor<[1,1,3,3],!torch.quint8>, !torch.vtensor<[1,1,2,2],!torch.quint8>, !torch.none, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,1,2,2],si32>
  %none = torch.constant.none
  %0 = torch.operator "onnx.ConvInteger"(%arg0, %arg1, %arg2, %arg3) : (!torch.vtensor<[1,1,3,3],ui8>, !torch.vtensor<[1,1,2,2],ui8>, !torch.vtensor<[],ui8>, !torch.vtensor<[1],ui8>) -> !torch.vtensor<[1,1,2,2],si32>
  return %0 : !torch.vtensor<[1,1,2,2],si32>
}
@@ -1066,7 +1066,7 @@ func.func @test_convinteger_with_padding(%arg0: !torch.vtensor<[1,1,3,3],ui8>, %
  // CHECK: %[[WEIGHT:.*]] = torch.aten._make_per_tensor_quantized_tensor %arg1, %[[SCALE]], %[[WEIGHT_ZP]] : !torch.vtensor<[1,1,2,2],ui8>, !torch.float, !torch.int -> !torch.vtensor<[1,1,2,2],!torch.quint8>
  // CHECK: torch.aten.convolution %[[INPUT]], %[[WEIGHT]], %[[BIAS]], %[[STRIDE]], %[[PADDING]], %[[DILATIONS]], %[[TRANSPOSED]], %[[OUTPUT_PADDING]], %[[GROUPS]] : !torch.vtensor<[1,1,3,3],!torch.quint8>, !torch.vtensor<[1,1,2,2],!torch.quint8>, !torch.none, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,1,4,4],si32>
  %none = torch.constant.none
  %0 = torch.operator "onnx.ConvInteger"(%arg0, %arg1, %arg2) {torch.onnx.pads = [1 : si64, 1 : si64, 1 : si64, 1 : si64]} : (!torch.vtensor<[1,1,3,3],ui8>, !torch.vtensor<[1,1,2,2],ui8>, !torch.vtensor<[],ui8>) -> !torch.vtensor<[1,1,4,4],si32>
  return %0 : !torch.vtensor<[1,1,4,4],si32>
}
@@ -1597,9 +1597,9 @@ func.func @dense_constant() -> () attributes {torch.onnx_meta.ir_version = 8 : s
// CHECK-LABEL: @dense_constant_i1
func.func @dense_constant_i1() -> !torch.vtensor<[5],i1> attributes {torch.onnx_meta.ir_version = 8 : si64, torch.onnx_meta.opset_version = 17 : si64} {
  // CHECK: %[[CST:.+]] = torch.vtensor.literal(dense<[true, false, false, true, true]> : tensor<5xi1>) : !torch.vtensor<[5],i1>
  // CHECK: return %[[CST]] : !torch.vtensor<[5],i1>
  %0 = torch.operator "onnx.Constant"() {torch.onnx.value = dense_resource<_> : tensor<5xi1>} : () -> !torch.vtensor<[5],i1>
  return %0 : !torch.vtensor<[5],i1>
}


@@ -782,7 +782,7 @@ func.func @test_mod_int64_no_fmod(%arg0: !torch.vtensor<[6],si64>, %arg1: !torch
  // CHECK: %[[NONE:.*]] = torch.constant.none
  // CHECK: %[[LSM:.*]] = torch.aten.log_softmax.int %arg0, %[[CIM1]], %[[NONE]] : !torch.vtensor<[1,3],f32>, !torch.int, !torch.none -> !torch.vtensor<[1,3],f32>
  // CHECK: return %[[LSM]] : !torch.vtensor<[1,3],f32>
  %0 = torch.operator "onnx.LogSoftmax"(%arg0) : (!torch.vtensor<[1,3],f32>) -> !torch.vtensor<[1,3],f32>
  return %0 : !torch.vtensor<[1,3],f32>
}
@@ -794,7 +794,7 @@ func.func @test_mod_int64_no_fmod(%arg0: !torch.vtensor<[6],si64>, %arg1: !torch
  // CHECK: %[[NONE:.*]] = torch.constant.none
  // CHECK: %[[LSM:.*]] = torch.aten.log_softmax.int %arg0, %[[CI2]], %[[NONE]] : !torch.vtensor<[3,4,5],f32>, !torch.int, !torch.none -> !torch.vtensor<[3,4,5],f32>
  // CHECK: return %[[LSM]] : !torch.vtensor<[3,4,5],f32>
  %0 = torch.operator "onnx.LogSoftmax"(%arg0) {torch.onnx.axis = 2 : si64} : (!torch.vtensor<[3,4,5],f32>) -> !torch.vtensor<[3,4,5],f32>
  return %0 : !torch.vtensor<[3,4,5],f32>
}
@@ -812,7 +812,7 @@ func.func @test_mod_int64_no_fmod(%arg0: !torch.vtensor<[6],si64>, %arg1: !torch
  // CHECK: %[[LSM:.*]] = torch.aten.log_softmax.int %[[FLAT_IN]], %[[CI1]], %[[NONE]] : !torch.vtensor<[3,?],f32>, !torch.int, !torch.none -> !torch.vtensor<[3,?],f32>
  // CHECK: %[[UNFLAT:.*]] = torch.aten.unflatten.int %[[LSM]], %[[CI1]], %[[LIST]] : !torch.vtensor<[3,?],f32>, !torch.int, !torch.list<int> -> !torch.vtensor<[3,4,?],f32>
  // CHECK: return %[[UNFLAT]] : !torch.vtensor<[3,4,?],f32>
  %0 = torch.operator "onnx.LogSoftmax"(%arg0) {torch.onnx.axis = 1 : si64} : (!torch.vtensor<[3,4,?],f32>) -> !torch.vtensor<[3,4,?],f32>
  return %0 : !torch.vtensor<[3,4,?],f32>
}
@@ -830,7 +830,7 @@ func.func @test_mod_int64_no_fmod(%arg0: !torch.vtensor<[6],si64>, %arg1: !torch
  // CHECK: %[[LSM:.*]] = torch.aten.log_softmax.int %[[FLAT_IN]], %[[CI1]], %[[NONE]] : !torch.vtensor<[3,20],f32>, !torch.int, !torch.none -> !torch.vtensor<[3,20],f32>
  // CHECK: %[[UNFLAT:.*]] = torch.aten.unflatten.int %[[LSM]], %[[CI1]], %[[LIST]] : !torch.vtensor<[3,20],f32>, !torch.int, !torch.list<int> -> !torch.vtensor<[3,4,5],f32>
  // CHECK: return %[[UNFLAT]] : !torch.vtensor<[3,4,5],f32>
  %0 = torch.operator "onnx.LogSoftmax"(%arg0) {torch.onnx.axis = 1 : si64} : (!torch.vtensor<[3,4,5],f32>) -> !torch.vtensor<[3,4,5],f32>
  return %0 : !torch.vtensor<[3,4,5],f32>
}


@@ -1842,7 +1842,7 @@ func.func @test_random_normal() -> !torch.vtensor<[10],f32> attributes {torch.on
  // CHECK-DAG: %[[F0:.+]] = torch.constant.float 0.000000e+00
  // CHECK-DAG: %[[F1:.+]] = torch.constant.float 1.000000e+00
  // CHECK: torch.aten.normal_functional %[[EMPTY_TENSOR]], %[[F0]], %[[F1]], %[[NONE]] : !torch.vtensor<[10],f32>, !torch.float, !torch.float, !torch.none -> !torch.vtensor<[10],f32>
  %0 = torch.operator "onnx.RandomNormal"() {torch.onnx.dtype = 1 : si64, torch.onnx.mean = 0.000000e+00 : f32, torch.onnx.scale = 1.000000e+00 : f32, torch.onnx.shape = [10 : si64]} : () -> !torch.vtensor<[10],f32>
  return %0 : !torch.vtensor<[10],f32>
}
@@ -1857,7 +1857,7 @@ func.func @test_random_normal_like(%arg0: !torch.vtensor<[10],f32>) -> !torch.vt
  // CHECK-DAG: %[[F0:.+]] = torch.constant.float 0.000000e+00
  // CHECK-DAG: %[[F1:.+]] = torch.constant.float 1.000000e+00
  // CHECK: torch.aten.normal_functional %[[CAST]], %[[F0]], %[[F1]], %[[NONE]] : !torch.vtensor<[10],f32>, !torch.float, !torch.float, !torch.none -> !torch.vtensor<[10],f32>
  %0 = torch.operator "onnx.RandomNormalLike"(%arg0) {torch.onnx.dtype = 1 : si64, torch.onnx.mean = 0.000000e+00 : f32, torch.onnx.scale = 1.000000e+00 : f32} : (!torch.vtensor<[10],f32>) -> !torch.vtensor<[10],f32>
  return %0 : !torch.vtensor<[10],f32>
}
@@ -1873,7 +1873,7 @@ func.func @test_random_uniform() -> !torch.vtensor<[10],f32> attributes {torch.o
  // CHECK-DAG: %[[F1:.+]] = torch.constant.float 1.000000e+00
  // CHECK-DAG: %[[F0:.+]] = torch.constant.float 0.000000e+00
  // CHECK: torch.aten.uniform %[[EMPTY_TENSOR]], %[[F0]], %[[F1]], %[[NONE]] : !torch.vtensor<[10],f32>, !torch.float, !torch.float, !torch.none -> !torch.vtensor<[10],f32>
  %0 = torch.operator "onnx.RandomUniform"() {torch.onnx.dtype = 1 : si64, torch.onnx.high = 1.000000e+00 : f32, torch.onnx.low = 0.000000e+00 : f32, torch.onnx.shape = [10 : si64]} : () -> !torch.vtensor<[10],f32>
  return %0 : !torch.vtensor<[10],f32>
}
@@ -1888,6 +1888,6 @@ func.func @test_random_uniform_like(%arg0: !torch.vtensor<[10],f32>) -> !torch.v
  // CHECK-DAG: %[[F0:.+]] = torch.constant.float 0.000000e+00
  // CHECK-DAG: %[[F1:.+]] = torch.constant.float 1.000000e+00
  // CHECK: torch.aten.uniform %[[CAST]], %[[F0]], %[[F1]], %[[NONE]] : !torch.vtensor<[10],f32>, !torch.float, !torch.float, !torch.none -> !torch.vtensor<[10],f32>
  %0 = torch.operator "onnx.RandomUniformLike"(%arg0) {torch.onnx.dtype = 1 : si64, torch.onnx.high = 1.000000e+00 : f32, torch.onnx.low = 0.000000e+00 : f32} : (!torch.vtensor<[10],f32>) -> !torch.vtensor<[10],f32>
  return %0 : !torch.vtensor<[10],f32>
}


@@ -45,4 +45,4 @@ func.func @cumsum_operation(%arg0: !torch.vtensor<[2,3],f64>,
    -> !torch.vtensor<[2,3],f64> attributes {torch.onnx_meta.ir_version = 9 : si64, torch.onnx_meta.opset_version = 11 : si64, torch.onnx_meta.producer_name = "backend-test", torch.onnx_meta.producer_version = ""} {
  %212 = torch.operator "onnx.CumSum"(%arg0, %arg1) : (!torch.vtensor<[2,3],f64>, !torch.vtensor<[],si32>) -> !torch.vtensor<[2,3],f64>
  return %212 : !torch.vtensor<[2,3],f64>
}


@@ -82,5 +82,3 @@ func.func @torch.aten.flatten.using_ints$rank0(%arg0: !torch.vtensor<[],f32>) ->
  %0 = torch.aten.flatten.using_ints %arg0, %int0, %int0 : !torch.vtensor<[],f32>, !torch.int, !torch.int -> !torch.vtensor<[1],f32>
  return %0 : !torch.vtensor<[1],f32>
}


@@ -86,4 +86,3 @@ func.func @grid_sampler3(%arg0: !torch.vtensor<[?,?,?,?],f32>, %arg1: !torch.vte
  %4 = torch.aten.grid_sampler %arg0, %arg1, %int0, %int1, %false : !torch.vtensor<[?,?,?,?],f32>, !torch.vtensor<[?,?,?,?],f32>, !torch.int, !torch.int, !torch.bool -> !torch.vtensor<[?,?,?,?],f32>
  return %4 : !torch.vtensor<[?,?,?,?],f32>
}


@@ -254,4 +254,3 @@ func.func @torch.aten.view$dynamicInferredSame(%arg0: !torch.vtensor<[10,?,2,3],
  %1 = torch.aten.view %arg0, %0 : !torch.vtensor<[10,?,2,3],f32>, !torch.list<int> -> !torch.vtensor<[2,5,?,6],f32>
  return %1 : !torch.vtensor<[2,5,?,6],f32>
}


@@ -636,4 +636,4 @@ func.func @torch.aten.div.Tensor_mode$floor(%arg0: !torch.vtensor<[?,?,?,?],f32>
func.func @torch.aten.abs(%arg0: !torch.vtensor<[15,15],si64>) -> !torch.vtensor<[15,15],si64>{
  %0 = torch.aten.abs %arg0 : !torch.vtensor<[15,15],si64> -> !torch.vtensor<[15,15],si64>
  return %0 : !torch.vtensor<[15,15],si64>
}


@@ -63,4 +63,3 @@ func.func @torch.aten.embedding$rank_two_indices(%weight: !torch.vtensor<[?,?],f
  %ret = torch.aten.embedding %weight, %indices, %int-1, %false, %false : !torch.vtensor<[?,?],f32>, !torch.vtensor<[?,1], si64>, !torch.int, !torch.bool, !torch.bool -> !torch.vtensor<[?,1,?],f32>
  return %ret: !torch.vtensor<[?,1,?],f32>
}


@@ -32,4 +32,4 @@ func.func @forward(%arg0: !torch.vtensor<[?,?],si64>, %arg1: !torch.vtensor<[?,?
  %int0 = torch.constant.int 0
  %0 = torch.aten.scatter.src %arg0, %int0, %arg1, %arg2 : !torch.vtensor<[?,?],si64>, !torch.int, !torch.vtensor<[?,?],si64>, !torch.vtensor<[?,?],si64> -> !torch.vtensor<[?,?],si64>
  return %0 : !torch.vtensor<[?,?],si64>
}


@@ -565,4 +565,3 @@ func.func @torch.aten.unsqueeze$from_end(%arg0: !torch.vtensor<[?,?,?,?],f32>) -
  %0 = torch.aten.unsqueeze %arg0, %int-2 : !torch.vtensor<[?,?,?,?],f32>, !torch.int -> !torch.vtensor<[?,?,?,1,?],f32>
  return %0 : !torch.vtensor<[?,?,?,1,?],f32>
}


@@ -1,6 +1,6 @@
// RUN: torch-mlir-opt <%s -convert-torch-to-tosa -split-input-file

// CHECK: %{{.*}} = tosa.cast %{{.*}} : (tensor<1x32x220x220xf32>) -> tensor<1x32x220x220xf16>
func.func @forward(%arg0: !torch.vtensor<[1,32,220,220],f32>) -> !torch.vtensor<[1,32,220,220],f16> {
  %int5 = torch.constant.int 5
  %false = torch.constant.bool false
@@ -8,5 +8,3 @@ func.func @forward(%arg0: !torch.vtensor<[1,32,220,220],f32>) -> !torch.vtensor<
  %out = torch.aten.to.dtype %arg0, %int5, %false, %false, %none : !torch.vtensor<[1,32,220,220],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,220,220],f16>
  return %out : !torch.vtensor<[1,32,220,220],f16>
}


@@ -15,4 +15,3 @@ func.func @forward(%input: !torch.vtensor<[1,64,1,100],f32>) -> !torch.vtensor<[
  %output = torch.aten.convolution %input, %weight, %bias, %stride, %int1x1, %int1x1, %true, %int1x1, %int1 : !torch.vtensor<[1,64,1,100],f32>, !torch.vtensor<[64,64,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,2,200],f32>
  return %output : !torch.vtensor<[1,64,2,200],f32>
}


@@ -1524,7 +1524,7 @@ func.func @torch.aten.tensor.float() -> !torch.vtensor<[],f32> {
// CHECK-NEXT: torch.vtensor.literal(dense<45> : tensor<si32>) : !torch.vtensor<[],si32>
func.func @torch.aten.tensor.int() -> !torch.vtensor<[],si32> {
  %none = torch.constant.none
  %false = torch.constant.bool false
  %int45 = torch.constant.int 45
  %67 = torch.aten.tensor.int %int45, %none, %none, %false : !torch.int, !torch.none, !torch.none, !torch.bool -> !torch.vtensor<[],si32>
  return %67 : !torch.vtensor<[],si32>
@@ -2091,7 +2091,7 @@ func.func @torch.aten.broadcast_to$fold(%arg0: !torch.vtensor<[3,4,2],f32>) -> !
// -----

// CHECK-LABEL: func.func @torch.aten.broadcast_to$fold_splat
// CHECK: %[[CST:.+]] = torch.vtensor.literal(dense<3.000000e+00> : tensor<3x4x2xf32>) : !torch.vtensor<[3,4,2],f32>
// CHECK: return %[[CST]]
func.func @torch.aten.broadcast_to$fold_splat() -> !torch.vtensor<[3,4,2],f32> {
  %tensor = torch.vtensor.literal(dense<3.0> : tensor<1x4x1xf32>) : !torch.vtensor<[1,4,1],f32>


@@ -186,4 +186,3 @@ func.func @torch.permute$negative_index_valid (%arg0: !torch.vtensor<[1,2,3],f32
  %3 = torch.aten.permute %arg0, %perm : !torch.vtensor<[1,2,3],f32>, !torch.list<int> -> !torch.vtensor<[1,2,3],f32>
  return %3 : !torch.vtensor<[1,2,3],f32>
}


@@ -4,6 +4,6 @@ try:
    import torch

    if torch.__version__ >= "2.3.0":
        print("Enabling Torch v2.3+ tests")
        config.unsupported = False
except ModuleNotFoundError:
    ...


@@ -2,6 +2,6 @@
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

# Skip the following directories when overlaying
utils/bazel
externals