mirror of https://github.com/llvm/torch-mlir
[NFC reformat] Run pre-commit on all files and format misc.
This is part 1 of ~3, formatting all miscellaneous text files and CPP files matched by a first run of pre-commit. These tend to be low change-traffic and are likely not disruptive. Subsequent patches will format Python files and remaining CPP files.

Branch: pull/3244/head
parent 6679728c56
commit 5d4b803914
@@ -140,4 +140,3 @@ torch-mlir's representation:
 
 * `ConstantOfShape`: Mapped to `torch.vtensor.literal` with
   a corresponding `value` attribute.
-
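For reference, a `torch.vtensor.literal` of the kind this mapping produces looks roughly like the line below; the shape and splat value are illustrative only and are not taken from this change:

%cst = torch.vtensor.literal(dense<0.0> : tensor<2x3xf32>) : !torch.vtensor<[2,3],f32>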
@@ -277,4 +277,3 @@ directly provided a way to plug into this.
 
 Additionally, we can leverage the [`pytorch-jit-paritybench`](https://github.com/jansel/pytorch-jit-paritybench)
 to verify our end-to-end correctness on real models.
-
@@ -628,8 +628,8 @@ void mlir::torch::onnx_c::populateDefaultDomainGtoP(
             binder.op, resultType, operand);
         return success();
       });
-  patterns.onOp("Not", 1,
-                [](OpBinder binder, ConversionPatternRewriter &rewriter) {
+  patterns.onOp(
+      "Not", 1, [](OpBinder binder, ConversionPatternRewriter &rewriter) {
         Torch::ValueTensorType resultType;
         Value operand;
         if (binder.tensorOperand(operand) ||
@@ -638,8 +638,7 @@ void mlir::torch::onnx_c::populateDefaultDomainGtoP(
         }
 
         auto loc = binder.getLoc();
-        auto operandTy =
-            cast<Torch::ValueTensorType>(operand.getType());
+        auto operandTy = cast<Torch::ValueTensorType>(operand.getType());
         auto eTy = operandTy.getDtype();
 
         if (!eTy.isInteger(1)) {
@@ -649,12 +648,10 @@ void mlir::torch::onnx_c::populateDefaultDomainGtoP(
           auto torchqTy = Torch::getScalarTypeForType(i1ty);
           Value tyConst = rewriter.create<Torch::ConstantIntOp>(
               binder.getLoc(), rewriter.getType<Torch::IntType>(),
-              rewriter.getIntegerAttr(
-                  rewriter.getIntegerType(64),
-                  static_cast<int64_t>(torchqTy)));
+              rewriter.getIntegerAttr(rewriter.getIntegerType(64),
+                                      static_cast<int64_t>(torchqTy)));
           Value none = rewriter.create<Torch::ConstantNoneOp>(loc);
-          Value cstFalse =
-              rewriter.create<Torch::ConstantBoolOp>(loc, false);
+          Value cstFalse = rewriter.create<Torch::ConstantBoolOp>(loc, false);
           operand = rewriter.create<Torch::AtenToDtypeOp>(
               loc, ty, operand, tyConst,
               /*non_blocking=*/cstFalse, /*copy=*/cstFalse,
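For orientation, the pattern above casts a non-boolean operand to the torch bool dtype before the logical not itself is emitted. A minimal sketch of that cast in IR form, modeled on the aten.to.dtype test further down in this diff and using a hypothetical 1-D si32 operand (dtype code 11 is torch's Bool), might look like:

%int11 = torch.constant.int 11
%none = torch.constant.none
%false = torch.constant.bool false
%0 = torch.aten.to.dtype %operand, %int11, %false, %false, %none : !torch.vtensor<[4],si32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[4],i1>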
@@ -189,9 +189,8 @@ Value promoteAndBroadcast(ConversionPatternRewriter &rewriter, Value input,
       do_bcast = true;
     } else {
       op->emitError("The size of tensor a (")
-          << inDim << ")"
-          << "must match the size of tensor b (" << outDim << ")"
-          << "at non-singleton dimension " << inPos;
+          << inDim << ")" << "must match the size of tensor b (" << outDim
+          << ")" << "at non-singleton dimension " << inPos;
     }
   }
   std::reverse(bcastDims.begin(), bcastDims.end());
@@ -305,8 +305,7 @@ public:
       return signalPassFailure();
     } while (!satisfiesBackendContract(module, target));
     LLVM_DEBUG({
-      llvm::dbgs() << "LowerToBackendContractPass: "
-                   << "succeeded after " << i
+      llvm::dbgs() << "LowerToBackendContractPass: " << "succeeded after " << i
                    << " iterations of the simplification pipeline\n";
     });
   }
@@ -28,4 +28,3 @@ set_target_properties(torch_mlir_custom_op_example PROPERTIES
 )
 torch_mlir_python_target_compile_options(torch_mlir_custom_op_example)
 mlir_check_all_link_libraries(torch_mlir_custom_op_example)
-
@@ -157,22 +157,26 @@ static void testTypeMetaDataAccessors(MlirContext ctx) {
   MlirType dictType1 = torchMlirTorchDictTypeGet(strType, floatType);
 
   fprintf(stderr, "dict keyType: ");
-  mlirTypePrint(torchMlirTorchDictTypeGetKeyType(dictType1), printToStderr, NULL);
+  mlirTypePrint(torchMlirTorchDictTypeGetKeyType(dictType1), printToStderr,
+                NULL);
   fprintf(stderr, "\n");
   // CHECK: dict keyType: !torch.str
   fprintf(stderr, "dict valueType: ");
-  mlirTypePrint(torchMlirTorchDictTypeGetValueType(dictType1), printToStderr, NULL);
+  mlirTypePrint(torchMlirTorchDictTypeGetValueType(dictType1), printToStderr,
+                NULL);
   fprintf(stderr, "\n");
   // CHECK: dict valueType: !torch.float
 
   MlirType dictType2 = torchMlirTorchDictTypeGet(floatType, strType);
 
   fprintf(stderr, "dict keyType: ");
-  mlirTypePrint(torchMlirTorchDictTypeGetKeyType(dictType2), printToStderr, NULL);
+  mlirTypePrint(torchMlirTorchDictTypeGetKeyType(dictType2), printToStderr,
+                NULL);
   fprintf(stderr, "\n");
   // CHECK: dict keyType: !torch.float
   fprintf(stderr, "dict valueType: ");
-  mlirTypePrint(torchMlirTorchDictTypeGetValueType(dictType2), printToStderr, NULL);
+  mlirTypePrint(torchMlirTorchDictTypeGetValueType(dictType2), printToStderr,
+                NULL);
   fprintf(stderr, "\n");
   // CHECK: dict valueType: !torch.str
 }
@@ -82,5 +82,3 @@ func.func @torch.aten.flatten.using_ints$rank0(%arg0: !torch.vtensor<[],f32>) ->
   %0 = torch.aten.flatten.using_ints %arg0, %int0, %int0 : !torch.vtensor<[],f32>, !torch.int, !torch.int -> !torch.vtensor<[1],f32>
   return %0 : !torch.vtensor<[1],f32>
 }
-
-
@@ -86,4 +86,3 @@ func.func @grid_sampler3(%arg0: !torch.vtensor<[?,?,?,?],f32>, %arg1: !torch.vte
   %4 = torch.aten.grid_sampler %arg0, %arg1, %int0, %int1, %false : !torch.vtensor<[?,?,?,?],f32>, !torch.vtensor<[?,?,?,?],f32>, !torch.int, !torch.int, !torch.bool -> !torch.vtensor<[?,?,?,?],f32>
   return %4 : !torch.vtensor<[?,?,?,?],f32>
 }
-
@@ -254,4 +254,3 @@ func.func @torch.aten.view$dynamicInferredSame(%arg0: !torch.vtensor<[10,?,2,3],
   %1 = torch.aten.view %arg0, %0 : !torch.vtensor<[10,?,2,3],f32>, !torch.list<int> -> !torch.vtensor<[2,5,?,6],f32>
   return %1 : !torch.vtensor<[2,5,?,6],f32>
 }
-
@@ -63,4 +63,3 @@ func.func @torch.aten.embedding$rank_two_indices(%weight: !torch.vtensor<[?,?],f
   %ret = torch.aten.embedding %weight, %indices, %int-1, %false, %false : !torch.vtensor<[?,?],f32>, !torch.vtensor<[?,1], si64>, !torch.int, !torch.bool, !torch.bool -> !torch.vtensor<[?,1,?],f32>
   return %ret: !torch.vtensor<[?,1,?],f32>
 }
-
@@ -565,4 +565,3 @@ func.func @torch.aten.unsqueeze$from_end(%arg0: !torch.vtensor<[?,?,?,?],f32>) -
   %0 = torch.aten.unsqueeze %arg0, %int-2 : !torch.vtensor<[?,?,?,?],f32>, !torch.int -> !torch.vtensor<[?,?,?,1,?],f32>
   return %0 : !torch.vtensor<[?,?,?,1,?],f32>
 }
-
@@ -8,5 +8,3 @@ func.func @forward(%arg0: !torch.vtensor<[1,32,220,220],f32>) -> !torch.vtensor<
   %out = torch.aten.to.dtype %arg0, %int5, %false, %false, %none : !torch.vtensor<[1,32,220,220],f32>, !torch.int, !torch.bool, !torch.bool, !torch.none -> !torch.vtensor<[1,32,220,220],f16>
   return %out : !torch.vtensor<[1,32,220,220],f16>
 }
-
-
@@ -15,4 +15,3 @@ func.func @forward(%input: !torch.vtensor<[1,64,1,100],f32>) -> !torch.vtensor<[
   %output = torch.aten.convolution %input, %weight, %bias, %stride, %int1x1, %int1x1, %true, %int1x1, %int1 : !torch.vtensor<[1,64,1,100],f32>, !torch.vtensor<[64,64,3,3],f32>, !torch.vtensor<[64],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int -> !torch.vtensor<[1,64,2,200],f32>
   return %output : !torch.vtensor<[1,64,2,200],f32>
 }
-
@@ -186,4 +186,3 @@ func.func @torch.permute$negative_index_valid (%arg0: !torch.vtensor<[1,2,3],f32
   %3 = torch.aten.permute %arg0, %perm : !torch.vtensor<[1,2,3],f32>, !torch.list<int> -> !torch.vtensor<[1,2,3],f32>
   return %3 : !torch.vtensor<[1,2,3],f32>
 }
-