* Bump LLVM

---------

Co-authored-by: Matthias Gehre <matthias.gehre@xilinx.com>
Maksim Levental 2023-06-13 09:17:23 -05:00 committed by GitHub
parent ddea56a832
commit 0caaf8d32a
10 changed files with 69 additions and 35 deletions

@@ -1 +1 @@
-Subproject commit 26ee8947702d79ce2cab8e577f713685a5ca4a55
+Subproject commit 2b4807ba044230ed6243f5c3a1329a9344de758d

externals/mlir-hlo

@@ -1 +1 @@
-Subproject commit 4805d8498dfb81566076f56f52273b426c1cc5bf
+Subproject commit ac26bdba7a5edfe6060ba5be528b9d20c987297d


@@ -116,6 +116,10 @@ void CreateReplaceOpAndInfer(PatternRewriter &rewriter, Operation *op,
rewriter.replaceOp(op, result->getResults());
}
+// Get accumulator type for AvgPool2dOp.
+LogicalResult getAvgPool2dAccType(PatternRewriter &rewriter, Value input,
+TypeAttr &accType);
} // namespace tosa
} // namespace mlir
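The helper declared here centralizes the choice of the accumulator type attribute (acc_type) that tosa.avg_pool2d now carries. For context, the intended call pattern mirrors the lowering further down in this diff; a sketch only, with rewriter, op, and input assumed to be in scope, and not itself part of this hunk:

  TypeAttr accType;
  if (failed(tosa::getAvgPool2dAccType(rewriter, input, accType)))
    return rewriter.notifyMatchFailure(
        op, "Failed to get accumulator type for pooling");
  // accType is then forwarded as the acc_type attribute when building
  // tosa::AvgPool2dOp.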


@@ -98,9 +98,6 @@ getMaxInDim(ConversionPatternRewriter &rewriter, Operation *op, Value &input,
initIndex = hlo::getConstTensor<int64_t>(rewriter, op, {0}, {}).value();
}
-DenseIntElementsAttr dimensions = DenseIntElementsAttr::get(
-RankedTensorType::get({}, rewriter.getI64Type()), dim);
auto inputShapeTensor = rewriter.create<mlir::tensor::FromElementsOp>(
op->getLoc(), inputShapeVec);
auto indexTensor = rewriter.create<stablehlo::DynamicIotaOp>(
@@ -115,7 +112,7 @@ getMaxInDim(ConversionPatternRewriter &rewriter, Operation *op, Value &input,
initValue,
initIndex,
},
-dimensions);
+rewriter.getI64TensorAttr(dim));
Block &block = stablehloReduceOp.getBody().emplaceBlock();
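For context, Builder::getI64TensorAttr packs its argument into a 1-D i64 DenseIntElementsAttr, so the hand-built attribute removed above becomes a single call. A rough before/after sketch, assuming dim is the int64_t reduction axis used by the surrounding code (not part of the patch):

  // Removed form: build the elements attribute by hand (rank-0 tensor type).
  DenseIntElementsAttr oldDims = DenseIntElementsAttr::get(
      RankedTensorType::get({}, rewriter.getI64Type()), dim);
  // Replacement: one builder call yielding dense<dim> : tensor<1xi64>, the
  // 1-D i64 dimensions attribute the stablehlo reduce op built here expects.
  DenseIntElementsAttr newDims = rewriter.getI64TensorAttr(dim);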


@@ -3985,10 +3985,25 @@ public:
return rewriter.notifyMatchFailure(
op, "Failed to process inputs for pooling");
-auto pooledOutput =
-rewriter
-.create<TosaOpT>(op->getLoc(), outputTy, input, kernel, stride, pad)
+Value pooledOutput;
+static_assert(std::is_same<TosaOpT, tosa::MaxPool2dOp>::value ||
+std::is_same<TosaOpT, tosa::AvgPool2dOp>::value,
+"Expected either tosa::MaxPool2dOp or tosa::AvgPool2dOp");
+if constexpr (std::is_same<TosaOpT, tosa::MaxPool2dOp>::value) {
+pooledOutput = rewriter
+.create<TosaOpT>(op->getLoc(), outputTy, input, kernel,
+stride, pad)
+.getResult();
+} else if constexpr (std::is_same<TosaOpT, tosa::AvgPool2dOp>::value) {
+TypeAttr accType;
+if (failed(tosa::getAvgPool2dAccType(rewriter, input, accType)))
+return rewriter.notifyMatchFailure(
+op, "Failed to get accumulator type for pooling");
+pooledOutput = rewriter
+.create<TosaOpT>(op->getLoc(), outputTy, input, kernel,
+stride, pad, accType)
+.getResult();
+}
auto transposedOutput =
ConvertAtenPoolingBaseOp<AtenOpT, TosaOpT>::transposePoolingOutputToChw(
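The if constexpr chain above is resolved at compile time: only the branch matching TosaOpT is instantiated, which is what lets the AvgPool2dOp branch pass the extra accType argument that the MaxPool2dOp builder does not take. A self-contained illustration of the same dispatch technique, using made-up stand-in types rather than the real TOSA ops:

  #include <iostream>
  #include <type_traits>

  struct MaxPool {
    static void create() { std::cout << "max pool: no accumulator attribute\n"; }
  };
  struct AvgPool {
    static void create(int accBits) {
      std::cout << "avg pool: accumulate in " << accBits << " bits\n";
    }
  };

  template <typename OpT>
  void buildPool() {
    static_assert(std::is_same<OpT, MaxPool>::value ||
                      std::is_same<OpT, AvgPool>::value,
                  "Expected either MaxPool or AvgPool");
    if constexpr (std::is_same<OpT, MaxPool>::value) {
      OpT::create();                // builder overload without an accumulator
    } else {
      OpT::create(/*accBits=*/32);  // builder overload that requires one
    }
  }

  int main() {
    buildPool<MaxPool>();
    buildPool<AvgPool>();
  }

A plain runtime if would not compile here, because both create calls would have to type-check for every OpT; if constexpr discards the non-matching branch before that check.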


@@ -169,7 +169,6 @@ std::optional<Value> getZerosLikeTensor(PatternRewriter &rewriter,
.getResult();
}
// Templated function to create a constant op for given type and shape.
// T: storage C type.
// Default template creates a constant tensor in T.
@@ -243,8 +242,7 @@ std::optional<Value> getConstTensor<float>(PatternRewriter &rewriter,
}
static LogicalResult checkValidityOfCast(Type src, Type dest) {
-if ((src == dest) ||
-(src.isInteger(64) && dest.isInteger(32)) ||
+if ((src == dest) || (src.isInteger(64) && dest.isInteger(32)) ||
(src.isInteger(64) && dest.isInteger(8)) ||
(src.isInteger(64) && dest.isInteger(1)) ||
(src.isInteger(64) && dest.isF32()) ||
@@ -256,18 +254,14 @@ static LogicalResult checkValidityOfCast(Type src, Type dest) {
(src.isInteger(8) && dest.isInteger(1)) ||
(src.isInteger(8) && dest.isBF16()) ||
(src.isInteger(1) && dest.isInteger(64)) ||
-(src.isInteger(1) && dest.isF32()) ||
-(src.isF32() && dest.isF64()) ||
-(src.isF32() && dest.isBF16()) ||
-(src.isF64() && dest.isF32()) ||
-(src.isF64() && dest.isBF16()) ||
-(src.isF32() && dest.isInteger(8)) ||
+(src.isInteger(1) && dest.isF32()) || (src.isF32() && dest.isF64()) ||
+(src.isF32() && dest.isBF16()) || (src.isF64() && dest.isF32()) ||
+(src.isF64() && dest.isBF16()) || (src.isF32() && dest.isInteger(8)) ||
(src.isF32() && dest.isInteger(64)) ||
(src.isF32() && dest.isInteger(1)) ||
(src.isBF16() && dest.isInteger(8)) ||
(src.isBF16() && dest.isInteger(16)) ||
-(src.isBF16() && dest.isInteger(32)) ||
-(src.isBF16() && dest.isF32())) {
+(src.isBF16() && dest.isInteger(32)) || (src.isBF16() && dest.isF32())) {
return success();
}
return failure();
@@ -341,5 +335,27 @@ template std::optional<Value> getConstTensor<int64_t>(PatternRewriter &,
Operation *,
ArrayRef<int64_t> vec,
ArrayRef<int64_t> shape);
+LogicalResult getAvgPool2dAccType(PatternRewriter &rewriter, Value input,
+TypeAttr &accType) {
+auto inputTy = llvm::dyn_cast<ShapedType>(input.getType());
+if (!inputTy)
+return failure();
+auto inputETy = inputTy.getElementType();
+if (auto quantType =
+llvm::dyn_cast<mlir::quant::UniformQuantizedType>(inputETy))
+inputETy = quantType.getStorageType();
+// TOSA supports both FP16 and FP32 accumulator types for FP16 input. Once
+// FP16 is supported here, the accumulator type can be selected based on the
+// trade-off between performance and accuracy. Set to FP32 by default.
+accType = inputETy.isa<FloatType>()
+? mlir::TypeAttr::get(rewriter.getF32Type())
+: mlir::TypeAttr::get(rewriter.getIntegerType(32));
+return success();
+}
} // namespace tosa
} // namespace mlir
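The rule implemented above is deliberately simple: any floating-point element type (after unwrapping a quantized type to its storage type) gets an f32 accumulator, everything else gets i32. A standalone restatement of just that decision, using illustrative enums instead of MLIR types (not part of the patch):

  enum class ElemKind { F16, BF16, F32, I8, I16, I32 };
  enum class AccKind { F32, I32 };

  // Float element types accumulate in f32 (including fp16, per the comment
  // above); integer and quantized-storage types accumulate in i32.
  constexpr AccKind avgPool2dAccKind(ElemKind e) {
    return (e == ElemKind::F16 || e == ElemKind::BF16 || e == ElemKind::F32)
               ? AccKind::F32
               : AccKind::I32;
  }

  static_assert(avgPool2dAccKind(ElemKind::F16) == AccKind::F32,
                "fp16 input defaults to an f32 accumulator");
  static_assert(avgPool2dAccKind(ElemKind::I8) == AccKind::I32,
                "int8 input accumulates in i32");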


@@ -12,6 +12,7 @@
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinOps.h"
#include "mlir/Support/LogicalResult.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "torch-mlir/Dialect/Torch/IR/TorchDialect.h"
@@ -239,13 +240,13 @@ static LogicalResult adjustCallingConventions(func::FuncOp func,
typeConverter.addConversion([](Type type) { return type; });
typeConverter.addConversion(
[](Torch::TupleType type,
-SmallVectorImpl<Type> &types) -> Optional<LogicalResult> {
+SmallVectorImpl<Type> &types) -> LogicalResult {
llvm::append_range(types, type.getContainedTypes());
return success();
});
typeConverter.addConversion(
[](Torch::NoneType type,
-SmallVectorImpl<Type> &types) -> Optional<LogicalResult> {
+SmallVectorImpl<Type> &types) -> LogicalResult {
return success();
});
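Both callbacks are 1-to-N conversions: a Torch::TupleType expands to its contained types and a Torch::NoneType expands to zero types, with success() meaning the conversion applied. The signature change itself appears to be part of the migration away from llvm::Optional that this LLVM bump forces; the files below switch to std::optional for the same reason. A standalone analogue of the expansion behaviour, with made-up types rather than MLIR's API:

  #include <cassert>
  #include <string>
  #include <vector>

  struct TupleTy { std::vector<std::string> contained; };
  struct NoneTy {};

  // 1 -> N expansion: a tuple contributes each of its contained types...
  bool convert(const TupleTy &t, std::vector<std::string> &types) {
    types.insert(types.end(), t.contained.begin(), t.contained.end());
    return true; // success()
  }
  // ...while none contributes no types at all, yet still converts successfully.
  bool convert(const NoneTy &, std::vector<std::string> &) { return true; }

  int main() {
    std::vector<std::string> out;
    assert(convert(TupleTy{{"i64", "f32"}}, out) && out.size() == 2);
    assert(convert(NoneTy{}, out) && out.size() == 2);
    return 0;
  }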


@@ -29,14 +29,15 @@ setupValueTensorToBuiltinTensorConversion(ConversionTarget &target,
target.addLegalOp<TorchConversion::ToBuiltinTensorOp,
TorchConversion::FromBuiltinTensorOp>();
typeConverter.addConversion(
-[](Torch::ValueTensorType type) -> Optional<Type> {
+[](Torch::ValueTensorType type) -> std::optional<Type> {
return type.toBuiltinTensor();
});
typeConverter.addTargetMaterialization([](OpBuilder &builder, TensorType type,
ValueRange inputs,
Location loc) -> Value {
assert(inputs.size() == 1);
-assert(inputs[0].getType().isa<Torch::BaseTensorType>());
+if (!inputs[0].getType().isa<Torch::BaseTensorType>())
+return {};
return builder.create<ToBuiltinTensorOp>(loc, inputs[0]);
});
auto sourceMaterialization = [](OpBuilder &builder,
@@ -53,12 +54,12 @@ setupValueTensorToBuiltinTensorConversion(ConversionTarget &target,
static void setupTorchBoolToI1Conversion(ConversionTarget &target,
TypeConverter &typeConverter) {
target.addLegalOp<TorchConversion::ToI1Op, TorchConversion::FromI1Op>();
-typeConverter.addConversion([](Torch::BoolType type) -> Optional<Type> {
+typeConverter.addConversion([](Torch::BoolType type) -> std::optional<Type> {
return IntegerType::get(type.getContext(), 1);
});
typeConverter.addTargetMaterialization([](OpBuilder &builder,
IntegerType type, ValueRange inputs,
-Location loc) -> Optional<Value> {
+Location loc) -> std::optional<Value> {
// Other builtin integer types could be handled by other materializers.
if (!(type.getWidth() == 1 && type.isSignless()))
return std::nullopt;
@@ -79,12 +80,12 @@ static void setupTorchBoolToI1Conversion(ConversionTarget &target,
static void setupTorchIntToI64Conversion(ConversionTarget &target,
TypeConverter &typeConverter) {
target.addLegalOp<TorchConversion::ToI64Op, TorchConversion::FromI64Op>();
-typeConverter.addConversion([](Torch::IntType type) -> Optional<Type> {
+typeConverter.addConversion([](Torch::IntType type) -> std::optional<Type> {
return IntegerType::get(type.getContext(), 64);
});
typeConverter.addTargetMaterialization([](OpBuilder &builder,
IntegerType type, ValueRange inputs,
-Location loc) -> Optional<Value> {
+Location loc) -> std::optional<Value> {
// Other builtin integer types could be handled by other materializers.
if (!(type.getWidth() == 64 && type.isSignless()))
return std::nullopt;
@@ -108,12 +109,12 @@ static void setupTorchIntToI64Conversion(ConversionTarget &target,
static void setupTorchFloatToF64Conversion(ConversionTarget &target,
TypeConverter &typeConverter) {
target.addLegalOp<TorchConversion::ToF64Op, TorchConversion::FromF64Op>();
-typeConverter.addConversion([](Torch::FloatType type) -> Optional<Type> {
+typeConverter.addConversion([](Torch::FloatType type) -> std::optional<Type> {
return Float64Type::get(type.getContext());
});
typeConverter.addTargetMaterialization([](OpBuilder &builder,
Float64Type type, ValueRange inputs,
-Location loc) -> Optional<Value> {
+Location loc) -> std::optional<Value> {
assert(inputs.size() == 1);
assert(inputs[0].getType().isa<Torch::FloatType>());
return builder.create<ToF64Op>(loc, inputs[0]).getResult();
@@ -132,12 +133,12 @@ static void setupTorchGeneratorToI64Conversion(ConversionTarget &target,
TypeConverter &typeConverter) {
target.addLegalOp<TorchConversion::GeneratorToI64Op,
TorchConversion::I64ToGeneratorOp>();
-typeConverter.addConversion([](Torch::GeneratorType type) -> Optional<Type> {
+typeConverter.addConversion([](Torch::GeneratorType type) -> std::optional<Type> {
return IntegerType::get(type.getContext(), 64);
});
typeConverter.addTargetMaterialization([](OpBuilder &builder,
IntegerType type, ValueRange inputs,
-Location loc) -> Optional<Value> {
+Location loc) -> std::optional<Value> {
// Other builtin integer types could be handled by other materializers.
if (!(type.getWidth() == 64 && type.isSignless()))
return std::nullopt;
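A pattern worth noting in these hooks: returning std::nullopt from a materialization, as the width checks above do, declines so that another registered materializer can claim the type, whereas returning a null Value (as the reworked tensor materialization earlier in this file now does instead of asserting) is treated as a hard failure rather than a decline. A self-contained sketch of the "first handler that does not decline wins" idea, independent of MLIR's actual TypeConverter API:

  #include <functional>
  #include <iostream>
  #include <optional>
  #include <string>
  #include <vector>

  // Generic analogue of chained materializers: each handler may decline by
  // returning std::nullopt, and the first one that answers wins.
  using Handler = std::function<std::optional<std::string>(int width)>;

  std::optional<std::string> materialize(const std::vector<Handler> &handlers,
                                         int width) {
    for (const Handler &h : handlers)
      if (std::optional<std::string> result = h(width))
        return result;
    return std::nullopt; // nobody claimed this type
  }

  int main() {
    std::vector<Handler> handlers = {
        [](int w) -> std::optional<std::string> {
          if (w != 1) return std::nullopt; // other materializers handle it
          return std::string("torch.bool -> i1");
        },
        [](int w) -> std::optional<std::string> {
          if (w != 64) return std::nullopt;
          return std::string("torch.int -> i64");
        },
    };
    std::cout << materialize(handlers, 64).value_or("<none>") << "\n";
    std::cout << materialize(handlers, 32).value_or("<none>") << "\n";
  }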


@@ -437,6 +437,6 @@ def import_fx_graph_as_func(g: torch.fx.Graph, func_name: str) -> ir.Module:
# The reason is that the supported subset only involves stateless
# fx.Graph's, so the state held on the fx.GraphModule is not necessary.
_verify_fx_graph_conforms_to_subset(g)
-with ir.Context() as context:
+with ir.Context() as context, ir.Location.unknown(context=context):
torch_dialect.register_dialect(context)
return _FXGraphImporter(g, func_name).import_graph()


@@ -799,7 +799,7 @@ func.func @torch.aten.dropout$basic(%arg0: !torch.vtensor<[?,?],f32> ) -> !torch
// CHECK: %[[VAL_10:.*]] = torch.prim.ListConstruct %[[VAL_4]], %[[VAL_4]] : (!torch.int, !torch.int) -> !torch.list<int>
// CHECK: %[[VAL_11:.*]] = "tosa.const"() <{value = dense<[0, 2, 3, 1]> : tensor<4xi32>}> : () -> tensor<4xi32>
// CHECK: %[[VAL_12:.*]] = "tosa.transpose"(%[[VAL_1]], %[[VAL_11]]) : (tensor<1x512x7x7xf32>, tensor<4xi32>) -> tensor<1x7x7x512xf32>
-// CHECK: %[[VAL_13:.*]] = "tosa.avg_pool2d"(%[[VAL_12]]) <{kernel = array<i64: 7, 7>, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 1, 1>}> : (tensor<1x7x7x512xf32>) -> tensor<1x1x1x512xf32>
+// CHECK: %[[VAL_13:.*]] = "tosa.avg_pool2d"(%[[VAL_12]]) <{acc_type = f32, kernel = array<i64: 7, 7>, pad = array<i64: 0, 0, 0, 0>, stride = array<i64: 1, 1>}> : (tensor<1x7x7x512xf32>) -> tensor<1x1x1x512xf32>
// CHECK: %[[VAL_14:.*]] = "tosa.const"() <{value = dense<[0, 3, 1, 2]> : tensor<4xi32>}> : () -> tensor<4xi32>
// CHECK: %[[VAL_15:.*]] = "tosa.transpose"(%[[VAL_13]], %[[VAL_14]]) : (tensor<1x1x1x512xf32>, tensor<4xi32>) -> tensor<1x512x1x1xf32>
// CHECK: %[[VAL_16:.*]] = tensor.cast %[[VAL_15]] : tensor<1x512x1x1xf32> to tensor<1x512x1x1xf32>