mirror of https://github.com/llvm/torch-mlir

Update to LLVM 3157f03a349cfc852cdd994675eaa9652caa2e3a (#2060)

New requirement to explicitly cast for interfaces: https://reviews.llvm.org/D148493

parent 6b19c5e582
commit 6a833e1922
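The first hunk below shows what that LLVM change requires at a call site: an Operation * result no longer implicitly converts to an op-interface handle, so the cast must be spelled out. A minimal sketch of the pattern, using MemoryEffectOpInterface as a stand-in for TMTensorOp; the helper name and shape are illustrative, not from this commit:

#include "mlir/IR/Operation.h"
#include "mlir/Interfaces/SideEffectInterfaces.h"

using namespace mlir;

// Hypothetical helper (not from this commit): clone an op and hand the
// unattached copy back as an interface handle.
static MemoryEffectOpInterface cloneAsEffectOp(Operation *op) {
  Operation *copy = op->clone();
  // Pre-D148493, `return copy;` compiled through an implicit conversion;
  // now the cast is explicit, and asserts if `copy` lacks the interface.
  return cast<MemoryEffectOpInterface>(copy);
}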
@@ -87,7 +87,7 @@ static TMTensorOp createTMTensorOpOnBuffers(ConversionPatternRewriter &rewriter,
                                             ValueRange outputs) {
   SmallVector<Value, 8> newOperands = inputs;
   newOperands.append(outputs.begin(), outputs.end());
-  return tmtensorOp.clone(rewriter, tmtensorOp->getLoc(), {}, newOperands);
+  return cast<TMTensorOp>(tmtensorOp.clone(rewriter, tmtensorOp->getLoc(), {}, newOperands));
 }

 /// Generic conversion pattern that matches any TMTensorOp. This avoids template
@@ -1 +1 @@
-Subproject commit 3abae1c88416858cf2e9a7ed9417bc52033933b4
+Subproject commit 3157f03a349cfc852cdd994675eaa9652caa2e3a
@@ -1 +1 @@
-Subproject commit 70ebc73a28767b772e09e0dd03cf3e47457435a2
+Subproject commit a9ac343738945be7744e304b72704128e48aa06f
@@ -80,7 +80,7 @@ static LogicalResult createPoolingOp(
     highPaddingIncludingNC[2] += strideInts[0];
     highPaddingIncludingNC[3] += strideInts[1];
   }
-  Value initValue = rewriter.create<arith::ConstantOp>(loc, initValueAttr);
+  Value initValue = rewriter.create<arith::ConstantOp>(loc, cast<TypedAttr>(initValueAttr));
   paddedInput = torch_to_linalg::getPaddedTensor(
       op, rewriter, self, lowPaddingIncludingNC, highPaddingIncludingNC,
       initValue);
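This hunk and the several that follow share one cause: arith::ConstantOp's builder now takes a TypedAttr rather than a plain Attribute, so a value held as Attribute must be cast before use. A minimal sketch of the call-site pattern, with an illustrative helper name:

#include "mlir/Dialect/Arith/IR/Arith.h"
#include "mlir/IR/Builders.h"

using namespace mlir;

// Illustrative helper: build an arith.constant from an opaque Attribute.
static Value makeConstantSketch(OpBuilder &b, Location loc, Attribute attr) {
  // cast<TypedAttr> asserts when `attr` carries no type (e.g. a UnitAttr);
  // FloatAttr, IntegerAttr, and DenseElementsAttr all pass.
  return b.create<arith::ConstantOp>(loc, cast<TypedAttr>(attr));
}

Declaring the variable as TypedAttr up front, as the later hunks do, avoids the cast entirely when the producer (getFloatAttr and friends) already returns a typed attribute.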
@@ -154,7 +154,7 @@ public:
       return rewriter.notifyMatchFailure(op, "invalid pooling parameters");

     Type elementType = self.getType().cast<RankedTensorType>().getElementType();
-    auto smallestFPValueAttr = rewriter.getFloatAttr(
+    TypedAttr smallestFPValueAttr = rewriter.getFloatAttr(
         elementType,
         APFloat::getLargest(
             elementType.cast<mlir::FloatType>().getFloatSemantics(),
@@ -277,7 +277,7 @@ static Value createLinalgPayloadForReduceOp(OpBuilder &b, Location loc,
     Value result = payloadArgs[1];
     Value self = convertScalarToDtype(b, loc, elem, resultElementType);
     auto abs = b.create<math::AbsFOp>(loc, self);
-    Attribute twoAttr = b.getFloatAttr(resultElementType, 2.0);
+    TypedAttr twoAttr = b.getFloatAttr(resultElementType, 2.0);
     auto ord = b.create<arith::ConstantOp>(loc, twoAttr);
     auto pow = b.create<math::PowFOp>(loc, abs, ord);
     return b.create<arith::AddFOp>(loc, pow, result);
@@ -403,7 +403,7 @@ private:
       return rewriter.notifyMatchFailure(op, "unimplemented: ord = +/- inf");

     // Raise each summed value to the inverse of the order of the norm.
-    Attribute oneAttr = rewriter.getFloatAttr(elemType, 1.0);
+    TypedAttr oneAttr = rewriter.getFloatAttr(elemType, 1.0);
     auto oneValue = rewriter.create<arith::ConstantOp>(loc, oneAttr);
     auto inverseOrdValue =
         rewriter.create<arith::DivFOp>(loc, oneValue, ordValue);
@@ -56,7 +56,7 @@ using namespace mlir::torch::TMTensor;
 // that these patterns become mostly mechanical associations of
 // "aten.foo -> linalg.foo".

-static Attribute getNumericLimit(PatternRewriter &rewriter, Type elementType,
+static TypedAttr getNumericLimit(PatternRewriter &rewriter, Type elementType,
                                  bool getMin = true) {
   auto bitWidth = elementType.getIntOrFloatBitWidth();
   if (llvm::isa<mlir::IntegerType>(elementType)) {
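Tightening the helper's return type here, instead of casting at each caller, keeps the call sites in the hunks below unchanged apart from their declarations. A simplified sketch of the float branch, whose body this hunk does not show (only the integer check is visible), so the shape is an assumption:

#include "llvm/ADT/APFloat.h"
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/PatternMatch.h"

using namespace mlir;

// Simplified sketch: the float half of a numeric-limit helper, returning
// TypedAttr so the result feeds arith::ConstantOp with no cast at the caller.
static TypedAttr getFloatLimitSketch(PatternRewriter &rewriter,
                                     Type elementType, bool getMin = true) {
  auto fpTy = elementType.cast<mlir::FloatType>();
  // Most-negative finite value when getMin, most-positive otherwise.
  return rewriter.getFloatAttr(
      fpTy,
      APFloat::getLargest(fpTy.getFloatSemantics(), /*Negative=*/getMin));
}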
@@ -1184,7 +1184,7 @@ public:
     if (reduceEnum == torch_upstream::ReductionType::MEAN) {
       SmallVector<Value> selfShape =
           getTensorSizes(rewriter, loc, adaptor.getSelf());
-      Attribute initAttr;
+      TypedAttr initAttr;
       if (llvm::isa<mlir::FloatType>(srcType.getElementType())) {
         initAttr = rewriter.getFloatAttr(srcType.getElementType(), 1);
       } else if (llvm::isa<mlir::IntegerType>(srcType.getElementType())) {
@@ -1220,13 +1220,13 @@ public:
     } else if (reduceEnum == torch_upstream::ReductionType::MAX) {
       // Set the values in the input tensor to the smallest element of that
       // type
-      auto minAttr = getNumericLimit(rewriter, srcType.getElementType(),
+      TypedAttr minAttr = getNumericLimit(rewriter, srcType.getElementType(),
                                      /*getMin=*/true);
       normalizationValue = rewriter.create<arith::ConstantOp>(loc, minAttr);
     } else if (reduceEnum == torch_upstream::ReductionType::MIN) {
       // Set the values in the input tensor to the largest element of that
       // type
-      auto maxAttr = getNumericLimit(rewriter, srcType.getElementType(),
+      TypedAttr maxAttr = getNumericLimit(rewriter, srcType.getElementType(),
                                      /*getMin=*/false);
       normalizationValue = rewriter.create<arith::ConstantOp>(loc, maxAttr);
     }
@@ -197,7 +197,7 @@ Value getTensorSize(OpBuilder &b, Location loc, Value tensor) {

 // Creates a constant of type `elemType` with value `val`.
 Value getConstant(OpBuilder &b, Location loc, int64_t val, Type elemType) {
-  Attribute attr = {};
+  TypedAttr attr = {};
   if (elemType.isa<mlir::FloatType>())
     attr = b.getFloatAttr(elemType, val);
   if (elemType.isa<mlir::IndexType>())
@@ -73,5 +73,5 @@ Operation *TorchConversionDialect::materializeConstant(OpBuilder &builder,
                                             value.cast<IntegerAttr>());
   }

-  return builder.create<arith::ConstantOp>(loc, value, type);
+  return arith::ConstantOp::materialize(builder, value, type, loc);
 }
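arith::ConstantOp::materialize is the upstream entry point this commit switches to; as far as I can tell it checks that the (value, type) pair is something arith.constant can hold and returns a null op otherwise, rather than asserting inside builder.create. A sketch of how a dialect's constant-materialization hook typically wraps it (the standalone function name is hypothetical):

#include "mlir/Dialect/Arith/IR/Arith.h"

using namespace mlir;

// Sketch of a materializeConstant-style hook: returns nullptr when arith
// cannot represent the (value, type) pair, letting the caller fall back
// instead of crashing.
static Operation *materializeConstantSketch(OpBuilder &builder,
                                            Attribute value, Type type,
                                            Location loc) {
  return arith::ConstantOp::materialize(builder, value, type, loc);
}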
@@ -77,7 +77,7 @@ class VerifyLinalgOnTensorsBackendContractPass
     // Tensor operations should go through linalg and the tensor dialect.
     target.addDynamicallyLegalDialect<linalg::LinalgDialect>(opHasLegalTypes);
     target.addDynamicallyLegalDialect<tensor::TensorDialect>(opHasLegalTypes);
-    target.addDynamicallyLegalDialect<AffineDialect>(opHasLegalTypes);
+    target.addDynamicallyLegalDialect<affine::AffineDialect>(opHasLegalTypes);
     target.addDynamicallyLegalDialect<cf::ControlFlowDialect>(opHasLegalTypes);
     target.addDynamicallyLegalDialect<TMTensorDialect>(opHasLegalTypes);
     target.addDynamicallyLegalDialect<scf::SCFDialect>(opHasLegalTypes);
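The last hunk reflects an upstream rename rather than the cast requirement: AffineDialect now lives in the mlir::affine namespace, so every registration needs the qualified name. A sketch against a generic conversion target (the function name is illustrative):

#include "mlir/Dialect/Affine/IR/AffineOps.h"
#include "mlir/Transforms/DialectConversion.h"

using namespace mlir;

// Sketch: marking the affine dialect legal on a conversion target with the
// newly namespace-qualified dialect class.
static void markAffineLegalSketch(ConversionTarget &target) {
  // Previously: target.addLegalDialect<AffineDialect>();
  target.addLegalDialect<affine::AffineDialect>();
}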