[build] Update llvm tag to a3f2751f (#2397)

This commit updates the `llvm-project` and `mlir-hlo` submodules to
commits:

llvm-project: a3f2751f782f3cdc6ba4790488ec20163a40ac37
mlir-hlo: 97c7e4b4506c3a2441c923e592833f45da439009

Changes made:

- Rename `getSuccessorEntryOperands` to `getEntrySuccessorOperands`
and remove `operands` from
`getSuccessorRegions` (https://reviews.llvm.org/D157506)
- Make `TypeConverter` `const` (https://reviews.llvm.org/D157601)
pull/2294/merge snapshot-20230816.932
Ramiro Leal-Cavazos 2023-08-15 16:53:28 +00:00 committed by GitHub
parent 94f7593c9b
commit 41bafe13cc
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
15 changed files with 43 additions and 48 deletions

@ -1 +1 @@
Subproject commit f580901d5d30e37755212f1c09e5b587587fbfeb
Subproject commit a3f2751f782f3cdc6ba4790488ec20163a40ac37

2
externals/mlir-hlo vendored

@ -1 +1 @@
Subproject commit 503736d156c25022813c51cbdbe3b862d67a6916
Subproject commit 97c7e4b4506c3a2441c923e592833f45da439009

View File

@ -76,7 +76,7 @@ SmallVector<Value> getAsConstantIndexValues(OpBuilder &b, Location loc,
// convert their elements to valid target type.
// TODO: remove this when list gets full support.
SmallVector<Value> getTypeConvertedValues(OpBuilder &b, Location loc,
TypeConverter *converter,
const TypeConverter *converter,
SmallVectorImpl<Value> &vs);
mlir::RankedTensorType GetTypeFromTensorShape(llvm::ArrayRef<int64_t> shape,

View File

@ -507,7 +507,7 @@ def Torch_PrimCallMethodOp : Torch_Op<"prim.CallMethod", []> {
}
def Torch_PrimLoopOp : Torch_Op<"prim.Loop", [
DeclareOpInterfaceMethods<RegionBranchOpInterface, ["getSuccessorEntryOperands"]>]> {
DeclareOpInterfaceMethods<RegionBranchOpInterface, ["getEntrySuccessorOperands"]>]> {
let summary = "TorchScript prim::Loop op";
let description = [{
This op (together with prim.Loop.condition) define a looping construct

View File

@ -328,7 +328,7 @@ public:
SmallVector<int64_t> inputShape =
makeShapeTorchCompatible(inputType.getShape());
int64_t inputRank = inputType.getRank();
TypeConverter *typeConverter = getTypeConverter();
const TypeConverter *typeConverter = getTypeConverter();
auto resultType =
typeConverter->convertType(op.getType()).cast<RankedTensorType>();
int64_t resultRank = resultType.getRank();
@ -695,7 +695,7 @@ public:
Value input = adaptor.getSelf();
auto inputType = input.getType().cast<RankedTensorType>();
int64_t inputRank = inputType.getRank();
TypeConverter *typeConverter = getTypeConverter();
const TypeConverter *typeConverter = getTypeConverter();
auto resultType =
typeConverter->convertType(op.getType()).cast<RankedTensorType>();
int64_t resultRank = resultType.getRank();
@ -804,7 +804,7 @@ public:
op, "unimplemented: dim(th) dimension is not expected to be dynamic");
}
TypeConverter *typeConverter = getTypeConverter();
const TypeConverter *typeConverter = getTypeConverter();
auto resultType =
typeConverter->convertType(op.getType()).cast<RankedTensorType>();
int64_t resultRank = resultType.getRank();
@ -1046,7 +1046,7 @@ public:
return failure();
Location loc = op.getLoc();
TypeConverter *typeConverter = getTypeConverter();
const TypeConverter *typeConverter = getTypeConverter();
auto input = adaptor.getSelf();
RankedTensorType resultType =
@ -1081,7 +1081,7 @@ public:
if (failed(verifyLinalgCompatibleTypes(op, rewriter)))
return failure();
Location loc = op.getLoc();
TypeConverter *typeConverter = getTypeConverter();
const TypeConverter *typeConverter = getTypeConverter();
// Collect all the tensors to be concatenated.
auto tensorList = op.getTensors();
@ -1312,7 +1312,7 @@ public:
return failure();
Location loc = op.getLoc();
TypeConverter *typeConverter = getTypeConverter();
const TypeConverter *typeConverter = getTypeConverter();
auto input = adaptor.getSelf();
@ -1361,7 +1361,7 @@ public:
return failure();
Location loc = op.getLoc();
TypeConverter *typeConverter = getTypeConverter();
const TypeConverter *typeConverter = getTypeConverter();
MLIRContext *context = rewriter.getContext();
auto input = adaptor.getSelf();

View File

@ -32,7 +32,7 @@ using namespace mlir::torch::Torch;
template <typename OpTy>
static LogicalResult
checkAndGetPoolingParameters(OpTy op, ConversionPatternRewriter &rewriter,
TypeConverter *typeConverter, bool &ceilMode,
const TypeConverter *typeConverter, bool &ceilMode,
SmallVectorImpl<Value> &kernelSizeIntValues,
SmallVectorImpl<int64_t> &strideInts,
SmallVectorImpl<int64_t> &paddingInts) {
@ -72,7 +72,6 @@ checkAndGetPoolingParameters(OpTy op, ConversionPatternRewriter &rewriter,
return success();
}
// Creates a pooling operation based on the type specified by `OpTy` and
// arguments passed.
template <typename OpTy>
@ -153,7 +152,7 @@ public:
if (failed(verifyLinalgCompatibleTypes(op, rewriter)))
return failure();
TypeConverter *typeConverter = getTypeConverter();
const TypeConverter *typeConverter = getTypeConverter();
Value self = adaptor.getSelf();
int64_t selfRank = self.getType().cast<RankedTensorType>().getRank();
// TODO: Add support for 3D inputs.
@ -225,7 +224,7 @@ public:
if (failed(verifyLinalgCompatibleTypes(op, rewriter)))
return failure();
Location loc = op->getLoc();
TypeConverter *typeConverter = getTypeConverter();
const TypeConverter *typeConverter = getTypeConverter();
Value self = adaptor.getSelf();
RankedTensorType selfType = self.getType().cast<RankedTensorType>();
Type elementType = selfType.getElementType();
@ -386,7 +385,7 @@ public:
return failure();
Location loc = op->getLoc();
TypeConverter *typeConverter = this->getTypeConverter();
const TypeConverter *typeConverter = this->getTypeConverter();
Value self = adaptor.getSelf();
Type inputElementType =

View File

@ -106,7 +106,7 @@ public:
}
Location loc = op.getLoc();
TypeConverter *typeConverter = this->getTypeConverter();
const TypeConverter *typeConverter = this->getTypeConverter();
SmallVector<Value> resultSizeTorchInt, resultSize, resultSizeIndex;
if (!getListConstructElements(op.getSize(), resultSizeTorchInt)) {
return rewriter.notifyMatchFailure(
@ -211,7 +211,7 @@ public:
}
Location loc = op.getLoc();
TypeConverter *typeConverter = this->getTypeConverter();
const TypeConverter *typeConverter = this->getTypeConverter();
SmallVector<Value> resultSizeTorchInt, resultSize, resultSizeIndex;
if (!getListConstructElements(op.getSize(), resultSizeTorchInt)) {
return rewriter.notifyMatchFailure(
@ -282,7 +282,7 @@ public:
}
Location loc = op.getLoc();
TypeConverter *typeConverter = this->getTypeConverter();
const TypeConverter *typeConverter = this->getTypeConverter();
RankedTensorType resultType =
typeConverter->convertType(op->getResult(0).getType())
.cast<RankedTensorType>();

View File

@ -127,8 +127,10 @@ static Value buildUnitNormalCdf(OpBuilder &b, Location &loc, Value x) {
}
template <typename MathOpTy>
static Value createCalculationForMathOpWithDtypeConversion(
OpBuilder &b, TypeConverter *converter, Value payloadArg, Operation *op) {
static Value
createCalculationForMathOpWithDtypeConversion(OpBuilder &b,
const TypeConverter *converter,
Value payloadArg, Operation *op) {
Type dtype = converter->convertType(op->getResult(0).getType())
.template cast<RankedTensorType>()
.getElementType();
@ -207,7 +209,7 @@ createTriangularMatrix(OpBuilder &b, Location loc, ValueRange payloadArgs,
}
static Value createLinalgPayloadCalculationForElementwiseOp(
OpBuilder &b, Location loc, TypeConverter *converter,
OpBuilder &b, Location loc, const TypeConverter *converter,
ValueRange payloadArgs, Operation *op, ArrayRef<Value> operands) {
if (isa<AtenFloorOp>(op))
return b.create<math::FloorOp>(loc, payloadArgs[0]);

View File

@ -77,7 +77,7 @@ public:
if (op.isForLike())
return failure();
TypeConverter *typeConverter = getTypeConverter();
const TypeConverter *typeConverter = getTypeConverter();
SmallVector<Type, 1> newResultTypes;
if (failed(
typeConverter->convertTypes(op.getResultTypes(), newResultTypes)))
@ -217,7 +217,7 @@ public:
if (!op.isForLike())
return failure();
TypeConverter *typeConverter = getTypeConverter();
const TypeConverter *typeConverter = getTypeConverter();
SmallVector<Type, 1> newResultTypes;
if (failed(
typeConverter->convertTypes(op.getResultTypes(), newResultTypes)))

View File

@ -1555,7 +1555,7 @@ LogicalResult ConvertAtenOp<AtenEmptyMemoryFormatOp>::matchAndRewrite(
}
Location loc = op.getLoc();
TypeConverter *typeConverter = this->getTypeConverter();
const TypeConverter *typeConverter = this->getTypeConverter();
SmallVector<Value> resultSizeTorchInt, resultSize, resultSizeIndex;
if (!getListConstructElements(op.getSize(), resultSizeTorchInt)) {
return rewriter.notifyMatchFailure(

View File

@ -342,7 +342,7 @@ LogicalResult ConvertAtenOp<AtenSliceScatterOp>::matchAndRewrite(
return failure();
Location loc = op.getLoc();
TypeConverter *typeConverter = getTypeConverter();
const TypeConverter *typeConverter = getTypeConverter();
auto input = adaptor.getSelf();

View File

@ -309,7 +309,7 @@ public:
if (failed(verifyLinalgCompatibleTypes(op, rewriter)))
return failure();
Location loc = op.getLoc();
TypeConverter *typeConverter = getTypeConverter();
const TypeConverter *typeConverter = getTypeConverter();
Value self = adaptor.getSelf();
Value index = adaptor.getIndex();
Value src = adaptor.getSrc();
@ -361,7 +361,7 @@ public:
return failure();
Location loc = op.getLoc();
MLIRContext *context = op->getContext();
TypeConverter *typeConverter = getTypeConverter();
const TypeConverter *typeConverter = getTypeConverter();
Value input = adaptor.getSelf();
Value torchTypeInput = op.getSelf();
Value minlength = adaptor.getMinlength();

View File

@ -2121,7 +2121,7 @@ LogicalResult ConvertAtenOp<AtenBatchNormOp>::matchAndRewrite(
// reshaped so it sits on the same dim as 'C'.
auto reshapeToNormInputDim = [&](Operation *op,
ConversionPatternRewriter &rewriter,
TypeConverter *converter, Type outType,
const TypeConverter *converter, Type outType,
const Value toBcast, Value &result) {
RankedTensorType toBcastType =
toBcast.getType().dyn_cast<RankedTensorType>();
@ -3809,7 +3809,7 @@ LogicalResult ConvertAtenOp<AtenArangeStartStepOp>::matchAndRewrite(
AtenArangeStartStepOp op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const {
TypeConverter *typeConverter = this->getTypeConverter();
const TypeConverter *typeConverter = this->getTypeConverter();
RankedTensorType resultType =
typeConverter->convertType(op->getResult(0).getType())
.cast<RankedTensorType>();
@ -3859,7 +3859,7 @@ LogicalResult ConvertAtenOp<PrimNumToTensorScalarOp>::matchAndRewrite(
PrimNumToTensorScalarOp op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const {
TypeConverter *typeConverter = this->getTypeConverter();
const TypeConverter *typeConverter = this->getTypeConverter();
RankedTensorType resultType =
typeConverter->convertType(op->getResult(0).getType())
.cast<RankedTensorType>();
@ -4673,7 +4673,7 @@ template <>
LogicalResult ConvertAtenOp<AtenCatOp>::matchAndRewrite(
AtenCatOp op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const {
TypeConverter *typeConverter = this->getTypeConverter();
const TypeConverter *typeConverter = this->getTypeConverter();
auto outType =
typeConverter->convertType(op.getType()).cast<RankedTensorType>();
int64_t rank = outType.getRank();

View File

@ -230,7 +230,7 @@ SmallVector<Value> getAsConstantIndexValues(OpBuilder &b, Location loc,
// convert their elements to valid target type.
// TODO: remove this when list gets full support.
SmallVector<Value> getTypeConvertedValues(OpBuilder &b, Location loc,
TypeConverter *converter,
const TypeConverter *converter,
SmallVectorImpl<Value> &vs) {
return llvm::to_vector<4>(llvm::map_range(vs, [&](Value v) {
return converter->materializeTargetConversion(

View File

@ -302,15 +302,13 @@ LogicalResult ClassTypeOp::verify() {
//===----------------------------------------------------------------------===//
OperandRange
PrimLoopOp::getSuccessorEntryOperands(std::optional<unsigned int> index) {
PrimLoopOp::getEntrySuccessorOperands(std::optional<unsigned int> index) {
assert(index.has_value() && index.value() == 0);
return getIterArgsInit();
}
void PrimLoopOp::getSuccessorRegions(
std::optional<unsigned> index, ArrayRef<Attribute> operands,
SmallVectorImpl<RegionSuccessor> &regions) {
(void)operands;
std::optional<unsigned> index, SmallVectorImpl<RegionSuccessor> &regions) {
if (!index.has_value()) {
regions.emplace_back(&getRegion(), getRegion().getArguments().slice(1));
@ -381,7 +379,6 @@ void PrimIfOp::print(OpAsmPrinter &p) {
}
void PrimIfOp::getSuccessorRegions(std::optional<unsigned> index,
ArrayRef<Attribute> operands,
SmallVectorImpl<RegionSuccessor> &regions) {
// The `then` and the `else` region branch back to the parent operation.
if (index.has_value()) {
@ -390,9 +387,9 @@ void PrimIfOp::getSuccessorRegions(std::optional<unsigned> index,
}
// If the condition is constant, we can give a more precise answer.
if (auto condAttr = operands.front().dyn_cast_or_null<IntegerAttr>()) {
Region *executedRegion =
condAttr.getValue().isOne() ? &getThenRegion() : &getElseRegion();
bool condition;
if (matchPattern(getCondition(), m_TorchConstantBool(&condition))) {
Region *executedRegion = condition ? &getThenRegion() : &getElseRegion();
regions.push_back(RegionSuccessor(executedRegion));
return;
}
@ -2720,7 +2717,6 @@ OpFoldResult PrimMinIntOp::fold(FoldAdaptor adaptor) {
template <typename CalculateOp>
static void
getSuccessorRegionsForCalculateOp(CalculateOp op, std::optional<unsigned> index,
ArrayRef<Attribute> operands,
SmallVectorImpl<RegionSuccessor> &regions) {
if (!index.has_value()) {
// First thing the op does is branch into the calculation.
@ -2738,9 +2734,8 @@ getSuccessorRegionsForCalculateOp(CalculateOp op, std::optional<unsigned> index,
}
void ShapeCalculateOp::getSuccessorRegions(
std::optional<unsigned> index, ArrayRef<Attribute> operands,
SmallVectorImpl<RegionSuccessor> &regions) {
getSuccessorRegionsForCalculateOp(*this, index, operands, regions);
std::optional<unsigned> index, SmallVectorImpl<RegionSuccessor> &regions) {
getSuccessorRegionsForCalculateOp(*this, index, regions);
}
//===----------------------------------------------------------------------===//
@ -2748,9 +2743,8 @@ void ShapeCalculateOp::getSuccessorRegions(
//===----------------------------------------------------------------------===//
void DtypeCalculateOp::getSuccessorRegions(
std::optional<unsigned> index, ArrayRef<Attribute> operands,
SmallVectorImpl<RegionSuccessor> &regions) {
getSuccessorRegionsForCalculateOp(*this, index, operands, regions);
std::optional<unsigned> index, SmallVectorImpl<RegionSuccessor> &regions) {
getSuccessorRegionsForCalculateOp(*this, index, regions);
}
//===----------------------------------------------------------------------===//