build: update llvm tag to bebc9695 (#1415)

Summary of changes:
 - Renamed OptionalArrayRefParameter to OptionalArrayRefTorchParameter,
   since the name conflicts with an upstream symbol that has a different
   meaning (https://reviews.llvm.org/D133819)
 - Removed the extraneous dependency between TorchMLIRTorchToMhlo and
   ChloOps, since the existing dependency on MhloDialect is sufficient
 - Fixed code to prevent warnings about comparisons between signed and
   unsigned values (see the sketch after this list)
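
For illustration, a minimal, self-contained sketch of the signed/unsigned warning pattern being fixed (the function and variable names here are hypothetical, not from the patch):

```cpp
#include <cstdint>
#include <vector>

// Under -Wsign-compare, comparing a signed index against the unsigned
// result of size() emits "comparison of integer expressions of different
// signedness". The fix used in this commit keeps the index signed and
// casts size() explicitly.
void skipDim(const std::vector<int64_t> &shape, int64_t resultDim) {
  for (int64_t k = 0; k < static_cast<int64_t>(shape.size()); ++k) {
    if (k == resultDim) // both operands are int64_t, so no warning
      continue;
  }
}
```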
commit a60acf272d (parent 3e27aa2be3)
Author: Ashay Rane (committed by GitHub)
Date:   2022-09-26 11:44:54 -05:00
5 changed files with 9 additions and 10 deletions

externals/llvm-project vendored
@@ -1 +1 @@
-Subproject commit 458598ccc50c5118107f05d60f3d043772a91f26
+Subproject commit bebc96956b76bdbc36f1d82a788c810e5b12e2c5

externals/mlir-hlo vendored

@@ -1 +1 @@
-Subproject commit cd9da150e729fd046109e7962e5f63f5fe067a3b
+Subproject commit 7b0ecf7827e3fc07d2af90e147bcedc165bc78ac


@@ -45,7 +45,7 @@ def Torch_NnModuleType : Torch_Type<"NnModule", "nn.Module"> {
}
// For standard ArrayRefs, which require allocation.
-class OptionalArrayRefParameter<string arrayOf, string desc = ""> :
+class OptionalArrayRefTorchParameter<string arrayOf, string desc = ""> :
AttrOrTypeParameter<
"::llvm::Optional<::llvm::ArrayRef<" # arrayOf # ">>", desc> {
let allocator = [{
@@ -146,7 +146,7 @@ class AnyTorchTensorType<string name, string typeMnemonic>
- `getElementType()` -> `getDtype()` (but be sure that `hasDtype()` though).
}];
let parameters = (ins
OptionalArrayRefParameter<"int64_t", "sizes of dimensions">:$optionalSizes,
OptionalArrayRefTorchParameter<"int64_t", "sizes of dimensions">:$optionalSizes,
"::mlir::Type":$optionalDtype
);
let genVerifyDecl = 1;
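
Aside: the renamed parameter presumably keeps its llvm::Optional wrapper so that a tensor of unknown rank (no size list at all) stays distinct from a ranked tensor with zero dimensions. A stand-in sketch of that distinction, using std::optional in place of llvm::Optional (names hypothetical):

```cpp
#include <cstdint>
#include <optional>
#include <vector>

// Stand-in for llvm::Optional<llvm::ArrayRef<int64_t>>: nullopt models a
// tensor of unknown rank, while an empty list models a known rank-0 tensor.
// A plain, non-optional list could not distinguish these two states.
using OptionalSizes = std::optional<std::vector<int64_t>>;

bool hasKnownRank(const OptionalSizes &sizes) { return sizes.has_value(); }
bool isRankZero(const OptionalSizes &sizes) {
  return sizes.has_value() && sizes->empty();
}
```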


@@ -22,7 +22,6 @@ add_mlir_conversion_library(TorchMLIRTorchToMhlo
Core
LINK_LIBS PUBLIC
-ChloOps
MLIRIR
MLIRPass
MhloDialect


@@ -102,13 +102,13 @@ RankedTensorType castContractingDim(PatternRewriter &rewriter, Operation *op,
}
SmallVector<int64_t> outShape;
// set batch dims, will skip invalid dimensions
-for (size_t k = 0; k < lhsShape.size(); ++k) {
+for (int64_t k = 0; k < static_cast<int64_t>(lhsShape.size()); ++k) {
if (k == lhsResultDim || k == lhsContractingDim)
continue;
outShape.push_back(lhsShape[k]);
}
-for (size_t k = 0, b = 0; k < rhsShape.size(); ++k) {
-if (b >= outShape.size())
+for (int64_t k = 0, b = 0; k < static_cast<int64_t>(rhsShape.size()); ++k) {
+if (b >= static_cast<int64_t>(outShape.size()))
break;
if (k == rhsResultDim || k == rhsContractingDim)
continue;
@@ -119,10 +119,10 @@ RankedTensorType castContractingDim(PatternRewriter &rewriter, Operation *op,
}
// set result dimensions
-if (lhsResultDim < lhsShape.size() && lhsResultDim >= 0) {
+if (lhsResultDim < static_cast<int64_t>(lhsShape.size()) && lhsResultDim >= 0) {
outShape.push_back(lhsShape[lhsResultDim]);
}
-if (rhsResultDim < rhsShape.size() && rhsResultDim >= 0) {
+if (rhsResultDim < static_cast<int64_t>(rhsShape.size()) && rhsResultDim >= 0) {
outShape.push_back(rhsShape[rhsResultDim]);
}
return RankedTensorType::get(outShape, lhsTy.getElementType());
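
For intuition, a simplified, self-contained mirror of the outShape computation above (the rhs batch-dim pass is omitted, and the helper name is hypothetical): lhs dims that are neither the result nor the contracting dim are kept as batch dims, then the lhs and rhs result dims are appended.

```cpp
#include <cstdint>
#include <iostream>
#include <vector>

// Mirrors the shape logic above, with int64_t indices throughout to match
// the signedness fix in this commit.
std::vector<int64_t> contractShape(const std::vector<int64_t> &lhsShape,
                                   int64_t lhsResultDim, int64_t lhsContractingDim,
                                   const std::vector<int64_t> &rhsShape,
                                   int64_t rhsResultDim) {
  std::vector<int64_t> outShape;
  // Batch dims: every lhs dim that is neither the result nor the
  // contracting dim.
  for (int64_t k = 0; k < static_cast<int64_t>(lhsShape.size()); ++k)
    if (k != lhsResultDim && k != lhsContractingDim)
      outShape.push_back(lhsShape[k]);
  // Result dims, bounds-checked exactly like the code above.
  if (lhsResultDim < static_cast<int64_t>(lhsShape.size()) && lhsResultDim >= 0)
    outShape.push_back(lhsShape[lhsResultDim]);
  if (rhsResultDim < static_cast<int64_t>(rhsShape.size()) && rhsResultDim >= 0)
    outShape.push_back(rhsShape[rhsResultDim]);
  return outShape;
}

int main() {
  // Batched matmul [2, 4, 8] x [2, 8, 16]: prints "2 4 16".
  for (int64_t d : contractShape({2, 4, 8}, /*lhsResultDim=*/1,
                                 /*lhsContractingDim=*/2, {2, 8, 16},
                                 /*rhsResultDim=*/2))
    std::cout << d << ' ';
  std::cout << '\n';
}
```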