mirror of https://github.com/llvm/torch-mlir
Bump revision and fix issues.

* llvm revision = 4836188ad9b3334b0c1e055d45ccaa54ed797e4b
* iree revision = 091482e8fdf599d6cb5c701d5b3ccb27fc66c014
parent 529873d13c
commit fc5f10c5c5

@@ -27,7 +27,7 @@ export LDFLAGS=-fuse-ld=$(which ld.lld-$LLVM_VERSION)
 export LLVM_SRC_DIR=/path/to/llvm-project
 
 # Check out last known good commit.
-(cd $LLVM_SRC_DIR && git checkout 52cae05e087b3d4fd02849fc37c387c720055ffb)
+(cd $LLVM_SRC_DIR && git checkout 4836188ad9b3334b0c1e055d45ccaa54ed797e4b)
 
 ./tools/install_mlir.sh
 ./tools/cmake_configure.sh

@@ -26,7 +26,7 @@ public:
   LogicalResult
   matchAndRewrite(shape::BroadcastOp op, ArrayRef<Value> operands,
                   ConversionPatternRewriter &rewriter) const override {
-    shape::BroadcastOp::OperandAdaptor adaptor(operands);
+    shape::BroadcastOp::Adaptor adaptor(operands);
     auto lhs = adaptor.lhs().getDefiningOp<shape::FromExtentsOp>();
     auto rhs = adaptor.rhs().getDefiningOp<shape::FromExtentsOp>();
     if (!lhs || !rhs)

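Most of the C++ hunks in this commit are the same mechanical update: the bumped LLVM revision renamed the ODS-generated FooOp::OperandAdaptor class to FooOp::Adaptor, so every conversion pattern that wraps its operands list in an adaptor needs the one-word change. Below is a minimal sketch of what such a pattern looks like after the bump; the op MyOp and its operand "input" are hypothetical placeholders, not names from this repo.

// Minimal sketch, assuming MLIR at the revision pinned above.
// MyOp and its "input" operand are hypothetical placeholders.
#include "mlir/Transforms/DialectConversion.h"
using namespace mlir;

class LowerMyOp : public OpConversionPattern<MyOp> {
public:
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(MyOp op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const override {
    // Previously spelled: MyOp::OperandAdaptor adaptor(operands);
    MyOp::Adaptor adaptor(operands);
    // The adaptor gives named access to the already-converted operands.
    rewriter.replaceOp(op, adaptor.input());
    return success();
  }
};

The generated class is unchanged apart from the name, which is why each hunk below touches only the declaration line.
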
@@ -88,7 +88,7 @@ public:
   LogicalResult
   matchAndRewrite(tcp::GetExtentOp op, ArrayRef<Value> operands,
                   ConversionPatternRewriter &rewriter) const override {
-    tcp::GetExtentOp::OperandAdaptor adaptor(operands);
+    tcp::GetExtentOp::Adaptor adaptor(operands);
     auto fromExtents = adaptor.shape().getDefiningOp<shape::FromExtentsOp>();
     if (!fromExtents)
       return rewriter.notifyMatchFailure(op, "not a from_extents op");

@@ -28,7 +28,7 @@ public:
   LogicalResult
   matchAndRewrite(tcp::AbortIfOp op, ArrayRef<Value> operands,
                   ConversionPatternRewriter &rewriter) const override {
-    tcp::AbortIfOp::OperandAdaptor adaptor(operands);
+    tcp::AbortIfOp::Adaptor adaptor(operands);
     rewriter.replaceOpWithNewOp<LLVM::CallOp>(op, abortIfFunc, adaptor.pred());
     return success();
   }

@@ -67,7 +67,7 @@ class LowerToLLVM : public LowerToLLVMBase<LowerToLLVM> {
     populateStdToLLVMConversionPatterns(converter, patterns);
     patterns.insert<LowerAbortIf>(abortIfFunc);
 
-    if (failed(applyFullConversion(module, target, patterns, &converter))) {
+    if (failed(applyFullConversion(module, target, patterns))) {
       return signalPassFailure();
     }
   }

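The other API change picked up by the bump shows in this hunk: applyFullConversion no longer takes a TypeConverter pointer as a trailing argument; patterns that need type conversion now receive the converter when they are constructed. A rough sketch of the surrounding pass body under that assumption follows; module, abortIfFunc, and LowerAbortIf are taken from the hunk above, and the exact setup in the file may differ.

// Sketch only: the TypeConverter now travels with the patterns instead of
// being handed to the conversion driver. Runs inside the pass body, where
// module, abortIfFunc, and LowerAbortIf are as in the hunk above.
LLVMTypeConverter converter(module.getContext());
OwningRewritePatternList patterns;
populateStdToLLVMConversionPatterns(converter, patterns);
patterns.insert<LowerAbortIf>(abortIfFunc);

ConversionTarget target(*module.getContext());
target.addLegalDialect<LLVM::LLVMDialect>();

// Old signature: applyFullConversion(module, target, patterns, &converter)
if (failed(applyFullConversion(module, target, patterns)))
  return signalPassFailure();
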
@@ -28,7 +28,7 @@ public:
   LogicalResult
   matchAndRewrite(TensorStoreOp op, ArrayRef<Value> operands,
                   ConversionPatternRewriter &rewriter) const override {
-    TensorStoreOp::OperandAdaptor adaptor(operands);
+    TensorStoreOp::Adaptor adaptor(operands);
     // The tensor has been converted to an unranked memref. We need to cast
     // it to the original memref type and copy it to the destination.
     //

@@ -52,7 +52,7 @@ public:
   LogicalResult
   matchAndRewrite(TensorLoadOp op, ArrayRef<Value> operands,
                   ConversionPatternRewriter &rewriter) const override {
-    TensorLoadOp::OperandAdaptor adaptor(operands);
+    TensorLoadOp::Adaptor adaptor(operands);
     auto type = UnrankedMemRefType::get(op.getType().getElementType(), 0);
     // TODO: This won't work. The LLVM unranked memref calling convention
     // doesn't allow returning an unranked memref becuase it lowers it to

@@ -93,7 +93,7 @@ public:
   LogicalResult
   matchAndRewrite(shape::ShapeOfOp op, ArrayRef<Value> operands,
                   ConversionPatternRewriter &rewriter) const override {
-    shape::ShapeOfOp::OperandAdaptor adaptor(operands);
+    shape::ShapeOfOp::Adaptor adaptor(operands);
     auto tensorType = op.arg().getType().cast<RankedTensorType>();
     auto rankedMemRefType =
         MemRefType::get(tensorType.getShape(), tensorType.getElementType());