Bump llvm and stablehlo (#3377)

* bump llvm to 1e5f29af81a5f6fda308074f6345b9fba4faa71c
* bump stablehlo to c44d9af8d4879adccf1054cb61a53377ae5898cb
Yuanqiang Liu 2024-05-22 23:28:45 +08:00 committed by GitHub
parent 4d7cdba4bf
commit f4bfe3f948
6 changed files with 64 additions and 16 deletions
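
In short, the new pins require two kinds of call-site updates in torch-mlir, both visible in the diff below. The StableHLO gather/scatter dimension-numbers attributes gained operand/input and indices batching-dimension fields, which every call site now fills with empty lists to keep the previous, non-batched behavior; and the view/reshape lowering no longer emits stablehlo.compute_reshape_shape, instead resolving a single -1 target dimension itself with arith.index_cast and a chain of arith.divui ops. The test-suite and FileCheck updates below follow from those two changes.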

externals/llvm-project vendored

@@ -1 +1 @@
-Subproject commit 70e227a404e51f9248c7ad5d79953805b2afacb4
+Subproject commit 1e5f29af81a5f6fda308074f6345b9fba4faa71c

externals/stablehlo vendored

@@ -1 +1 @@
-Subproject commit ab92adeda9119a6c3914cd42367b0a2b70765e91
+Subproject commit c44d9af8d4879adccf1054cb61a53377ae5898cb


@@ -101,6 +101,8 @@ Value gatherTensorAlongSingleAxis(PatternRewriter &rewriter, Operation *op,
       rewriter.getContext(),
       /*offsetDims=*/offsetDims,
       /*collapsedSliceDims=*/collapsedSliceDims,
+      /*operandBatchingDims=*/{},
+      /*startIndicesBatchingDims=*/{},
       /*startIndexMap=*/startIndexMap,
       /*indexVecDim=*/indexVecDim);
@@ -584,6 +586,8 @@ LogicalResult ConvertAtenOp<AtenGatherOp>::matchAndRewrite(
       rewriter.getContext(),
       /*offsetDims=*/{},
       /*collapsedSliceDims=*/collapsedDims,
+      /*operandBatchingDims=*/{},
+      /*startIndicesBatchingDims=*/{},
       /*startIndexMap=*/startIndexMap,
       /*indexVecDim=*/indexVecDim);
@@ -744,6 +748,8 @@ public:
       rewriter.getContext(),
       /*updateWindowDims=*/{},
       /*insertedWindowDims=*/insertedWindowDims,
+      /*inputBatchingDims=*/{},
+      /*scatterIndicesBatchingDims=*/{},
       /*scatterDimsToOperandDim=*/scatterDimOperandDimMap,
       /*indexVectorDim=*/indexVecDim);
@@ -826,6 +832,8 @@ LogicalResult ConvertAtenOp<AtenIndexTensorHackedTwinOp>::matchAndRewrite(
       rewriter.getContext(),
       /*offsetDims=*/offsetDims,
       /*collapsedSliceDims=*/collapsedDims,
+      /*operandBatchingDims=*/{},
+      /*startIndicesBatchingDims=*/{},
       /*startIndexMap=*/startIndexMap,
       /*indexVecDim=*/indexVecDim);
@@ -900,6 +908,8 @@ LogicalResult ConvertAtenOp<AtenIndexPutHackedTwinOp>::matchAndRewrite(
       rewriter.getContext(),
       /*updateWindowDims=*/updateWindowDims,
       /*insertedWindowDims=*/insertedWindowDims,
+      /*inputBatchingDims=*/{},
+      /*scatterIndicesBatchingDims=*/{},
       /*scatterDimsToOperandDim=*/scatterDimOperandDimMap,
       /*indexVectorDim=*/indexVecDim);
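
The empty /*operandBatchingDims=*/{} and /*startIndicesBatchingDims=*/{} (and the scatter counterparts /*inputBatchingDims=*/{} and /*scatterIndicesBatchingDims=*/{}) added at the five call sites above are the mechanical fallout of the StableHLO bump: the dimension-numbers attribute builders now take batching-dimension lists, and passing empty lists preserves the existing non-batched gather/scatter lowerings.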


@@ -172,6 +172,7 @@ public:
     if (!rankType)
       return op.emitError("Only ranked tensor types are currently supported");

+    // collect Value of dims
     SmallVector<Value, 4> dimSizes;
     if (!getAtenViewOpSizes(op, adaptor, rewriter, dimSizes)) {
       return op.emitError("Dims size must be a list of Scalar");
@@ -187,6 +188,20 @@ public:
       return success();
     }

+    // collect constant dim size which == -1
+    SmallVector<size_t> negOneIndex;
+    for (size_t i = 0; i < dimSizes.size(); i++) {
+      int64_t dim;
+      if (matchPattern(dimSizes[i], m_TorchConstantInt(&dim))) {
+        if (dim == -1) {
+          negOneIndex.push_back(i);
+        }
+      }
+    }
+    if (negOneIndex.size() > 1) {
+      return op.emitError("Only support at most one -1 in view target dims");
+    }
+
     std::for_each(dimSizes.begin(), dimSizes.end(), [&](Value &dSize) {
       dSize = rewriter.create<ToI64Op>(loc, dSize).getResult();
       return dSize;
@@ -194,16 +209,29 @@ public:
     Value numel = rewriter.create<shape::NumElementsOp>(
         loc, rewriter.create<shape::ShapeOfOp>(loc, adaptor.getSelf()));
+    numel =
+        rewriter.create<arith::IndexCastOp>(loc, rewriter.getI64Type(), numel);
+
+    // note: assuming that -1 doesn't arise from dynamic value
+    if (negOneIndex.size() == 1) {
+      size_t index = negOneIndex[0];
+      Value realDim = numel;
+      for (size_t i = 0; i < dimSizes.size(); i++) {
+        if (i != index) {
+          realDim = rewriter.create<arith::DivUIOp>(loc, realDim, dimSizes[i]);
+        }
+      }
+      // update -1 to realDim
+      dimSizes[index] = realDim;
+    }
+
     Value stablehloShape =
         rewriter.create<tensor::FromElementsOp>(loc, dimSizes);
-    Value computedShape = rewriter.create<stablehlo::ComputeReshapeShapeOp>(
-        loc, stablehloShape.getType(), numel, stablehloShape);
     rewriter.replaceOpWithNewOp<stablehlo::DynamicReshapeOp>(
         op,
         OpConversionPattern<AtenOpT>::getTypeConverter()->convertType(
             op.getType()),
-        adaptor.getSelf(), computedShape);
+        adaptor.getSelf(), stablehloShape);
     return success();
   }
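
For intuition, here is a small standalone sketch of the shape arithmetic the new lowering emits, in plain C++ rather than MLIR (the function name and example sizes are illustrative, not part of the patch): the element count, cast to i64, is divided by every explicitly given target dimension, and the quotient replaces the single -1 entry, mirroring the arith.divui chain checked in the updated FileCheck tests below.

#include <cassert>
#include <cstdint>
#include <vector>

// Resolve a single -1 entry in a view/reshape target shape: divide the total
// element count by each known target dim (unsigned division, as arith.divui
// does in the emitted IR) and substitute the quotient for the -1.
std::vector<int64_t> resolveTargetShape(uint64_t numel,
                                        std::vector<int64_t> dims) {
  size_t negOneIndex = dims.size();
  for (size_t i = 0; i < dims.size(); ++i) {
    if (dims[i] == -1) {
      assert(negOneIndex == dims.size() && "at most one -1 is supported");
      negOneIndex = i;
    }
  }
  if (negOneIndex != dims.size()) {
    uint64_t realDim = numel;
    for (size_t i = 0; i < dims.size(); ++i)
      if (i != negOneIndex)
        realDim /= static_cast<uint64_t>(dims[i]);
    dims[negOneIndex] = static_cast<int64_t>(realDim);
  }
  return dims;
}

int main() {
  // A ?x?x?x? input that happens to hold 8 * 224 = 1792 elements, viewed as
  // torch.view(x, [-1, 224]) -> shape [8, 224].
  auto shape = resolveTargetShape(1792, {-1, 224});
  assert(shape[0] == 8 && shape[1] == 224);
  return 0;
}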


@@ -1449,6 +1449,13 @@ STABLEHLO_CRASHING_SET = {
 # Write the TOSA set as a "passing" set as it is very early in development
 # and very few tests work yet.
 TOSA_PASS_SET = {
+    "ElementwiseAddScalar_NumToTensorFloat_Module_basic",
+    "ElementwiseDivTensorFloatModule_basic",
+    "ElementwiseMulTensorFloatModule_basic",
+    "ElementwiseWhereScalarSelfStaticModule_basic",
+    "GroupNormModule_basic",
+    "GroupNormNoWeightAndBiasModule_basic",
+    "NativeGroupNormModule_basic",
     "AtenDotModule_basic",
     "ElementwiseFloatTensorGtIntScalarModule_basic",
     "ElementwiseLogSigmoidModule_basic",
@@ -1946,7 +1953,6 @@ MAKE_FX_TOSA_PASS_SET = (
     "Conv2dNoPaddingModule_basic",
     "Conv2dWithPaddingDilationStrideModule_basic",
     "Conv2dWithPaddingModule_basic",
-    "AtenInstanceNormModule_basic",
     # failed to legalize operation 'torch.operator'
     "ElementwisePreluModule_basic",
     "ElementwisePreluStaticModule_basic",


@@ -310,11 +310,12 @@ func.func @torch.aten.slice.none.static$slice_like(%arg0: !torch.vtensor<[4,65,2
 // CHECK: %[[T3:.*]] = torch_c.to_i64 %[[INT224]]
 // CHECK: %[[T4:.*]] = shape.shape_of %[[T0]] : tensor<?x?x?x?xf32> -> tensor<4xindex>
 // CHECK: %[[T5:.*]] = shape.num_elements %[[T4]] : tensor<4xindex> -> index
-// CHECK: %[[FROM_ELEMENTS:.*]] = tensor.from_elements %[[T2]], %[[T3]] : tensor<2xi64>
-// CHECK: %[[T6:.*]] = stablehlo.compute_reshape_shape %[[T5]], %[[FROM_ELEMENTS]] : (index, tensor<2xi64>) -> tensor<2xi64>
-// CHECK: %[[T7:.*]] = stablehlo.dynamic_reshape %[[T0]], %[[T6]] : (tensor<?x?x?x?xf32>, tensor<2xi64>) -> tensor<?x224xf32>
-// CHECK: %[[T8:.*]] = torch_c.from_builtin_tensor %[[T7]] : tensor<?x224xf32> -> !torch.vtensor<[?,224],f32>
-// CHECK: return %[[T8]] : !torch.vtensor<[?,224],f32>
+// CHECK: %[[T6:.*]] = arith.index_cast %[[T5]] : index to i64
+// CHECK: %[[T7:.*]] = arith.divui %[[T6]], %[[T3]] : i64
+// CHECK: %[[FROM_ELEMENTS:.*]] = tensor.from_elements %[[T7]], %[[T3]] : tensor<2xi64>
+// CHECK: %[[T8:.*]] = stablehlo.dynamic_reshape %[[T0]], %[[FROM_ELEMENTS]] : (tensor<?x?x?x?xf32>, tensor<2xi64>) -> tensor<?x224xf32>
+// CHECK: %[[T9:.*]] = torch_c.from_builtin_tensor %[[T8]] : tensor<?x224xf32> -> !torch.vtensor<[?,224],f32>
+// CHECK: return %[[T9]] : !torch.vtensor<[?,224],f32>
 func.func @torch.aten.view$basic(%arg0: !torch.vtensor<[?,?,?,?],f32>) -> !torch.vtensor<[?,224],f32> {
   %int-1 = torch.constant.int -1
   %int224 = torch.constant.int 224
@@ -339,11 +340,14 @@ func.func @torch.aten.view$basic(%arg0: !torch.vtensor<[?,?,?,?],f32>) -> !torch
 // CHECK: %[[T5:.*]] = torch_c.to_i64 %[[INT64]]
 // CHECK: %[[T6:.*]] = shape.shape_of %[[T0]] : tensor<?x?x?x?x?xf32> -> tensor<5xindex>
 // CHECK: %[[T7:.*]] = shape.num_elements %[[T6]] : tensor<5xindex> -> index
-// CHECK: %[[FROM_ELEMENTS:.*]] = tensor.from_elements %[[T2]], %[[T3]], %[[T4]], %[[T5]] : tensor<4xi64>
-// CHECK: %[[T8:.*]] = stablehlo.compute_reshape_shape %[[T7]], %[[FROM_ELEMENTS]] : (index, tensor<4xi64>) -> tensor<4xi64>
-// CHECK: %[[T9:.*]] = stablehlo.dynamic_reshape %[[T0]], %[[T8]] : (tensor<?x?x?x?x?xf32>, tensor<4xi64>) -> tensor<?x120x4x64xf32>
-// CHECK: %[[T10:.*]] = torch_c.from_builtin_tensor %[[T9]] : tensor<?x120x4x64xf32> -> !torch.vtensor<[?,120,4,64],f32>
-// CHECK: return %[[T10]] : !torch.vtensor<[?,120,4,64],f32>
+// CHECK: %[[T8:.*]] = arith.index_cast %[[T7]] : index to i64
+// CHECK: %[[T9:.*]] = arith.divui %[[T8]], %[[T3]] : i64
+// CHECK: %[[T10:.*]] = arith.divui %[[T9]], %[[T4]] : i64
+// CHECK: %[[T11:.*]] = arith.divui %[[T10]], %[[T5]] : i64
+// CHECK: %[[FROM_ELEMENTS:.*]] = tensor.from_elements %[[T11]], %[[T3]], %[[T4]], %[[T5]] : tensor<4xi64>
+// CHECK: %[[T12:.*]] = stablehlo.dynamic_reshape %[[T0]], %[[FROM_ELEMENTS]] : (tensor<?x?x?x?x?xf32>, tensor<4xi64>) -> tensor<?x120x4x64xf32>
+// CHECK: %[[T13:.*]] = torch_c.from_builtin_tensor %[[T12]] : tensor<?x120x4x64xf32> -> !torch.vtensor<[?,120,4,64],f32>
+// CHECK: return %[[T13]] : !torch.vtensor<[?,120,4,64],f32>
 func.func @torch.aten.reshape$basic(%arg0: !torch.vtensor<[?,?,?,?,?],f32>) -> !torch.vtensor<[?,120,4,64],f32> {
   %int-1 = torch.constant.int -1
   %int120 = torch.constant.int 120