mirror of https://github.com/llvm/torch-mlir
build: manually update PyTorch version (#3627)
Set the PyTorch and TorchVision versions to the nightly release 2024-08-18. This commit also updates the `scaled_dot_product_attention` op: a new `enable_gqa` argument has been added, and for now only its default value (`False`) is supported. Signed-off-by: Vivek Khandelwal <vivekkhandelwal1424@gmail.com>
parent 56a663690c
commit 0a86deb59a
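At the PyTorch level, this change tracks the new `enable_gqa` keyword on `torch.nn.functional.scaled_dot_product_attention` (present in the 2.5 nightlies). A minimal sketch of the call the op mirrors, with hypothetical shapes:

```python
import torch
import torch.nn.functional as F

# Hypothetical shapes: batch=2, heads=8, seq=16, head_dim=64.
q = torch.randn(2, 8, 16, 64)
k = torch.randn(2, 8, 16, 64)
v = torch.randn(2, 8, 16, 64)

# enable_gqa=False is the default, and the only value torch-mlir lowers so far.
out = F.scaled_dot_product_attention(
    q, k, v, attn_mask=None, dropout_p=0.0, is_causal=False,
    scale=None, enable_gqa=False)
print(out.shape)  # torch.Size([2, 8, 16, 64])
```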
@@ -13734,7 +13734,7 @@ def Torch_AtenScaledDotProductAttentionOp : Torch_Op<"aten.scaled_dot_product_at
     HasValueSemantics,
     ReadOnly
   ]> {
-  let summary = "Generated op for `aten::scaled_dot_product_attention : (Tensor, Tensor, Tensor, Tensor?, float, bool, float?) -> (Tensor)`";
+  let summary = "Generated op for `aten::scaled_dot_product_attention : (Tensor, Tensor, Tensor, Tensor?, float, bool, float?, bool) -> (Tensor)`";
   let arguments = (ins
     AnyTorchTensorType:$query,
     AnyTorchTensorType:$key,
@@ -13742,7 +13742,8 @@ def Torch_AtenScaledDotProductAttentionOp : Torch_Op<"aten.scaled_dot_product_at
     AnyTorchOptionalTensorType:$attn_mask,
     Torch_FloatType:$dropout_p,
     Torch_BoolType:$is_causal,
-    AnyTorchOptionalFloatType:$scale
+    AnyTorchOptionalFloatType:$scale,
+    Torch_BoolType:$enable_gqa
   );
   let results = (outs
     AnyTorchOptionalTensorType:$result
@@ -13750,10 +13751,10 @@ def Torch_AtenScaledDotProductAttentionOp : Torch_Op<"aten.scaled_dot_product_at
   let hasCustomAssemblyFormat = 1;
   let extraClassDefinition = [{
     ParseResult AtenScaledDotProductAttentionOp::parse(OpAsmParser &parser, OperationState &result) {
-      return parseDefaultTorchOp(parser, result, 7, 1);
+      return parseDefaultTorchOp(parser, result, 8, 1);
     }
     void AtenScaledDotProductAttentionOp::print(OpAsmPrinter &printer) {
-      printDefaultTorchOp(printer, *this, 7, 1);
+      printDefaultTorchOp(printer, *this, 8, 1);
     }
   }];
 }
@@ -1582,6 +1582,7 @@ public:
     Value dropoutP = op.getDropoutP();
     Value isCausal = op.getIsCausal();
     Value scale = op.getScale();
+    Value enableGQA = op.getEnableGqa();
     Type elementType =
         cast<ShapedType>(adaptor.getQuery().getType()).getElementType();
 
@@ -1604,6 +1605,11 @@ public:
       return rewriter.notifyMatchFailure(op.getLoc(),
                                          "only default scale supported");
     }
+    bool isGQAEnabled;
+    if (!matchPattern(enableGQA, m_TorchConstantBool(&isGQAEnabled)) ||
+        isGQAEnabled)
+      return rewriter.notifyMatchFailure(
+          op.getLoc(), "grouped query attention not supported");
 
     auto opTy = cast<ValueTensorType>(op.getType()).toBuiltinTensor();
     auto query = adaptor.getQuery();
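The guard above bails out of the pattern whenever `enable_gqa` is anything but the constant `false`, since the lowering does not yet implement grouped-query attention. For intuition, a sketch of what `enable_gqa=True` means at the PyTorch level (hypothetical shapes; needs a PyTorch build new enough to have the flag): several query heads share each key/value head, which is equivalent to repeating the KV heads.

```python
import torch
import torch.nn.functional as F

# Hypothetical sizes: 8 query heads sharing 2 key/value heads (groups of 4).
q = torch.randn(2, 8, 16, 64)
k = torch.randn(2, 2, 16, 64)
v = torch.randn(2, 2, 16, 64)

# With enable_gqa=True, SDPA broadcasts the 2 KV heads across the 8 query heads.
gqa = F.scaled_dot_product_attention(q, k, v, enable_gqa=True)

# Equivalent formulation with explicitly repeated KV heads, which is one way a
# future lowering could support the flag.
ref = F.scaled_dot_product_attention(
    q, k.repeat_interleave(4, dim=1), v.repeat_interleave(4, dim=1))
assert torch.allclose(gqa, ref, atol=1e-6)
```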
@@ -8832,7 +8832,7 @@ StringRef mlir::torch::Torch::getAbstractInterpLibrary() {
 "    %0 = call @__torch__.torch.jit._shape_functions.linear(%arg0, %arg1, %arg2) : (!torch.list<int>, !torch.list<int>, !torch.optional<list<int>>) -> !torch.list<int>\n"
 "    return %0 : !torch.list<int>\n"
 "  }\n"
-"  func.func @\"__torch_mlir_shape_fn.aten.scaled_dot_product_attention\"(%arg0: !torch.list<int>, %arg1: !torch.list<int>, %arg2: !torch.list<int>, %arg3: !torch.optional<list<int>>, %arg4: !torch.float, %arg5: !torch.bool, %arg6: !torch.optional<float>) -> !torch.list<int> {\n"
+"  func.func @\"__torch_mlir_shape_fn.aten.scaled_dot_product_attention\"(%arg0: !torch.list<int>, %arg1: !torch.list<int>, %arg2: !torch.list<int>, %arg3: !torch.optional<list<int>>, %arg4: !torch.float, %arg5: !torch.bool, %arg6: !torch.optional<float>, %arg7: !torch.bool) -> !torch.list<int> {\n"
 "    %int-1 = torch.constant.int -1\n"
 "    %0 = torch.aten.__getitem__.t %arg2, %int-1 : !torch.list<int>, !torch.int -> !torch.int\n"
 "    %1 = torch.aten._set_item.t %arg0, %int-1, %0 : !torch.list<int>, !torch.int, !torch.int -> !torch.list<int>\n"
@@ -12446,7 +12446,7 @@ StringRef mlir::torch::Torch::getAbstractInterpLibrary() {
 "    %int11 = torch.constant.int 11\n"
 "    return %int11 : !torch.int\n"
 "  }\n"
-"  func.func @\"__torch_mlir_dtype_fn.aten.scaled_dot_product_attention\"(%arg0: !torch.tuple<int, int>, %arg1: !torch.tuple<int, int>, %arg2: !torch.tuple<int, int>, %arg3: !torch.optional<tuple<int, int>>, %arg4: !torch.float, %arg5: !torch.bool, %arg6: !torch.optional<float>) -> !torch.int {\n"
+"  func.func @\"__torch_mlir_dtype_fn.aten.scaled_dot_product_attention\"(%arg0: !torch.tuple<int, int>, %arg1: !torch.tuple<int, int>, %arg2: !torch.tuple<int, int>, %arg3: !torch.optional<tuple<int, int>>, %arg4: !torch.float, %arg5: !torch.bool, %arg6: !torch.optional<float>, %arg7: !torch.bool) -> !torch.int {\n"
 "    %0:2 = torch.prim.TupleUnpack %arg0 : !torch.tuple<int, int> -> !torch.int, !torch.int\n"
 "    return %0#1 : !torch.int\n"
 "  }\n"
@@ -246,6 +246,9 @@ void TorchMatchSpecializedBackendOp::populateSpecializedConversions(
         llvm::SmallVector<Value> newOperands{
             oldOperands[0], oldOperands[1], oldOperands[2], oldOperands[5],
             oldOperands[3], oldOperands[4], oldOperands[6]};
+        Value enableGQA =
+            rewriter.create<ConstantBoolOp>(op->getLoc(), false);
+        newOperands.push_back(enableGQA);
 
         auto newOp = rewriter.create<Torch::AtenScaledDotProductAttentionOp>(
             op.getLoc(), op->getResultTypes()[0], newOperands,
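The index shuffle above adapts the operand order of the specialized CPU flash-attention op, `(query, key, value, dropout_p, is_causal, attn_mask, scale)`, to the generic op's `(query, key, value, attn_mask, dropout_p, is_causal, scale)` order, then appends the new constant-false `enable_gqa`. A restatement in plain Python (the names are for illustration only):

```python
# Operand order of the specialized CPU flash-attention op...
old = ["query", "key", "value", "dropout_p", "is_causal", "attn_mask", "scale"]
# ...re-mapped to the generic aten.scaled_dot_product_attention order,
# with the constant-false enable_gqa appended as the eighth operand.
new = [old[i] for i in (0, 1, 2, 5, 3, 4, 6)] + ["enable_gqa"]
assert new == ["query", "key", "value", "attn_mask", "dropout_p",
               "is_causal", "scale", "enable_gqa"]
```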
@@ -33,6 +33,13 @@ LINALG_XFAIL_SET = COMMON_TORCH_MLIR_LOWERING_XFAILS | {
     "UnfoldModule_basic",
 }
 
+if torch_version_for_comparison() < version.parse("2.5.0.dev"):
+    LINALG_XFAIL_SET = LINALG_XFAIL_SET | {
+        # Error: 'torch.aten.scaled_dot_product_attention' op expected 8 operands, but found 7
+        "ScaledDotProductAttentionDifferentModule_basic",
+        "ScaledDotProductAttentionSameModule_basic",
+    }
+
 LINALG_CRASHING_SET = {
     # Runtime op verification: Out of bounds access
     "AtenDiagEmbedNegOffsetDiag_basic",
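The guard keys off the running PyTorch version so the e2e suite still passes against an older nightly, which only knows the 7-operand form of the op. A rough sketch of how such a version gate works (the exact `torch_version_for_comparison` helper in the e2e framework may differ):

```python
from packaging import version
import torch

def torch_version_for_comparison():
    # Drop any local build suffix before comparing,
    # e.g. "2.5.0.dev20240818+cpu" -> "2.5.0.dev20240818".
    return version.parse(torch.__version__.split("+", 1)[0])

if torch_version_for_comparison() < version.parse("2.5.0.dev"):
    print("pre-2.5 torch: SDPA e2e tests are expected to fail (7-operand op)")
```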
@@ -1277,7 +1277,7 @@ def aten〇linear〡shape(input: List[int], weight: List[int], bias: Optional[Li
     Invocation(TensorOfShape(3, 2, 8, 4), TensorOfShape(3, 2, 8, 4), TensorOfShape(3, 2, 8, 4)), # Same shape
     Invocation(TensorOfShape(3, 2, 16, 8), TensorOfShape(3, 2, 8, 8), TensorOfShape(3, 2, 8, 4)), # Different shape
 ])
-def aten〇scaled_dot_product_attention〡shape(query: List[int], key: List[int], value: List[int], attn_mask: Optional[List[int]] = None, dropout_p: float = 0., is_causal: bool = False, scale: Optional[float] = None) -> List[int]:
+def aten〇scaled_dot_product_attention〡shape(query: List[int], key: List[int], value: List[int], attn_mask: Optional[List[int]] = None, dropout_p: float = 0., is_causal: bool = False, scale: Optional[float] = None, enable_gqa: bool = False) -> List[int]:
     outshape = query
     outshape[-1] = value[-1]
     return outshape
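The shape rule is unchanged by the new argument: the result keeps the query's shape except that the last (head) dimension comes from value. A standalone restatement:

```python
from typing import List

def sdpa_out_shape(query: List[int], value: List[int]) -> List[int]:
    # Output shape = query shape, with the last dim taken from value.
    out = list(query)
    out[-1] = value[-1]
    return out

# Matches the "Different shape" invocation above.
assert sdpa_out_shape([3, 2, 16, 8], [3, 2, 8, 4]) == [3, 2, 16, 4]
```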
@@ -3558,7 +3558,7 @@ def aten〇isclose〡dtype(self_rank_dtype: Tuple[int, int], other_rank_dtype: T
     return torch.bool
 
 @check_dtype_function(_check_tensors_with_the_same_dtype(tensor_shapes=[(3, 4, 32, 16), (3, 4, 32, 16), (3, 4, 32, 16)]))
-def aten〇scaled_dot_product_attention〡dtype(query_rank_dtype: Tuple[int, int], key_rank_dtype: Tuple[int, int], value_rank_dtype: Tuple[int, int], attn_mask_rank_dtype: Optional[Tuple[int, int]] = None, dropout_p: float = 0., is_causal: bool = False, scale: Optional[float] = None) -> int:
+def aten〇scaled_dot_product_attention〡dtype(query_rank_dtype: Tuple[int, int], key_rank_dtype: Tuple[int, int], value_rank_dtype: Tuple[int, int], attn_mask_rank_dtype: Optional[Tuple[int, int]] = None, dropout_p: float = 0., is_causal: bool = False, scale: Optional[float] = None, enable_gqa: bool = False) -> int:
     _, query_dtype = query_rank_dtype
     return query_dtype
 
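Likewise the dtype rule ignores `enable_gqa`: the result dtype simply follows the query. Restated outside the generator's naming convention:

```python
from typing import Tuple

def sdpa_out_dtype(query_rank_dtype: Tuple[int, int]) -> int:
    # The result dtype is the query's dtype, regardless of the other operands.
    _, query_dtype = query_rank_dtype
    return query_dtype

# E.g. a rank-4 float32 query (torch encodes float32 as dtype code 6).
assert sdpa_out_dtype((4, 6)) == 6
```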
@@ -988,7 +988,7 @@ def emit_ops(emitter_td: TextEmitter, registry: Registry):
     emit("aten::upsample_nearest2d : (Tensor, int[], float?, float?) -> (Tensor)")
     emit("aten::upsample_nearest2d.vec : (Tensor, int[]?, float[]?) -> (Tensor)")
     emit(
-        "aten::scaled_dot_product_attention : (Tensor, Tensor, Tensor, Tensor?, float, bool, float?) -> (Tensor)"
+        "aten::scaled_dot_product_attention : (Tensor, Tensor, Tensor, Tensor?, float, bool, float?, bool) -> (Tensor)"
     )
     emit("aten::grid_sampler : (Tensor, Tensor, int, int, bool) -> (Tensor)")
 
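The registration string is the source of truth for the generated TableGen above: its eight argument types are why `parseDefaultTorchOp`/`printDefaultTorchOp` now pass 8. A quick sanity check (assuming a plain comma split is adequate for this particular signature):

```python
sig = ("aten::scaled_dot_product_attention : "
       "(Tensor, Tensor, Tensor, Tensor?, float, bool, float?, bool) -> (Tensor)")
args = sig.split(" : (")[1].split(") -> ")[0].split(", ")
assert len(args) == 8  # matches parseDefaultTorchOp(parser, result, 8, 1)
```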
@@ -1 +1 @@
-d6ea1eb2bc8ba770fd5a689a30e234837df27384
+748db193d71a1c29471a87c7841da6a5a0a0dbae
@@ -1,3 +1,3 @@
 -f https://download.pytorch.org/whl/nightly/cpu/torch/
 --pre
-torch==2.5.0.dev20240804
+torch==2.5.0.dev20240818
@@ -193,7 +193,7 @@ func.func @torch.aten.bernoulli_.float(%t: !torch.tensor) -> !torch.tensor {
 // CHECK: %[[FALSE:.+]] = torch.constant.bool false
 // CHECK: %[[NONE0:.+]] = torch.constant.none
 // CHECK: %[[NONE1:.+]] = torch.constant.none
-// CHECK: %[[ATTEN:.+]] = torch.aten.scaled_dot_product_attention %[[ARG0]], %[[ARG1]], %[[ARG2]], %[[NONE0]], %[[ZERO]], %[[FALSE]], %[[NONE1]]
+// CHECK: %[[ATTEN:.+]] = torch.aten.scaled_dot_product_attention %[[ARG0]], %[[ARG1]], %[[ARG2]], %[[NONE0]], %[[ZERO]], %[[FALSE]], %[[NONE1]], %[[FALSE]]
 // CHECK: return %[[ATTEN]]
 func.func @scaled_dot_product_flash_attention_for_cpu(%arg0: !torch.vtensor<[1,1,5,5],f32>, %arg1: !torch.vtensor<[1,1,5,5],f32>, %arg2: !torch.vtensor<[1,1,5,5],f32>) -> !torch.vtensor<[1,1,5,5],f32> {
   %float0.000000e00 = torch.constant.float 0.000000e+00
@@ -1,3 +1,3 @@
 -f https://download.pytorch.org/whl/nightly/cpu/torchvision/
 --pre
-torchvision==0.20.0.dev20240804
+torchvision==0.20.0.dev20240818