[MLIR][Torch] Canonicalize torch.from_i1 and torch.to_i1 (#3067)

When lowering `torch.aten.convolution`, it is expected that the
'transposed' argument is a torch.constant operation. In some cases, the
argument was a `from_i1` operation converting an `arith.constant`
operation into a torch.bool. This is not wrong semantically, but instead
of generalizing the legality of the `torch.aten.convolution` op, we
canonicalize `arith.constant` ops followed by `from_i1` ops to
`torch.constant.bool` ops.

For example:
```
//===-------------------------------------------===//
Legalizing operation : 'torch.aten.convolution'(0x124705b90) {
  %33 = "torch.aten.convolution"(%arg0, %20, %21, %31, %29, %30, %19, %32, %0) : (!torch.vtensor<[1,1,28,28],f32>, !torch.vtensor<[10,1,5,5],f32>, !torch.vtensor<[10],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int) -> !torch.vtensor<[1,10,24,24],f32>

  * Fold {
  } -> FAILURE : unable to fold

  * Pattern : 'torch.aten.convolution -> ()' {
    ** Failure : unimplemented: only constant transposed supported.      <-- Resolved by this PR
  } -> FAILURE : pattern failed to match

  * Pattern : 'torch.aten.convolution -> ()' {
    ** Failure : not a supported Scalar to Tensor like op
  } -> FAILURE : pattern failed to match

  * Pattern : 'torch.aten.convolution -> ()' {
    ** Failure : not a supported elementwise op
  } -> FAILURE : pattern failed to match

  * Pattern : 'torch.aten.convolution -> ()' {
    ** Failure : not a supported reduce op
  } -> FAILURE : pattern failed to match
} -> FAILURE : no matched legalization pattern
//===-------------------------------------------===//
<stdin>:21:11: error: failed to legalize operation 'torch.aten.convolution' that was explicitly marked illegal
    %17 = torch.operator "onnx.Conv"(%arg0, %0, %1) {torch.onnx.dilations = [1 : si64, 1 : si64], torch.onnx.group = 1 : si64, torch.onnx.kernel_shape = [5 : si64, 5 : si64], torch.onnx.pads = [0 : si64, 0 : si64, 0 : si64, 0 : si64], torch.onnx.strides = [1 : si64, 1 : si64]} : (!torch.vtensor<[1,1,28,28],f32>, !torch.vtensor<[10,1,5,5],f32>, !torch.vtensor<[10],f32>) -> !torch.vtensor<[1,10,24,24],f32> 
          ^
<stdin>:21:11: note: see current operation: %33 = "torch.aten.convolution"(%arg0, %20, %21, %31, %29, %30, %19, %32, %0) : (!torch.vtensor<[1,1,28,28],f32>, !torch.vtensor<[10,1,5,5],f32>, !torch.vtensor<[10],f32>, !torch.list<int>, !torch.list<int>, !torch.list<int>, !torch.bool, !torch.list<int>, !torch.int) -> !torch.vtensor<[1,10,24,24],f32>
```

Additionally, we require the canonicalization of `to_i1` operating on a
torch.constant bool to an `arith.constant ... : i1` for the e2e tests to
pass successfully.
pull/3093/head
Thomas Dietert 2024-04-01 15:25:51 -06:00 committed by GitHub
parent b98f7f75dc
commit 3c33dbd987
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
3 changed files with 46 additions and 0 deletions

View File

@ -80,6 +80,7 @@ def TorchConversion_ToI1Op : TorchConversion_Op<"to_i1", [
  let assemblyFormat = [{
    $operand attr-dict
  }];
  let hasFolder = 1;
}

def TorchConversion_FromI1Op : TorchConversion_Op<"from_i1", [
@ -98,6 +99,7 @@ def TorchConversion_FromI1Op : TorchConversion_Op<"from_i1", [
  let assemblyFormat = [{
    $operand attr-dict
  }];
  let hasFolder = 1;
}

def TorchConversion_ToI64Op : TorchConversion_Op<"to_i64", [

View File

@ -71,6 +71,32 @@ LogicalResult FromBuiltinTensorOp::verify() {
  return success();
}
//===----------------------------------------------------------------------===//
// FromI1Op
//===----------------------------------------------------------------------===//
/// Fold `torch_c.from_i1` when its operand is a known constant: the folded
/// attribute is the operand's BoolAttr, which the canonicalizer materializes
/// as a `torch.constant.bool`. Returns a null OpFoldResult (no fold) when the
/// operand is not constant.
OpFoldResult FromI1Op::fold(FoldAdaptor adaptor) {
  if (auto boolAttr = adaptor.getOperand().dyn_cast_or_null<mlir::BoolAttr>())
    return boolAttr;
  return nullptr;
}
//===----------------------------------------------------------------------===//
// ToI1Op
//===----------------------------------------------------------------------===//
/// Fold `torch_c.to_i1` when its operand is a known constant bool: the folded
/// attribute is the operand's BoolAttr, which is materialized as an
/// `arith.constant ... : i1`. Returns a null OpFoldResult (no fold) when the
/// operand is not constant.
OpFoldResult ToI1Op::fold(FoldAdaptor adaptor) {
  if (auto boolAttr = adaptor.getOperand().dyn_cast_or_null<mlir::BoolAttr>())
    return boolAttr;
  return nullptr;
}
//===----------------------------------------------------------------------===//
// FromI64Op
//===----------------------------------------------------------------------===//

View File

@ -1,5 +1,23 @@
// RUN: torch-mlir-opt %s -canonicalize | FileCheck %s
// Verifies that folding torch_c.from_i1 of an arith.constant i1 yields a
// torch.constant.bool directly, leaving no from_i1 in the output.
// CHECK-LABEL: func.func @torch_c.from_i1() -> !torch.bool {
// CHECK: %[[TRUE:.*]] = torch.constant.bool true
// CHECK: return %[[TRUE]] : !torch.bool
func.func @torch_c.from_i1() -> !torch.bool {
%c1_i1 = arith.constant true
%0 = torch_c.from_i1 %c1_i1
return %0 : !torch.bool
}
// Verifies that folding torch_c.to_i1 of a torch.constant.bool yields an
// arith.constant i1 directly, leaving no to_i1 in the output.
// CHECK-LABEL: func.func @torch_c.to_i1() -> i1 {
// CHECK: %[[C1_I1:.*]] = arith.constant true
// CHECK: return %[[C1_I1]] : i1
func.func @torch_c.to_i1() -> i1 {
%bool1 = torch.constant.bool true
%0 = torch_c.to_i1 %bool1
return %0 : i1
}
// CHECK-LABEL: func.func @torch_c.from_i64() -> !torch.int {
// CHECK: %[[INT5:.*]] = torch.constant.int 5
// CHECK: return %[[INT5]] : !torch.int