mirror of https://github.com/llvm/torch-mlir
Bump llvm-project to 72ddd559b8aafef402091f8e192e025022e4ebef

- Fixup to OpBuilderDAG
- Update for affine map naming
parent 29c715b6b1
commit 0761df9f58
@@ -1 +1 @@
-Subproject commit c8c07b76b2cf2ada8e7ec132f7f57b97d76743cf
+Subproject commit 72ddd559b8aafef402091f8e192e025022e4ebef
@@ -242,7 +242,7 @@ def Basicpy_ExecOp : Basicpy_Op<"exec", [
 
   let skipDefaultBuilders = 1;
   let builders = [
-    OpBuilder<"OpBuilder &builder, OperationState &result">,
+    OpBuilderDAG<(ins)>,
  ];
  let extraClassDeclaration = [{
    OpBuilder getBodyBuilder() {
@@ -291,7 +291,7 @@ def Basicpy_FuncTemplateCallOp : Basicpy_Op<"func_template_call", []> {
  let verifier = [{ return verifyBasicpyOp(*this); }];
  let skipDefaultBuilders = 1;
  let builders = [
-    OpBuilder<"OpBuilder &builder, OperationState &result">,
+    OpBuilderDAG<(ins)>,
  ];
 }
 
@@ -364,7 +364,7 @@ def Basicpy_FuncTemplateOp : Basicpy_Op<"func_template", [
 
  let skipDefaultBuilders = 1;
  let builders = [
-    OpBuilder<"OpBuilder &builder, OperationState &result">,
+    OpBuilderDAG<(ins)>,
  ];
  let extraClassDeclaration = [{
    OpBuilder getBodyBuilder() {
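Aside on the OpBuilderDAG fixup above (a hedged sketch, not part of the commit message): in ODS TableGen, the legacy OpBuilder form spells the builder's C++ parameter list as a raw string, while OpBuilderDAG takes a typed (ins ...) DAG and lets tablegen derive the signature; an empty (ins) still produces the implicit build(OpBuilder &, OperationState &) overload. A minimal before/after on a hypothetical op, under that assumption:

    // Legacy: the C++ parameter list is an opaque string that tablegen
    // pastes verbatim into the generated build() declaration.
    let builders = [
      OpBuilder<"OpBuilder &builder, OperationState &result">,
    ];

    // Declarative: arguments are a typed (ins ...) DAG; (ins) means no
    // extra parameters beyond the implicit builder and operation state.
    let builders = [
      OpBuilderDAG<(ins)>,
    ];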
@@ -21,7 +21,7 @@ func @tcp_broadcast_to(%arg0: tensor<?xf32>, %arg1: tensor<?xindex>) -> tensor<?
 // CHECK: %[[RHS:.*]] = tensor_to_memref %[[RHS_TENSOR]] : memref<?xf32>
 // CHECK: %[[SHAPE:.*]] = shape.shape_of %[[LHS_TENSOR]] : tensor<?xf32> -> tensor<?xindex>
 // CHECK: %[[RESULT:.*]] = refback.alloc_memref %[[SHAPE]] : memref<?xf32>
-// CHECK: linalg.generic {indexing_maps = [#map0, #map0, #map0], iterator_types = ["parallel"]} ins(%[[LHS]], %[[RHS]] : memref<?xf32>, memref<?xf32>) outs(%[[RESULT]] : memref<?xf32>) {
+// CHECK: linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = ["parallel"]} ins(%[[LHS]], %[[RHS]] : memref<?xf32>, memref<?xf32>) outs(%[[RESULT]] : memref<?xf32>) {
 // CHECK: ^bb0(%[[LHS_SCALR:.*]]: f32, %[[RHS_SCALAR:.*]]: f32, %{{.*}}: f32):
 // CHECK: %[[RESULT_SCALAR:.*]] = addf %[[LHS_SCALR]], %[[RHS_SCALAR]] : f32
 // CHECK: linalg.yield %[[RESULT_SCALAR]] : f32
@@ -41,7 +41,7 @@ func @tcp_add(%arg0: tensor<?xf32>, %arg1: tensor<?xf32>) -> tensor<?xf32> {
 // CHECK: %[[RHS:.*]] = tensor_to_memref %[[RHS_TENSOR]] : memref<?xf32>
 // CHECK: %[[SHAPE:.*]] = shape.shape_of %[[LHS_TENSOR]] : tensor<?xf32> -> tensor<?xindex>
 // CHECK: %[[RESULT:.*]] = refback.alloc_memref %[[SHAPE]] : memref<?xf32>
-// CHECK: linalg.generic {indexing_maps = [#map0, #map0, #map0], iterator_types = ["parallel"]} ins(%[[LHS]], %[[RHS]] : memref<?xf32>, memref<?xf32>) outs(%[[RESULT]] : memref<?xf32>) {
+// CHECK: linalg.generic {indexing_maps = [#map, #map, #map], iterator_types = ["parallel"]} ins(%[[LHS]], %[[RHS]] : memref<?xf32>, memref<?xf32>) outs(%[[RESULT]] : memref<?xf32>) {
 // CHECK: ^bb0(%[[LHS_SCALR:.*]]: f32, %[[RHS_SCALAR:.*]]: f32, %{{.*}}: f32):
 // CHECK: %[[RESULT_SCALAR:.*]] = mulf %[[LHS_SCALR]], %[[RHS_SCALAR]] : f32
 // CHECK: linalg.yield %[[RESULT_SCALAR]] : f32