blacklist:
# Disabled in favour of `aten::index_put`, which supports optional indices via the `hacked_twin` JIT hack.
# It also doesn't have the confusing `unsafe` argument.
- _index_put_impl
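# For reference, the relevant ATen schemas are roughly as follows (see upstream
# native_functions.yaml and the JIT `hacked_twin` registrations for the authoritative forms):
#   index_put(Tensor self, Tensor?[] indices, Tensor values, bool accumulate=False) -> Tensor
#   index_put.hacked_twin(Tensor self, Tensor[] indices, Tensor values, bool accumulate=False) -> Tensor
#   _index_put_impl_(Tensor(a!) self, Tensor?[] indices, Tensor values, bool accumulate=False, bool unsafe=False) -> Tensor(a!)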
# Ops with list of tensors output
- split.Tensor
- split_with_sizes
- unbind.int
- chunk
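# (What the ops above have in common is a Tensor[] return, e.g. roughly
#  `split.Tensor(Tensor self, int split_size, int dim=0) -> Tensor[]`; their *_copy
#  counterparts are handled via the `supported` list below instead.)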
# Additional ops for which autogen is supported but which don't compile yet
- _convolution
- detach
- item
- size
- copy_
# Disabled for consistency with TS backend
- lift_fresh_copy
- new_empty
- rsub
- slice.Tensor # Disabled in favour of slice_copy.Tensor
- zeros
- ones
- arange
- arange.start
- arange.start_step
- fill.Scalar
- scalar_tensor
# Disabled in favour of functionalized alternatives
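# (Functionalization rewrites these view ops into their *_copy counterparts, e.g.
#  aten::view -> aten::view_copy and aten::expand -> aten::expand_copy, several of
#  which appear in the `supported` and `symint` sections below.)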
- _reshape_alias
- expand
- permute
- select.int
- squeeze
- squeeze.dim
- t
- transpose.int
- unsqueeze
- view
whitelist:
# Enabled for consistency with TS backend
- arange.start_out
# List of supported ops that we don't want to do the full codegen for
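# (That is, these ops are still registered for the backend, but their implementations
#  are hand-written rather than autogenerated; this assumes the usual meaning of
#  `supported` in the LTC/lazy codegen.)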
supported:
# - bernoulli
# - bernoulli_
- _to_copy
- clone
- empty.memory_format
- empty_strided
- fill_.Scalar
- _unsafe_view
- unbind_copy.int
- split_copy.Tensor
- split_with_sizes_copy
- index.Tensor
- index_put
# ops required for functionalization
- lift
- lift_fresh
# Below are all operators that are "composite" in core,
# but require us to explicitly re-enable functionalization in order to use them.
# Why? These operators are all CompositeExplicitAutograd, which means that they run
# after functionalization, but their implementations call view operators
# (which we need to functionalize away).
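# (For example, select_backward's composite implementation roughly allocates a zeros
#  tensor and copies the gradient into a select(...) view of it, so it only works here
#  once functionalization is re-enabled.)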
- block_diag
- new_empty_strided
- narrow_copy
- pixel_shuffle
- pixel_unshuffle
- select_backward
- slice_backward
- diagonal_backward
- _trilinear
- linalg_pinv.atol_rtol_tensor
- logsumexp.out
# List of ops that will take in symints for the size instead of ints
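# (For these ops the generated signatures use c10::SymInt / SymInt[] for sizes rather
#  than int64_t, matching upstream schemas such as, roughly,
#  `view_copy(Tensor self, SymInt[] size) -> Tensor`.)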
symint:
- empty.memory_format
- new_empty_strided
- expand_copy
- narrow_copy
- slice_backward
- slice_copy.Tensor
- split_copy.Tensor
- slice_scatter
- view
- view_copy
- as_strided_copy
- as_strided_scatter
- split_with_sizes_copy
additional_ops:
# Additional ops to support that are not explicitly supported by Torch-MLIR
- _copy_from
- _copy_from_and_resize
# List of non-native ops that we only want to do IR node class generation for
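# (Each entry below is an IR-node-only schema: `opkind` overrides the op kind used for
#  the generated node class, and `properties` toggles codegen traits such as ShapeCompute
#  and TreatScalarsAsConstants; this is assumed to mirror the upstream lazy tensor
#  codegen's handling of `non_native` entries.)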
non_native:
- func: scalar(Scalar value, ScalarType type) -> Tensor
  opkind: at::prim::Constant
  properties:
    - ShapeCompute
    - TreatScalarsAsConstants
- func: expand(Tensor input, int[] size, bool is_scalar_expand) -> Tensor
- func: cast(Tensor input, ScalarType dtype, ScalarType? stype) -> Tensor
  opkind: ltc_cast
  properties:
    - ShapeCompute