blacklist:
# List of unsupported ops in LTC autogen because of some error
- _index_put_impl_ # Error: TODO not sure if there are other valid types to handle here
- _index_put_impl  # Error: TODO not sure if there are other valid types to handle here
- empty_like       # Error: TODO add support for type BaseType(name=)
- index.Tensor     # Error: TODO not sure if there are other valid types to handle here
- index_put        # Error: TODO not sure if there are other valid types to handle here
- index_put_       # Error: TODO not sure if there are other valid types to handle here

# Additional ops for which autogen is supported but which don't compile yet
- _convolution
- detach
- item
- size
- copy_

# Disabled for consistency with TS backend
- lift_fresh_copy
- new_empty
- rsub
- slice.Tensor # Disabled in favour of slice_copy.Tensor
- zeros
- ones
- arange
- arange.start
- arange.start_step
- fill.Scalar

# Disabled in favour of functionalized alternatives
- _reshape_alias
- expand
- permute
- select.int
- squeeze
- squeeze.dim
- t
- transpose.int
- unsqueeze
- view

whitelist:
# Enabled for consistency with TS backend
- arange.start_out

# List of ops to autogen even if not supported by Torch-MLIR explicitly
#- split_copy.Tensor
#- split_with_sizes_copy
#- unbind_copy.int

# List of supported ops that we don't want to do the full codegen for
supported:
# - bernoulli
# - bernoulli_
- _to_copy
- clone
- empty.memory_format
- empty_strided
- fill_.Scalar
- _unsafe_view

# Ops required for functionalization
- lift
- lift_fresh
# Below are all operators that are "composite" in core,
# but require us to explicitly re-enable functionalization in order to use them.
# Why? These operators are all CompositeExplicitAutograd, which means that they
# run after functionalization, but their implementations call view operators
# (which we need to functionalize away).
- block_diag
- new_empty_strided
- narrow_copy
- pixel_shuffle
- pixel_unshuffle
- select_backward
- slice_backward
- diagonal_backward
- _trilinear
- linalg_pinv.atol_rtol_tensor
- logsumexp.out

# List of ops that will take in SymInts for the size instead of ints
symint:
- empty.memory_format
- new_empty_strided
- expand_copy
- narrow_copy
- slice_backward
- slice_copy.Tensor
- slice_scatter
- view
- view_copy
- as_strided_copy
- as_strided_scatter

additional_ops:
# Additional ops to support that are not supported by Torch-MLIR explicitly
- _copy_from
- _copy_from_and_resize

# List of non-native ops that we only want to do IR node class generation for
non_native:
- func: scalar(Scalar value, ScalarType type) -> Tensor
  opkind: at::prim::Constant
  properties:
  - ShapeCompute
  - TreatScalarsAsConstants
- func: expand(Tensor input, int[] size, bool is_scalar_expand) -> Tensor
- func: cast(Tensor input, ScalarType dtype, ScalarType? stype) -> Tensor
  opkind: ltc_cast
  properties:
  - ShapeCompute
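
# ---------------------------------------------------------------------------
# Consumption sketch (illustrative only). The commented-out Python below is a
# minimal sketch of how a codegen driver *might* load this config and decide
# which ops to autogen, assuming the file is parsed with PyYAML. The file
# path, function name, and filtering policy shown here are assumptions for
# illustration, not the actual Torch-MLIR autogen implementation.
#
#   import yaml
#
#   # Hypothetical path; substitute wherever this config actually lives.
#   with open("autogen_ltc_backend.yaml") as f:
#       config = yaml.safe_load(f)
#
#   blacklist = set(config.get("blacklist") or [])
#   whitelist = set(config.get("whitelist") or [])
#   supported = set(config.get("supported") or [])
#
#   def should_autogen(op_name: str) -> bool:
#       # Whitelisted ops are always generated; blacklisted ops and ops with
#       # hand-written kernels ("supported") are skipped by full codegen.
#       if op_name in whitelist:
#           return True
#       return op_name not in blacklist and op_name not in supported
# ---------------------------------------------------------------------------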