//===----------------------------------------------------------------------===//
//
// This file is licensed under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// Also available under a BSD-style license. See LICENSE.
//
//===----------------------------------------------------------------------===//

#include "torch-mlir/Dialect/Torch/Utils/Utils.h"
#include "torch-mlir/Dialect/Torch/IR/TorchOps.h"
#include "llvm/Support/ErrorHandling.h"

//===----------------------------------------------------------------------===//
// Type promotion notes
//
// Types have different categories, ordered as
//   complex > floating > integral > boolean
// (`>` means the left-hand side is the higher category).
//
// Operands have different priorities, ordered as
//   dimensioned tensor > 0-dim tensor > scalar == wrapped 0-dim tensor,
// represented by `ResultTypeState.dimResult`, `ResultTypeState.zeroResult`,
// and `ResultTypeState.wrappedResult` in the source code.
//
// For operands of the same priority, the result type is the highest category
// with sufficient width to hold all operands. By default, only the
// highest-priority operands participate in type promotion; a lower-priority
// operand participates only if it is in a higher category than every
// higher-priority operand.
//
// For example, a <[],f32> tensor (lower priority) and a <[1],si64> tensor
// promote to a <[?],f32> tensor because floating > integral, whereas a
// <[],f64> tensor (lower priority) and a <[1],f32> tensor promote to a
// <[?],f32> tensor because f32 and f64 are in the same category.
//
// The ScalarType enum definition, type promotion table, ResultTypeState
// struct definition, and some helpers are copied from
// aten/src/ATen/native/TypeProperties.*. Other references:
// - https://pytorch.org/docs/stable/tensor_attributes.html#type-promotion-doc
// - https://github.com/pytorch/pytorch/issues/9515
//===----------------------------------------------------------------------===//

using namespace mlir::torch::torch_upstream;
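
// To make the category ordering above concrete: a helper along the following
// lines could rank a ScalarType by category so that complex > floating >
// integral > boolean. This is an illustrative sketch only, not part of the
// code copied from upstream; `getCategoryRank` is a hypothetical name, and
// several ScalarType values are elided for brevity.
static inline int getCategoryRank(ScalarType t) {
  switch (t) {
  case ScalarType::Bool:
    return 0; // boolean
  case ScalarType::Int:
  case ScalarType::Long:
    return 1; // integral
  case ScalarType::Float:
  case ScalarType::Double:
    return 2; // floating
  case ScalarType::ComplexFloat:
  case ScalarType::ComplexDouble:
    return 3; // complex
  default:
    return -1; // dtypes omitted from this sketch
  }
}
// Under this ranking, a lower-priority operand participates in promotion
// exactly when getCategoryRank(lower) > getCategoryRank(higher), matching the
// <[],f32> vs. <[1],si64> example above (2 > 1).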
namespace mlir {
namespace torch {
namespace Torch {
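
// Converts a possibly-negative PyTorch-style dimension index into its
// non-negative equivalent by adding `inputRank` to negative values, e.g.
// toPositiveDim(-1, 4) == 3. The result is not bounds-checked; use
// `isValidDim` for that.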
int64_t toPositiveDim(int64_t dim, int64_t inputRank) {
  return dim >= 0 ? dim : dim + inputRank;
}
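
// Returns true if `dim` is a valid dimension index for a tensor of rank
// `inputRank`, i.e. 0 <= dim < inputRank; negative indices must first be
// normalized with `toPositiveDim`.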
bool isValidDim(int64_t dim, int64_t inputRank) {
  return dim >= 0 && dim < inputRank;
}
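
// If `v` is produced by a `torch.prim.ListConstruct` op, copies the list
// elements into `elems` and returns true; otherwise returns false and leaves
// `elems` unchanged.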
bool getListConstructElements(Value v, SmallVectorImpl<Value> &elems) {
  auto listConstruct = v.getDefiningOp<PrimListConstructOp>();
  if (!listConstruct)
    return false;
  elems = llvm::to_vector<4>(listConstruct.elements());
  return true;
}
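
// Maps an MLIR builtin element type to the corresponding PyTorch ScalarType,
// e.g. Float32Type -> ScalarType::Float and si64 -> ScalarType::Long. Fails
// fatally on types without a handled mapping.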
ScalarType getScalarTypeForType(Type type) {
  if (type.isa<Float32Type>())
    return ScalarType::Float;
  if (type.isa<Float64Type>())
    return ScalarType::Double;
  if (type.isSignedInteger(64))
    return ScalarType::Long;
  if (type.isSignedInteger(32))
    return ScalarType::Int;
  if (type.isUnsignedInteger(1))
    return ScalarType::Bool;
  llvm::report_fatal_error("unhandled type for getScalarTypeForType");
}
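
// As an illustration (a hypothetical snippet, not code from this file), a
// lowering or decomposition pattern could use this helper to materialize the
// integer dtype operand that many `aten` ops expect, where `b` is an
// OpBuilder and `loc` a Location:
//
//   int64_t dtypeInt =
//       static_cast<int64_t>(getScalarTypeForType(elementType));
//   Value dtype = b.create<ConstantIntOp>(loc, b.getI64IntegerAttr(dtypeInt));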
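
// Emits a match failure on `op` if `v` is typed as an optional or None value
// (such arguments are currently unimplemented); returns success otherwise.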
LogicalResult checkNotNone(PatternRewriter &rewriter, Operation *op, Value v) {
  Type type = v.getType();
  if (type.isa<OptionalType>() || type.isa<Torch::NoneType>() ||
      type.isa<mlir::NoneType>())
    return rewriter.notifyMatchFailure(op, "unimplemented None type arg");
  return success();
}
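
// Typical use is to bail out of a rewrite pattern early on None operands (a
// hypothetical snippet; `bias` stands in for any optional operand):
//
//   if (failed(checkNotNone(rewriter, op, bias)))
//     return failure();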
} // namespace Torch
} // namespace torch
} // namespace mlir