During constant folding, a new constant op is created according to the original op's result type. See the code in TorchDialect.cpp:

```cpp
Operation *TorchDialect::materializeConstant(OpBuilder &builder,
                                             Attribute value, Type type,
                                             Location loc) {
  if (auto integerType = dyn_cast<Torch::IntType>(type))
    return builder.create<Torch::ConstantIntOp>(loc, cast<IntegerAttr>(value));

  if (auto floatType = dyn_cast<Torch::FloatType>(type))
    return builder.create<Torch::ConstantFloatOp>(loc, cast<FloatAttr>(value));

  if (auto numberType = dyn_cast<Torch::NumberType>(type)) {
    if (auto floatValue = dyn_cast<mlir::FloatAttr>(value)) {
      return builder.create<Torch::ConstantNumberOp>(loc, floatValue);
    } else if (auto intValue = dyn_cast<mlir::IntegerAttr>(value)) {
      return builder.create<Torch::ConstantNumberOp>(loc, intValue);
    }
  }

  if (isa<Torch::BoolType>(type)) {
    return builder.create<Torch::ConstantBoolOp>(loc, cast<IntegerAttr>(value));
  }

  if (isa<Torch::NoneType>(type))
    return builder.create<ConstantNoneOp>(loc);

  if (auto stringAttr = dyn_cast<StringAttr>(value))
    return builder.create<ConstantStrOp>(loc, stringAttr);

  if (auto elementsAttr = dyn_cast<ElementsAttr>(value)) {
    // Only !torch.vtensor can be constant folded. !torch.tensor has
    // non-trivial aliasing semantics which prevent deduplicating it.
    assert(isa<ValueTensorType>(type) && "should be a vtensor type!");
    return builder.create<ValueTensorLiteralOp>(loc, elementsAttr);
  }

  return nullptr;
}
```

So when the folded op has a tensor result type, that type must be `ValueTensorType`, otherwise the **assert** statement fires. However, many fold methods in TorchOps.cpp only check for `BaseTensorType`, which also matches `!torch.tensor`; such a fold can return an attribute for a non-value tensor result and then trip the assertion when the constant is materialized.
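To avoid hitting that assert, a fold should bail out unless its result is a value-semantic tensor. The following is a minimal sketch of such a guard; `AtenExampleOp` and the omitted folding body are hypothetical placeholders, not code from TorchOps.cpp.

```cpp
#include "torch-mlir/Dialect/Torch/IR/TorchOps.h"
#include "torch-mlir/Dialect/Torch/IR/TorchTypes.h"

using namespace mlir;
using namespace mlir::torch::Torch;

// Hypothetical fold method showing the guard; AtenExampleOp does not exist in
// torch-mlir and stands in for any op whose fold would produce an ElementsAttr.
OpFoldResult AtenExampleOp::fold(FoldAdaptor adaptor) {
  // Checking only BaseTensorType would also accept !torch.tensor results,
  // which materializeConstant later rejects with the assert shown above.
  // Restricting the fold to ValueTensorType keeps the two in agreement.
  if (!isa<ValueTensorType>(getType()))
    return {};
  // ... compute and return a DenseElementsAttr for the folded value here ...
  return {};
}
```

With a guard like this, a fold that would otherwise hand an ElementsAttr to a `!torch.tensor` result simply declines to fold, so materializeConstant is only ever asked to build a ValueTensorLiteralOp.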