mirror of https://github.com/llvm/torch-mlir
commit 4199feffed (parent e8706957c0)
```diff
@@ -47,7 +47,7 @@ install_requirements() {
 checkout_pytorch() {
   if [[ ! -d "$PYTORCH_ROOT" ]]; then
     # ${TORCH_MLIR_SRC_PYTORCH_BRANCH} could be a branch name or a commit hash.
-    # Althought `git clone` can accept a branch name, the same command does not
+    # Although `git clone` can accept a branch name, the same command does not
     # accept a commit hash, so we instead use `git fetch`. The alternative is
     # to clone the entire repository and then `git checkout` the requested
     # branch or commit hash, but that's too expensive.
```
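The comment being fixed above explains why the build script fetches a pinned ref instead of cloning: `git clone` accepts a branch name but not a commit hash, and cloning the full history just to check out one commit is too expensive. As an illustration of that approach only (not the project's actual shell script; the function name, arguments, and example URL are assumptions for the sketch):

```python
import subprocess

def shallow_checkout(dest: str, url: str, ref: str) -> None:
    """Fetch `ref` (a branch name or a commit hash) into `dest` without
    downloading the repository's full history.

    `git clone --branch` only takes branch/tag names, so for an arbitrary
    commit hash we init an empty repo, fetch the ref directly, and check
    out FETCH_HEAD.
    """
    def git(*args: str) -> None:
        subprocess.run(["git", "-C", dest, *args], check=True)

    subprocess.run(["git", "init", dest], check=True)
    git("remote", "add", "origin", url)
    # --depth=1 keeps the download small; hosts such as GitHub typically
    # allow fetching a commit hash directly this way.
    git("fetch", "--depth=1", "origin", ref)
    git("checkout", "FETCH_HEAD")

# Example (hypothetical): pin PyTorch to a branch or commit.
# shallow_checkout("pytorch", "https://github.com/pytorch/pytorch.git", "main")
```

Checking out `FETCH_HEAD` leaves the repository in a detached state, which is fine for a pinned, read-only build checkout.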
```diff
@@ -56,7 +56,7 @@ def GlobalizeObjectGraph : Pass<"torch-globalize-object-graph", "ModuleOp"> {
     - Multiple instances of the same class type are allowed, as long as it is
       possible to monomorphize ("template instantiate") functions so that each
       argument of !torch.nn.Module type corresponds to a unique instance.
-      In pratice, this limitation is either 1) (fundamental) due to truly
+      In practice, this limitation is either 1) (fundamental) due to truly
       dynamic use of modules, such as `m1 if cond() else m2` in Python code,
       or 2) (incidental) imprecision of the static analysis used in this pass
       which is used to calculate when a single intance is relevant. In general,
```
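To make the monomorphization constraint in this pass description concrete, here is an illustrative Python sketch (not taken from the repository; the class names are invented) contrasting a module hierarchy in which every `!torch.nn.Module` argument resolves to a unique instance with the `m1 if cond() else m2` style of dynamic module selection the description calls out as fundamental:

```python
import torch

class Inner(torch.nn.Module):
    def forward(self, x):
        return x + 1

class Monomorphizable(torch.nn.Module):
    # Two instances of the same class, but each call site always refers to
    # a fixed instance, so a function taking a !torch.nn.Module argument
    # can be specialized ("template instantiated") per instance.
    def __init__(self):
        super().__init__()
        self.m1 = Inner()
        self.m2 = Inner()

    def forward(self, x):
        return self.m2(self.m1(x))

class NotMonomorphizable(torch.nn.Module):
    # The submodule that runs depends on a runtime value, so no single
    # instance can be associated with the callee's module argument; this is
    # the "truly dynamic use of modules" the pass description refers to.
    def __init__(self):
        super().__init__()
        self.m1 = Inner()
        self.m2 = Inner()

    def forward(self, x):
        m = self.m1 if bool(x.sum() > 0) else self.m2
        return m(x)
```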
```diff
@@ -60,7 +60,7 @@ Value convertTensorToDtype(PatternRewriter &rewriter, Location loc, Value input,

 bool isBuiltInType(Type type);

-// Helper funtion to get rank of `Base tensor type`.
+// Helper function to get rank of `Base tensor type`.
 // std::nullopt is returned if the tensorRank can't be determined.
 std::optional<unsigned> getTensorRank(Value tensor);

```
```diff
@@ -14,7 +14,7 @@ The components are subclasses of the backend API interface classes found under
 [torch/csrc/lazy/backend](https://github.com/pytorch/pytorch/tree/master/torch/csrc/lazy/backend).

 Importantly, the subclasses are still abstract classes. Pure virtual methods
-such as `Compile` were purposefully not overriden as Torch-MLIR does not know
+such as `Compile` were purposefully not overridden as Torch-MLIR does not know
 how to compile the model for the target hardware.

 The intent is that vendor hardware specific plugins will subclass the Torch-MLIR
```
```diff
@@ -168,7 +168,7 @@ at::Tensor LazyNativeFunctions::_copy_from(
     // materializing a lazy tensor (self) and copying its value into eager
     // tensor (dst)
     // detached=false lets us skip a copy in `ToTensor`, which should be safe
-    // becuase we are only going to use the tensor for dst.copy_()
+    // because we are only going to use the tensor for dst.copy_()
     CHECK(self_tensor);
     at::Tensor tensor = self_tensor->ToTensor(/*detached=*/false);
     at::Tensor typed_tensor =
```
```diff
@@ -74,7 +74,7 @@ private:
   // Note: shape is undefined for TensorList. We assert in some places that
   // #shapes matches #outputs and this stems from
   // the fact that currently all IR nodes represent tensors (there is no
-  // type system for this IR). Becuase of this, TensorList is a bit of a
+  // type system for this IR). Because of this, TensorList is a bit of a
   // hack.
   //
   // TODO(whc) once Shape() API is moved to Node base, also make it virtual, and
```
```diff
@@ -36,7 +36,7 @@ TorchMlirOpVector LowerTorchMlirBuiltin(
     const std::vector<c10::TypePtr> tensor_types,
     const std::vector<torch::jit::NamedValue>& arguments,
     const std::vector<torch::jit::NamedValue>& kwarguments) {
-  // Workaround for ListType::isSubtypeOfExt behavoir which leads to
+  // Workaround for ListType::isSubtypeOfExt behavior which leads to
   // the problems with JIT schema matching, so we need to keep
   // c10::ListType empty before magic_method->call function call.
   auto dummy_graph = torch::jit::Graph();
```
```diff
@@ -15,7 +15,7 @@ namespace torch {
 namespace lazy {

 // IValueConstant IR Node represents a `prim::Constant` constructed with IValue
-// parameter which is helpfull in different usecases when we need custom
+// parameter which is helpful in different usecases when we need custom
 // native ops lowering to torch-mlir IR nodes.
 class IValueConstant : public torch::lazy::TorchMlirNode {
  public:
```
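As background for the comment above, `prim::Constant` is the node TorchScript emits for literal values captured in a graph. A small illustrative snippet (not part of this change) showing one in practice:

```python
import torch

@torch.jit.script
def add_offset(x: torch.Tensor) -> torch.Tensor:
    # The literal 3.0 is represented as a prim::Constant node carrying an
    # IValue in the scripted graph.
    return x + 3.0

# Printing the TorchScript IR shows the prim::Constant node.
print(add_offset.graph)
```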