torch-mlir/projects/ltc/csrc/base_lazy_backend/mlir_node.cpp

//===- mlir_node.cpp ------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// Also available under a BSD-style license. See LICENSE.
//
//===----------------------------------------------------------------------===//
// This file is adapted from pytorch/pytorch
// https://github.com/pytorch/pytorch/blob/lazy_tensor_staging/torch/csrc/lazy/ts_backend/ts_node.cpp
//===----------------------------------------------------------------------===//
#include "mlir_node.h"
#include "utils/exception.h"
namespace torch {
namespace lazy {
namespace {
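// Folds every operand hash (with a sentinel value for null operands) and
// every result shape into the running `seed`. When `bakeInSizes` is set, the
// operands contribute their shape hashes instead, so concrete sizes
// participate in the final hash.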
hash_t OperandHashes(
    const OpList& operands, const c10::ArrayRef<Shape>& shapes,
    const hash_t& seed, bool bakeInSizes) {
  hash_t hash = seed;
  for (auto& operand : operands) {
    if (!operand) {
      hash = HashCombine(hash, static_cast<uint64_t>(kNullOpt));
      continue;
    }
    auto operand_hash = bakeInSizes ? operand.shapeHash() : operand.hash();
    hash = HashCombine(hash, operand_hash);
  }
  for (auto& shape : shapes) {
    hash = HashCombine(hash, shape.hash(bakeInSizes));
  }
  return hash;
}

} // namespace

// Adds a static hook that is run after every single TorchMlirNode is
// initialized.
static std::vector<std::function<void(TorchMlirNode*)>> constructor_hooks;

void TorchMlirNode::addConstructorHook(std::function<void(TorchMlirNode*)> f) {
  constructor_hooks.emplace_back(f);
}
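
// Primary constructor. Computes two hashes up front: shape_hash_, which bakes
// concrete sizes into the operand hashes, and dag_hash_, which only does so
// when dynamic shapes are disabled. Registered constructor hooks run last.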
TorchMlirNode::TorchMlirNode(
    OpKind op, OpList operands, std::vector<Shape>&& shapes,
    size_t num_outputs, hash_t hash_seed)
    : Node(op, operands, std::move(shapes), num_outputs) {
  hash_seed = HashCombine(op.hash(), hash_seed);
  shape_hash_ = OperandHashes(operands, this->shapes(), hash_seed, true);
  dag_hash_ =
      (enableDynamicShape()
           ? OperandHashes(operands, this->shapes(), hash_seed, false)
           : shape_hash_);

  for (std::function<void(TorchMlirNode*)>& f : constructor_hooks) {
    f(this);
  }
}
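
// Delegating constructor for ops whose shape is not known up front: starts
// with no shapes and derives them via shape_fn through Node::addComputedShape.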
TorchMlirNode::TorchMlirNode(
    OpKind op, OpList operands, const std::function<Shape()>& shape_fn,
    size_t num_outputs, hash_t hash_seed)
    : TorchMlirNode(
          op, operands, std::vector<Shape>{}, num_outputs, hash_seed) {
  addComputedShape(shape_fn);
}

TorchMlirNode::TorchMlirNode(
    OpKind op, OpList operands, size_t num_outputs, hash_t hash_seed)
    : TorchMlirNode(
          op, operands, std::vector<Shape>{}, num_outputs, hash_seed) {}

TorchMlirNode::TorchMlirNode(
    OpKind op, Shape shape, size_t num_outputs, hash_t hash_seed)
    : TorchMlirNode(op, {}, {std::move(shape)}, num_outputs, hash_seed) {}
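
// dag_hash_ identifies this node within the lazy IR graph; shape_hash_
// additionally incorporates concrete operand sizes (see the constructor).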
hash_t TorchMlirNode::hash() const { return dag_hash_; }
hash_t TorchMlirNode::shapeHash() const { return shape_hash_; }
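
// Returns operand `index` downcast to a TorchMlirNode, or nullptr if that
// operand is some other Node subclass.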
TorchMlirNode* TorchMlirNode::mlir_node(int index) const {
  return dynamic_cast<TorchMlirNode*>(operands_.at(index).get());
}

///////////////////////////////////////////////////////////////////////////////
// TorchMlirTensorList
///////////////////////////////////////////////////////////////////////////////
OpKind TorchMlirTensorList::ClassOpKind() {
  // Note: this OpKind is separate from ltc_ops.h since it would be a circular
  // import otherwise.
  static const OpKind tensor_list_opkind =
      OpKind::Get("lazy_tensors::tensor_list");
  return tensor_list_opkind;
}

TorchMlirTensorList::TorchMlirTensorList(OpList values)
    : TorchMlirNode(
          /*op=*/TorchMlirTensorList::ClassOpKind(),
          /*operands=*/values,
          /*shapes=*/std::vector<Shape>(),
          /*num_outputs=*/1,
          /*hash_seed=*/kHashSeed) {}
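
// Lowers the list to TorchScript by fetching the JIT value of each operand
// and emitting a single prim::ListConstruct node of List[Tensor] type.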
torch::lazy::TorchMlirOpVector TorchMlirTensorList::Lower(
    TorchMlirFunction function, TorchMlirLoweringContext* loctx) const {
  std::vector<torch::jit::Value*> tensor_list;
  CHECK(!operands().empty());
  for (const torch::lazy::Output& operand : operands()) {
    tensor_list.emplace_back(loctx->GetOutputOp(operand));
  }

  auto graph = function->graph();
  auto listnode =
      graph->insertNode(graph->createList(c10::TensorType::get(), tensor_list));
  return {listnode->output()};
}

///////////////////////////////////////////////////////////////////////////////
// TorchMlirOptionalTensorList
///////////////////////////////////////////////////////////////////////////////
OpKind TorchMlirOptionalTensorList::ClassOpKind() {
  // Note: this OpKind is separate from ltc_ops.h since it would be a circular
  // import otherwise.
  static const OpKind tensor_list_opkind =
      OpKind::Get("lazy_tensors::optional_tensor_list");
  return tensor_list_opkind;
}

TorchMlirOptionalTensorList::TorchMlirOptionalTensorList(OpList values)
    : TorchMlirNode(
          /*op=*/TorchMlirOptionalTensorList::ClassOpKind(),
          /*operands=*/values,
          /*shapes=*/std::vector<Shape>(),
          /*num_outputs=*/1,
          /*hash_seed=*/kHashSeed) {}
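
// Identical lowering to TorchMlirTensorList, except the emitted list's
// element type is Optional[Tensor].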
torch::lazy::TorchMlirOpVector TorchMlirOptionalTensorList::Lower(
    TorchMlirFunction function, TorchMlirLoweringContext* loctx) const {
  std::vector<torch::jit::Value*> tensor_list;
  CHECK(!operands().empty());
  for (const torch::lazy::Output& operand : operands()) {
    tensor_list.emplace_back(loctx->GetOutputOp(operand));
  }

  auto graph = function->graph();
  auto listnode = graph->insertNode(graph->createList(
      c10::OptionalType::create(c10::TensorType::get()), tensor_list));
  return {listnode->output()};
}

} // namespace lazy
} // namespace torch