Resolve CI testing failure for Lazy Tensor Core (#1088)

* Xfail unsupported ops

* Register FuncDialect

* Include dynamic_ir in build

* Code reformat

* Enable LTC tests for macOS and Source Build
Henry Tu 2022-07-25 19:40:25 -04:00 committed by Henry Tu
parent 0d16a91656
commit 70395de197
8 changed files with 23 additions and 10 deletions


@@ -232,8 +232,6 @@ LTC_XFAIL_SET = {
     "HBC_basic",
     "HardTanhIntModule_basic",
     "HardTanhModule_basic",
-    "HardswishModule_basic",
-    "HardswishRandomModule_basic",
     "IndexPut1DFloatAccumulateModule_basic",
     "IndexPut1DFloatNonAccumulateModule_basic",
     "IndexPut1DIntAccumulateModule_basic",
@@ -268,10 +266,10 @@ LTC_XFAIL_SET = {
     "IndexPutImpl3DFloatNonAccumulateModule_basic",
     "IndexTensorModule3dInput_basic",
     "IndexTensorModule_basic",
+    "IndexTensorSelectDimModule_basic",
     "Matmul_dot",
     "Matmul_matvec",
     "Matmul_vecmat",
-    "MobilenetV3Module_basic",
     "MulIntModule_basic",
     "NeFloatIntModule_basic",
     "NeIntModule_basic",
@@ -308,6 +306,7 @@ LTC_XFAIL_SET = {
     "SubFloatModule_basic",
     "SubIntModule_basic",
     "TableBatchEmbeddingModule_basic",
+    "TensorsConcatNegativeDimModule_basic",
     "TensorToBoolZeroRank_basic",
     "TensorToBool_basic",
     "TensorToFloatZeroRank_basic",
@@ -319,6 +318,14 @@ LTC_XFAIL_SET = {
     "UniformStaticModule_basic",
     "UnsafeViewCollapseDynamicWithAtenSizeIntModule_basic",
     "VarBiasedModule_basic",
+    "VarDimAllDimReduceModule_basic",
+    "VarDimBiasedModule_basic",
+    "VarDimKeepDimFalseModule_basic",
+    "VarDimModule_basic",
+    "VarDimMultiDimModule_basic",
+    "VarDimNegativeModule_basic",
+    "VarDimSingleDimModule_basic",
+    "VarDimUnbiasedModule_basic",
     "VarUnbiasedModule_basic",
     "ViewCollapseDynamicWithAtenSizeIntModule_basic",
 }


@@ -10,6 +10,7 @@ add_mlir_library(TorchMLIRInitAll
   Core
 
   LINK_LIBS PUBLIC
+  MLIRFuncDialect
   MLIRIR
   MLIRSupport


@@ -9,6 +9,7 @@
 #include "torch-mlir/InitAll.h"
 
+#include "mlir/Dialect/Func/IR/FuncOps.h"
 #include "mlir/IR/Dialect.h"
 #include "torch-mlir-dialects/Dialect/TMTensor/IR/TMTensorDialect.h"
 #include "torch-mlir-dialects/Dialect/TMTensor/Transforms/Passes.h"
@@ -20,6 +21,7 @@
 #include "torch-mlir/RefBackend/Passes.h"
 
 void mlir::torch::registerAllDialects(mlir::DialectRegistry &registry) {
+  registry.insert<mlir::func::FuncDialect>();
   registry.insert<mlir::torch::Torch::TorchDialect>();
   registry.insert<mlir::torch::TorchConversion::TorchConversionDialect>();
   registry.insert<mlir::torch::TMTensor::TMTensorDialect>();
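For context, a minimal sketch of how this registry is consumed downstream. The main() wrapper is illustrative and not part of the patch, but registerAllDialects and the MLIR types are the real APIs; without the func::FuncDialect entry added above, creating a func.func (which the LTC lowering emits for each computation) in a context built from this registry would fail with an unregistered-dialect error:

#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/IR/DialectRegistry.h"
#include "mlir/IR/MLIRContext.h"
#include "torch-mlir/InitAll.h"

int main() {
  mlir::DialectRegistry registry;
  mlir::torch::registerAllDialects(registry);

  // Ops can only be created once their dialect is available; the
  // registry makes func, torch, etc. loadable in this context.
  mlir::MLIRContext context(registry);
  context.loadDialect<mlir::func::FuncDialect>();
  return 0;
}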


@@ -61,6 +61,7 @@ add_library(torch_mlir_ltc_backend SHARED
   ${LTC_GENERATED}
   ${LTC_BACKEND_DEPENDS}
   backend_impl.cpp
+  dynamic_ir.cpp
   mlir_node.cpp
   ops/device_data.cpp
   ops/generic.cpp


@@ -10,7 +10,7 @@
 // https://github.com/pytorch/pytorch/blob/master/torch/csrc/lazy/ts_backend/dynamic_ir.cpp
 //===----------------------------------------------------------------------===//
 
-#include <torch/csrc/lazy/ts_backend/dynamic_ir.h>
+#include "dynamic_ir.h"
 
 namespace torch {
 namespace lazy {


@@ -52,11 +52,11 @@ TorchMlirLoweringContext::TorchMlirLoweringContext(
       function_(
           std::make_shared<torch::jit::GraphFunction>(name, graph_, nullptr)),
       mlir_context_(mlirContextCreate()) {
+  RegisterMlirDialects();
+
   for (auto node : post_order) {
     Lower(node);
   }
-
-  RegisterMlirDialects();
 }
 
 void TorchMlirLoweringContext::Lower(const Node* node) {
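The reordering appears to be the point of this hunk: Lower(node) builds MLIR operations inside mlir_context_, and a dialect must be registered before the first op from it is created. Restated as an annotated sketch (the code matches the patch; the comments are illustrative):

RegisterMlirDialects();          // 1. make torch/func dialects available in mlir_context_
for (auto node : post_order) {
  Lower(node);                   // 2. only then translate lazy nodes into MLIR ops
}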


@@ -137,7 +137,7 @@ public:
   const std::string debug_string() const;
 
-  const std::string to_string() const;
+  const std::string to_string() const override;
 
 private:
   std::vector<std::string> parameter_names_;
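Marking to_string() with `override` is more than style: if the upstream LTC base class ever changes the virtual's signature, the compiler now flags the mismatch instead of silently treating this as a new, never-dispatched function. A self-contained illustration (Base and Derived are hypothetical, not from this patch):

#include <string>

struct Base {
  virtual const std::string to_string() const = 0;
  virtual ~Base() = default;
};

struct Derived : Base {
  // Without `override`, dropping the trailing `const` here would still
  // compile and quietly hide the base virtual; with it, the compiler
  // rejects any signature drift.
  const std::string to_string() const override { return "derived"; }
};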


@@ -32,7 +32,9 @@ struct ReferenceLazyBackendDeviceType : public BackendDeviceType {
   ReferenceLazyBackendDeviceType(int8_t device_type)
       : device_type_(static_cast<c10::DeviceType>(device_type)) {}
 
-  std::string toString() const override { return c10::DeviceTypeName(device_type_); }
+  std::string toString() const override {
+    return c10::DeviceTypeName(device_type_);
+  }
 
   c10::DeviceType device_type_;
 };
@@ -127,11 +129,11 @@ public:
   /**
    * Device Configuration
    * */
-  std::shared_ptr<torch::lazy::BackendDeviceType> GetDefaultDeviceType() const {
+  std::shared_ptr<torch::lazy::BackendDeviceType>
+  GetDefaultDeviceType() const override {
     return std::make_shared<BackendDeviceType>(default_device_type_);
   }
 
   void SetDefaultDeviceType(int8_t device_type) override {
     default_device_type_ = ReferenceLazyBackendDeviceType(device_type);
   }
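For reference, a hedged usage sketch of how these overrides are reached through the LTC backend interface; getBackend() is declared in torch/csrc/lazy/backend/backend_interface.h, and the printing helper is illustrative:

#include <iostream>
#include <torch/csrc/lazy/backend/backend_interface.h>

void printDefaultDeviceType() {
  // getBackend() returns the registered BackendImplInterface; this call
  // dispatches to the GetDefaultDeviceType() override above.
  auto device_type = torch::lazy::getBackend()->GetDefaultDeviceType();
  std::cout << device_type->toString() << std::endl;
}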