mirror of https://github.com/llvm/torch-mlir
Add value coding for ndarray.

* This lets us import arrays from the outer environment, which is the first step to actually handling numpy ops.
branch pull/1/head
parent f6721c173d
commit a4f3ce1ed3
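
At a high level, the new value coding path works like this: when the importer captures a Python value from the enclosing environment, it asks a chain of value coders to turn it into IR, and the new numpy coder handles np.ndarray by emitting a dense constant tensor and wrapping it with numpy.create_array_from_tensor. Below is a minimal standalone sketch of that dispatch pattern in plain Python. The Mock* classes and the tuple "IR" are purely illustrative (not npcomp APIs), and the first-match-wins chain behavior is an assumption based on the ValueCoderChain loop shown later in this commit.

import numpy as np


class MockNdArrayValueCoder:
  """Illustrative stand-in for the new NdArrayValueCoder (not the real npcomp API)."""

  def code_py_value_as_const(self, py_value):
    if isinstance(py_value, np.ndarray):
      # The real coder builds a dense elements attribute, materializes it as a
      # constant tensor op, then wraps it with numpy.create_array_from_tensor.
      return ("numpy.create_array_from_tensor",
              ("constant", str(py_value.dtype), py_value.tolist()))
    return NotImplemented


class MockValueCoderChain:
  """Illustrative stand-in for ValueCoderChain: ask each coder in turn and
  return the first result that is not NotImplemented (assumed semantics)."""

  def __init__(self, sub_coders):
    self._sub_coders = tuple(sub_coders)

  def code_py_value_as_const(self, py_value):
    for sc in self._sub_coders:
      result = sc.code_py_value_as_const(py_value)
      if result is not NotImplemented:
        return result
    return NotImplemented


chain = MockValueCoderChain([MockNdArrayValueCoder()])
print(chain.code_py_value_as_const(np.ones((2, 3))))  # coded as array-from-tensor
print(chain.code_py_value_as_const("hello"))          # NotImplemented
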
@@ -38,7 +38,8 @@ def Numpy_NarrowOp : Numpy_Op<"narrow", []> {
 // NdArray type handling
 //----------------------------------------------------------------------------//
 
-def Numpy_CopyToArray : Numpy_Op<"create_array_from_tensor", [NoSideEffect]> {
+def Numpy_CreateArrayFromTensorOp : Numpy_Op<"create_array_from_tensor", [
+    NoSideEffect]> {
   let summary = "Creates an ndarray from a tensor.";
   let description = [{
     Creates a new ndarray that will contain the data of the given tensor.
@@ -54,7 +55,7 @@ def Numpy_CopyToArray : Numpy_Op<"create_array_from_tensor", [NoSideEffect]> {
   }];
 }
 
-def Numpy_CopyToTensor : Numpy_Op<"copy_to_tensor", []> {
+def Numpy_CopyToTensorOp : Numpy_Op<"copy_to_tensor", []> {
   let summary = "Copies an ndarray, yielding a value-typed tensor.";
   let description = [{
     The semantics of this operation connote a copy of the data in the source
@@ -11,6 +11,8 @@
 
 #include "npcomp/Dialect/Basicpy/IR/BasicpyDialect.h"
 #include "npcomp/Dialect/Basicpy/IR/BasicpyOps.h"
+#include "npcomp/Dialect/Numpy/IR/NumpyDialect.h"
+#include "npcomp/Dialect/Numpy/IR/NumpyOps.h"
 
 namespace mlir {
 namespace NPCOMP {
@@ -102,6 +104,26 @@ public:
               auto op = opBuilder.create<Basicpy::SlotObjectGetOp>(
                   loc, resultType, slotObject, indexAttr);
               return op.getOperation();
+            })
+        .def("numpy_create_array_from_tensor_op",
+             [](BasicpyDialectHelper &self, PyValue source) -> PyOperationRef {
+               auto sourceType = source.value.getType().dyn_cast<TensorType>();
+               if (!sourceType) {
+                 throw py::raiseValueError("expected tensor type for "
+                                           "numpy_create_array_from_tensor_op");
+               }
+               auto dtype = sourceType.getElementType();
+               auto ndarrayType =
+                   Numpy::NdArrayType::get(dtype, self.getContext());
+               OpBuilder &opBuilder = self.pyOpBuilder.getBuilder(true);
+               Location loc = self.pyOpBuilder.getCurrentLoc();
+               auto op = opBuilder.create<Numpy::CreateArrayFromTensorOp>(
+                   loc, ndarrayType, source.value);
+               return op.getOperation();
+             })
+        .def("numpy_NdArrayType",
+             [](BasicpyDialectHelper &self, PyType dtype) -> PyType {
+               return Numpy::NdArrayType::get(dtype.type, self.getContext());
             });
   }
 };
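
These helper bindings are what the Python-level value coder added later in this commit calls into. A condensed sketch of that call pattern, assuming an IR helper object ir_h such as the env.ir_h used by the compiler (the method names are the ones added or exercised in this commit):

def code_ndarray_as_const(ir_h, ndarray):
  # Build a dense elements attribute from the numpy array, materialize it as a
  # constant tensor, then wrap it in a !numpy.ndarray via the new helper method.
  dense_attr = ir_h.context.dense_elements_attr(ndarray)
  tensor_value = ir_h.constant_op(dense_attr.type, dense_attr).result
  return ir_h.numpy_create_array_from_tensor_op(tensor_value).result

The numpy_NdArrayType binding likewise lets Python code construct the !numpy.ndarray type for a given dtype.
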
@@ -0,0 +1,18 @@
+# RUN: %PYTHON %s | npcomp-opt -split-input-file | FileCheck %s --dump-input=fail
+
+import numpy as np
+from npcomp.compiler import test_config
+
+import_global = test_config.create_import_dump_decorator()
+
+global_data = (np.zeros((2, 3)) + [1.0, 2.0, 3.0] * np.reshape([1.0, 2.0],
+                                                               (2, 1)))
+
+
+# CHECK-LABEL: func @global_array_to_const
+@import_global
+def global_array_to_const():
+  # CHECK: %[[CST:.*]] = constant dense<{{\[\[}}1.000000e+00, 2.000000e+00, 3.000000e+00], [2.000000e+00, 4.000000e+00, 6.000000e+00]]> : tensor<2x3xf64>
+  # CHECK: numpy.create_array_from_tensor %[[CST]] : (tensor<2x3xf64>) -> !numpy.ndarray<f64>
+  local_data = global_data
+  return local_data
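
For reference, the broadcast expression for global_data works out to exactly the dense constant the CHECK line expects; a quick standalone check:

import numpy as np

global_data = (np.zeros((2, 3)) + [1.0, 2.0, 3.0] * np.reshape([1.0, 2.0],
                                                               (2, 1)))
print(global_data)
# [[1. 2. 3.]
#  [2. 4. 6.]]
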
@@ -113,6 +113,9 @@ class ValueCoderChain(ValueCoder):
   def __init__(self, sub_coders: Sequence[ValueCoder]):
     self._sub_coders = tuple(sub_coders)
 
+  def __repr__(self):
+    return "ValueCoderChain({})".format(self._sub_coders)
+
   def code_py_value_as_const(self, env: "Environment",
                              py_value) -> Union[_NotImplementedType, ir.Value]:
     for sc in self._sub_coders:
@@ -11,6 +11,7 @@ from .interfaces import *
 from .partial_eval_base import *
 from .target import *
 from .value_coder_base import *
+from .value_coder_numpy import *
 
 
 def create_import_dump_decorator(*,
@@ -30,7 +31,10 @@ def create_import_dump_decorator(*,
 
 
 def create_test_config(target_factory: TargetFactory = GenericTarget64):
-  value_coder = BuiltinsValueCoder()
+  value_coder = ValueCoderChain([
+      BuiltinsValueCoder(),
+      CreateNumpyValueCoder(),
+  ])
   pe_hook = build_default_partial_eval_hook()
 
   return Configuration(target_factory=target_factory,
@@ -0,0 +1,39 @@
+# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+# See https://llvm.org/LICENSE.txt for license information.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+"""Value coders for Numpy types."""
+
+import numpy as np
+from typing import Union
+
+from _npcomp.mlir import ir
+
+from . import logging
+from .interfaces import *
+
+__all__ = [
+    "CreateNumpyValueCoder",
+]
+
+_NotImplementedType = type(NotImplemented)
+
+
+class NdArrayValueCoder(ValueCoder):
+  """Value coder for numpy types."""
+  __slots__ = []
+
+  def code_py_value_as_const(self, env: Environment,
+                             py_value) -> Union[_NotImplementedType, ir.Value]:
+    # TODO: Query for ndarray compat (for duck typed and such)
+    # TODO: Have a higher level name resolution signal which indicates const
+    ir_h = env.ir_h
+    if isinstance(py_value, np.ndarray):
+      dense_attr = ir_h.context.dense_elements_attr(py_value)
+      tensor_type = dense_attr.type
+      tensor_value = ir_h.constant_op(tensor_type, dense_attr).result
+      return ir_h.numpy_create_array_from_tensor_op(tensor_value).result
+    return NotImplemented
+
+
+def CreateNumpyValueCoder() -> ValueCoder:
+  return ValueCoderChain((NdArrayValueCoder(),))
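
A usage note: CreateNumpyValueCoder returns a single-element ValueCoderChain rather than a bare NdArrayValueCoder, so it composes directly with other coders; the create_test_config change earlier in this commit registers it in a chain alongside BuiltinsValueCoder.
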
@@ -14,14 +14,14 @@ __all__ = [
 
 
 class DialectHelper(Basicpy.DialectHelper):
   r"""Dialect helper.
 
     >>> c = ir.MLIRContext()
     >>> h = DialectHelper(c, ir.OpBuilder(c))
     >>> m = c.new_module()
     >>> tensor_type = h.tensor_type(h.f32_type)
     >>> h.builder.insert_block_start(m.first_block)
     >>> f = h.func_op("foobar", h.function_type(
     ...   [tensor_type, tensor_type], [tensor_type]),
     ...   create_entry_block=True)
     >>> uf = h.numpy_ufunc_call_op("numpy.add", tensor_type,
     ...   *f.first_block.args)