mirror of https://github.com/llvm/torch-mlir
Misc fixes for MacOS. (#255)
* Change aligned_alloc -> malloc. aligned_alloc can fail (and does on MacOS) and is an over-aggressive optimization for a reference backend.
* Fixed a fragile test that prints -0.0 on MacOS.
* Fail the test (not the framework) on failure to trace (Torch on MacOS is missing features).
* Fix .so -> .dylib for the compiler runtime shared library.
parent 2dbab50444
commit ec611c1e6f
@@ -227,9 +227,9 @@ def run_tests(tests: List[Test], config: TestConfig) -> List[TestResult]:
     """Invoke the given `Test`'s with the provided `TestConfig`."""
     results = []
     for test in tests:
-        golden_trace = _generate_golden_trace(test)
         # TODO: Precompile everything in parallel.
         try:
+            golden_trace = _generate_golden_trace(test)
             compiled = config.compile(test.program_factory())
         except Exception as e:
             results.append(
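Note on the hunk above: moving `_generate_golden_trace` inside the `try` means a tracing failure (e.g. a Torch feature missing on macOS) is reported as a failure of that one test instead of an uncaught exception that kills the whole harness. A minimal self-contained sketch of the same pattern; all names below (run_all, fake_trace, FAILING_TESTS) are illustrative, not the framework's API:

    # Per-test failure capture: exceptions become results, the loop continues.
    def run_all(tests, trace_fn):
        results = []
        for name in tests:
            try:
                trace = trace_fn(name)  # may raise, like _generate_golden_trace
            except Exception as e:
                results.append((name, "FAIL", str(e)))
                continue
            results.append((name, "OK", trace))
        return results

    FAILING_TESTS = {"quantized_op"}  # stand-in for ops Torch cannot trace

    def fake_trace(name):
        if name in FAILING_TESTS:
            raise NotImplementedError(f"cannot trace {name}")
        return f"trace({name})"

    print(run_all(["mm", "quantized_op"], fake_trace))
    # [('mm', 'OK', 'trace(mm)'), ('quantized_op', 'FAIL', 'cannot trace quantized_op')]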
@@ -488,7 +488,8 @@ RtValue refbackrt::createRtValueFromOutputArgInfo(const OutputArgInfo &info) {
   switch (info.elementType) {
   case ElementType::F32: {
     auto byteSize = numel * sizeof(float);
-    data = static_cast<void *>(aligned_alloc(32, byteSize));
+    data = static_cast<void *>(malloc(byteSize));
     assert(data && "could not allocate tensor");
     memset(data, 0, byteSize);
     return RtValue(Tensor::create(shape, ElementType::F32, data));
     break;
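Background for the hunk above: C11 aligned_alloc requires the size to be an integral multiple of the alignment, and macOS enforces this by returning NULL (errno EINVAL), while glibc is permissive. Since byteSize here is numel * sizeof(float), a one-element tensor asks for 4 bytes at 32-byte alignment, and the allocation fails on macOS. A hedged reproduction via ctypes (assumes a libc that exports aligned_alloc, i.e. macOS 10.15+ or a modern glibc):

    import ctypes
    import ctypes.util

    libc = ctypes.CDLL(ctypes.util.find_library("c"), use_errno=True)
    libc.aligned_alloc.restype = ctypes.c_void_p
    libc.aligned_alloc.argtypes = [ctypes.c_size_t, ctypes.c_size_t]

    # 4 bytes (one float) is not a multiple of the 32-byte alignment.
    p = libc.aligned_alloc(32, 4)
    print(p)  # None (NULL) on macOS; typically a valid address on glibc Linux
    if p:
        libc.free(ctypes.c_void_p(p))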
@@ -3,6 +3,7 @@
 # SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception

 import os
+import platform

 _refjit = None

@@ -40,7 +41,11 @@ def is_enabled() -> bool:
 def get_runtime_libs():
   # The _refjit_resources directory is at the npcomp.compiler level.
   resources_dir = os.path.join(os.path.dirname(__file__))
-  return [os.path.join(resources_dir, "libNPCOMPCompilerRuntimeShlib.so")]
+  suffix = ".so"
+  if platform.system() == "Darwin":
+    suffix = ".dylib"
+  shlib_name = f"libNPCOMPCompilerRuntimeShlib{suffix}"
+  return [os.path.join(resources_dir, shlib_name)]


 class JitModuleInvoker:
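Usage sketch for the hunk above (the helper mirrors, but is not, the patched get_runtime_libs; the path is made up). Note the patch only special-cases Darwin; every other platform keeps the .so suffix:

    import os
    import platform

    def runtime_shlib_path(resources_dir, base="libNPCOMPCompilerRuntimeShlib"):
        # Shared libraries use .dylib on macOS (Darwin) and .so elsewhere.
        suffix = ".dylib" if platform.system() == "Darwin" else ".so"
        return os.path.join(resources_dir, base + suffix)

    print(runtime_shlib_path("/opt/npcomp/lib"))
    # /opt/npcomp/lib/libNPCOMPCompilerRuntimeShlib.dylib on macOS
    # /opt/npcomp/lib/libNPCOMPCompilerRuntimeShlib.so elsewhere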
@@ -65,7 +65,7 @@ class _LiterateEnum(Enum):
   Traceback (most recent call last):
   ...
   ValueError: Cannot parse SampleEnum 1.0

   """

   @classmethod
@@ -111,11 +111,11 @@ class TypeClass(_LiterateEnum):

 class ValueType:
   """The type a value can take in the npcomp language.

   Types of values in npcomp are always being refined and are therefore
   mutable. Instances represent the type derived for a single value, not a
   concept of "typeness" generally.

   >>> ValueType()
   Any
   >>> ValueType('NdArray')
@@ -166,7 +166,7 @@ class ValueType:

 class ValueTypeList:
   """Models a list of ValueTypes.

   >>> v3 = ValueTypeList(3)
   >>> v3
   (Any, Any, Any)
@@ -178,7 +178,7 @@ class ValueTypeList:
   >>> v3[2] += Rank(2)
   >>> v3
   (Any, Any, NdArray[Rank(2)])

   With names:
   >>> v3 = ValueTypeList(3, [None, "b", None])
   >>> v3[1] = 'NdArray'
@@ -221,11 +221,11 @@ class ValueTypeList:

 class Signature:
   """A function signature.

   This currently only models a linear list of positional arguments and
   assumes that multiple results will be represented by some form of tuple
   type.

   >>> Signature()
   () -> Any
   >>> Signature(2)
@@ -279,7 +279,7 @@ class Signature:

 class ArrayParams:
   """Represents parameters defining how to construct an array.

   >>> ArrayParams()
   ArrayParams(dtype=Unspec)
   >>> ArrayParams(np.float32)
@@ -309,26 +309,26 @@ class ArrayParams:
   @classmethod
   def from_constraints(cls, constraints):
     """Constructs params for a TypeConstraints list.

     Unconstrained:
     >>> ArrayParams.from_constraints(TypeConstraints())
     ArrayParams(dtype=Unspec)

     DType constrained:
     >>> ArrayParams.from_constraints(TypeConstraints(DType(np.float32)))
     ArrayParams(dtype=float32)

     Rank constrained:
     >>> ArrayParams.from_constraints(TypeConstraints(Rank(2)))
     ArrayParams(dtype=Unspec, shape=(-1, -1))

     Shape constrained:
     >>> ArrayParams.from_constraints(TypeConstraints(Shape(1, 2, 3)))
     ArrayParams(dtype=Unspec, shape=(1, 2, 3))
     >>> ArrayParams.from_constraints(TypeConstraints(
     ...     Rank(3), Shape(1, 2, 3)))
     ArrayParams(dtype=Unspec, shape=(1, 2, 3))

     Shape constrained with dynamic dim constraint:
     >>> ArrayParams.from_constraints(TypeConstraints(
     ...     Shape(1, 2, 3), DynamicDim(1)))
@@ -336,7 +336,7 @@ class ArrayParams:
     >>> ArrayParams.from_constraints(TypeConstraints(
     ...     Shape(1, 2, 3), DynamicDim((0, 2))))
     ArrayParams(dtype=Unspec, shape=(-1, 2, -1))

     Errors:
     >>> ArrayParams.from_constraints(TypeConstraints(
     ...     Rank(4), Shape(1, 2, 3)))
@@ -346,7 +346,7 @@ class ArrayParams:
     >>> ArrayParams.from_constraints(TypeConstraints(
     ...     Shape(1, 2, 3), DynamicDim((0, 5))))
     Traceback (most recent call last):
     ...
     ValueError: Out of range DimFlag(Dynamic, (0, 5)) for shape [-1, 2, 3]
     """
     # TODO: Should have a 'canonicalize' method on TypeConstraints which
@@ -395,7 +395,7 @@ class ArrayParams:
   @property
   def is_concrete(self):
     """Returns true if the parameters are sufficient to construct an ndarray.

     >>> ArrayParams().is_concrete
     False
     >>> ArrayParams(dtype=np.float32).is_concrete
@@ -417,26 +417,26 @@ class ArrayParams:
   def mlir_tensor_type_asm(self):
     """Get a corresponding MLIR tensor type.

     Fully Unspecified:
     >>> ArrayParams().mlir_tensor_type_asm
     'tensor<*x!numpy.any_dtype>'

     Unranked:
     >>> ArrayParams(dtype=np.float32).mlir_tensor_type_asm
     'tensor<*xf32>'

     Ranked:
     >>> ArrayParams(dtype=np.float32, rank=3).mlir_tensor_type_asm
     'tensor<?x?x?xf32>'
     >>> ArrayParams(dtype=np.float32, shape=(-1, -1)).mlir_tensor_type_asm
     'tensor<?x?xf32>'

     Scalar:
     >>> ArrayParams(dtype=np.float32, rank=0).mlir_tensor_type_asm
     'tensor<f32>'
     >>> ArrayParams(dtype=np.float32, shape=()).mlir_tensor_type_asm
     'tensor<f32>'

     Shaped:
     >>> ArrayParams(dtype=np.float32, shape=(2, 3)).mlir_tensor_type_asm
     'tensor<2x3xf32>'
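For readers decoding the doctest above: in MLIR tensor type syntax, `?` is a dynamic dimension, `*x` marks an unranked tensor, and a bare element type (`tensor<f32>`) is a rank-0 tensor. A rough stand-alone sketch of the shape-to-asm mapping (a hypothetical helper, not the npcomp implementation):

    def tensor_type_asm(shape, dtype_asm="f32"):
        # shape=None -> unranked; -1 entries -> dynamic ('?') dimensions.
        if shape is None:
            return f"tensor<*x{dtype_asm}>"
        dims = "x".join("?" if d == -1 else str(d) for d in shape)
        return f"tensor<{dims}x{dtype_asm}>" if dims else f"tensor<{dtype_asm}>"

    print(tensor_type_asm((2, 3)))    # tensor<2x3xf32>
    print(tensor_type_asm((-1, -1)))  # tensor<?x?xf32>
    print(tensor_type_asm(()))        # tensor<f32>
    print(tensor_type_asm(None))      # tensor<*xf32>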
@@ -460,12 +460,12 @@ class ArrayParams:

   def new_ndarray(self):
     """Creates a new ndarray from these params.

     >>> ArrayParams().new_ndarray()
     Traceback (most recent call last):
     ...
     ValueError: ArrayParams(dtype=Unspec) is not concrete
-    >>> ArrayParams(np.float32, (1, 2)).new_ndarray() * 0.0
+    >>> (ArrayParams(np.float32, (1, 2)).new_ndarray() * 0.0 + 1.0) * 0.0
     array([[0., 0.]], dtype=float32)
     """
     if not self.is_concrete:
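This is the fragile doctest from the commit message. new_ndarray() presumably hands back uninitialized memory, and multiplying a negative float by 0.0 yields IEEE-754 negative zero, which NumPy prints as -0.; on macOS the garbage bytes happened to come out negative. Routing through + 1.0 first makes the final product a deterministic positive zero. A minimal reproduction, using a fixed negative value as a stand-in for the uninitialized buffer:

    import numpy as np

    # Stand-in for uninitialized memory that happens to hold a negative value.
    arr = np.array([[-1.5, 2.0]], dtype=np.float32)

    print(arr * 0.0)                # [[-0.  0.]]  the sign of zero leaks through
    print((arr * 0.0 + 1.0) * 0.0)  # [[0. 0.]]    adding 1.0 first forces +0.0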
@@ -480,7 +480,7 @@ class TypeConstraint:

 class TypeConstraints(list):
   """Collection of type constraints.

   >>> TypeConstraints([DynamicDim()])
   TypeConstraints(DimFlag(Dynamic, Unspec))
   >>> TypeConstraints([DynamicDim(), Rank(4)])
@@ -554,9 +554,9 @@ class ArrayConstraint(TypeConstraint):

 class DType(ArrayConstraint):
   """A constraint on a dtype.

   DType constraints are exclusive with only one permitted in a set.

   >>> DType(np.float32)
   DType(float32)
   >>> DType("foobar")
@@ -597,7 +597,7 @@ class Rank(ArrayConstraint):
   Traceback (most recent call last):
   ...
   AssertionError

   """
   __slots__ = ["_rank"]

@@ -619,9 +619,9 @@ class Rank(ArrayConstraint):

 class Shape(ArrayConstraint):
   """Establishes a static shape for an array.

   All dimensions must be a non-negative integer or Unspec.

   >>> Shape(1, 2, 3)
   Shape(1, 2, 3)
   >>> Shape(Unspec, 1)
@@ -665,9 +665,9 @@ class DimFlagEnum(_LiterateEnum):

 class DimFlag(ArrayConstraint):
   """Generic flag applying to one or more dimensions.

   If dims is Unspec, the flag applies to all dims.

   >>> DimFlag("Dynamic")
   DimFlag(Dynamic, Unspec)
   >>> DimFlag("Dynamic", 1)