Refactor old tracing tests and remove deprecated ops.

* Move old doctests to run under lit.
* Move old custom FileCheck tests to the pytest directory (under lit).
* Rename some old ufunc ops in the tracer.
pull/1/head
Stella Laurenzo 2020-06-29 16:19:03 -07:00
parent 7ca292ade5
commit 046751254f
12 changed files with 59 additions and 391 deletions

View File

@@ -55,7 +55,7 @@ The project is roughly split into the following areas of code:
* C++ [include](include) and [lib](lib) trees, following LLVM/MLIR conventions
* LIT testing trees:
  * [test](test): Lit/FileCheck tests covering core MLIR based infra
  * [pytest/Compiler](pytest/Compiler): Lit test suite that drives the compiler
    infra from Python
  * [backend_test](backend_test): Lit test suites conditionally enabled for
    each backend
@@ -82,8 +82,6 @@ LLVM_COMMIT="$(cat ./built_tools/llvm.version)"
cd build
ninja
ninja check-npcomp
# Note: currently, some python tests run separately
./python/run_tests.py

# Setup PYTHONPATH for interactive use
export PYTHONPATH="$(realpath build/python):$(realpath build/python_native):$(realpath build/iree/bindings/python)"
@@ -95,12 +93,9 @@ The cmake configuration populates symlinks in the `build/python` directory
mirroring the source layout. This allows edit-run without rebuilding (unless
files are added/removed).

Configuring the `PYTHONPATH` as above should be sufficient to run any
interactive tooling (`python3`, Jupyter/Colab, etc).

The `run_tests.py` script is special in that it sets up the PYTHONPATH
correctly when run.

Note that running the `cmake_configure.sh` script will also output a `.env`
file in the workspace folder with the correct PYTHONPATH set. This allows
tools like VSCode to work by default for debugging.

View File

@@ -102,67 +102,6 @@ def Numpy_BuiltinUfuncCallOp : Numpy_Op<"builtin_ufunc_call"> {
  }];
}
def Numpy_BuiltinUfuncOp : Numpy_Op<"builtin_ufunc", [Symbol]> {
  let summary = "References a built-in universal function";
  let description = [{
    This module-level op binds by name to a fully-qualified numpy built-in
    ufunc (i.e. "numpy.add") and carries metadata associated with it.

    Deprecated: Remove once using new builtin_ufunc_call.
  }];
}

def Numpy_GenericUfuncOp : Numpy_Op<"generic_ufunc", [
    IsolatedFromAbove,
    Symbol]> {
  let summary = "Defines a ufunc in terms of overloaded element-wise functions";
  let description = [{
    Deprecated: Remove once using new builtin_ufunc_call.
  }];
  let arguments = (ins
    TypeArrayAttr:$overload_types);
  let regions = (region
    VariadicRegion<AnyRegion>:$overloads);
}

def Numpy_UfuncReturnOp : Numpy_Op<"ufunc_return", [
    Terminator,
    HasParent<"Numpy::GenericUfuncOp">]> {
  let summary = "Return a value from a generic_ufunc";
  let description = [{
    Must terminate the basic block of a generic_ufunc overload.

    Deprecated: Remove once using new builtin_ufunc_call.
  }];
  let arguments = (ins
    Variadic<AnyType>:$operands
  );
  let assemblyFormat = "attr-dict ($operands^ `:` type($operands))?";
}
def Numpy_UfuncCallOp : Numpy_Op<"ufunc_call", []> {
  let summary = "Default operation on a ufunc";
  let description = [{
    Invokes a ufunc with the given arguments. This variant models the __call__
    behavior of a python ufunc except that it does not model the `out`
    parameter, which indicates an in-place update.

    Deprecated: Remove once using new builtin_ufunc_call.
  }];
  let arguments = (ins
    FlatSymbolRefAttr:$ufunc_ref,
    Variadic<Numpy_AnyArray>:$operands
  );
  let results = (outs
    Numpy_AnyArray:$result
  );
  let assemblyFormat = [{
    $ufunc_ref `(` operands `)` attr-dict `:` functional-type(operands, results)
  }];
}
//----------------------------------------------------------------------------//
// Built-in array functions
//
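For reference, the deprecated ops removed above are superseded by the existing `numpy.builtin_ufunc_call` op; its assembly form, as exercised by the updated lit tests later in this commit, looks like this (illustrative sketch only):

```mlir
// The built-in ufunc is named directly on the call, rather than via a
// module-level numpy.builtin_ufunc / numpy.generic_ufunc symbol.
func @builtin_ufunc(%arg0 : tensor<3xf64>, %arg1 : tensor<3xf64>) -> tensor<3xf64> {
  %0 = numpy.builtin_ufunc_call<"numpy.add"> (%arg0, %arg1) : (tensor<3xf64>, tensor<3xf64>) -> tensor<3xf64>
  return %0 : tensor<3xf64>
}
```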

View File

@@ -16,135 +16,6 @@
namespace mlir {
namespace NPCOMP {
namespace Numpy {
//===----------------------------------------------------------------------===//
// BuiltinUfuncOp
//===----------------------------------------------------------------------===//
static ParseResult parseBuiltinUfuncOp(OpAsmParser &parser,
                                       OperationState *result) {
  // Parse the name as a symbol.
  StringAttr nameAttr;
  if (parser.parseSymbolName(nameAttr, SymbolTable::getSymbolAttrName(),
                             result->attributes)) {
    return failure();
  }
  if (failed(parser.parseOptionalAttrDict(result->attributes))) {
    return failure();
  }
  return success();
}

static void printBuiltinUfuncOp(OpAsmPrinter &p, BuiltinUfuncOp op) {
  p << op.getOperationName() << " ";
  p.printSymbolName(op.getName());
  p.printOptionalAttrDict(op.getAttrs(), {SymbolTable::getSymbolAttrName()});
}

//===----------------------------------------------------------------------===//
// GenericUfuncOp
//===----------------------------------------------------------------------===//

static ParseResult parseGenericUfuncOp(OpAsmParser &parser,
                                       OperationState *result) {
  Builder b(result->getContext());

  // Parse the name as a symbol.
  StringAttr nameAttr;
  if (parser.parseSymbolName(nameAttr, SymbolTable::getSymbolAttrName(),
                             result->attributes))
    return failure();

  // Parse the body of overloads.
  if (parser.parseLParen())
    return failure();

  SmallVector<Attribute, 4> overloadTypes;
  for (bool first = true;; first = false) {
    if (first) {
      if (parser.parseOptionalKeyword("overload"))
        break;
    }
    if (!first) {
      if (parser.parseOptionalComma())
        break;
      if (parser.parseKeyword("overload"))
        return failure();
    }
    SmallVector<OpAsmParser::OperandType, 2> argNames;
    SmallVector<Type, 2> argTypes;
    SmallVector<Type, 1> resultTypes;
    SmallVector<NamedAttrList, 1> unusedAttrs;
    bool isVariadic = false;
    if (::mlir::impl::parseFunctionSignature(parser, false, argNames, argTypes,
                                             unusedAttrs, isVariadic,
                                             resultTypes, unusedAttrs))
      return failure();

    overloadTypes.push_back(TypeAttr::get(
        FunctionType::get(argTypes, resultTypes, result->getContext())));

    auto *region = result->addRegion();
    if (parser.parseRegion(*region, argNames, argTypes))
      return failure();
  }
  if (parser.parseRParen())
    return failure();

  // Parse 'attributes {...}'
  if (parser.parseOptionalAttrDictWithKeyword(result->attributes))
    return failure();

  result->addAttribute(b.getIdentifier("overload_types"),
                       b.getArrayAttr(overloadTypes));
  return success();
}

static void printGenericUfuncOp(OpAsmPrinter &p, GenericUfuncOp op) {
  p << op.getOperationName() << " @" << op.getName() << "(";
  bool first = true;
  for (auto it : llvm::enumerate(op.getRegions())) {
    auto *region = it.value();
    if (first)
      first = false;
    else
      p << ", ";
    if (region->empty()) {
      p << "<<OVERLOAD_ERROR>>";
      continue;
    }
    Block &entryBlock = region->front();
    p << "overload(";
    if (it.index() >= op.overload_types().size()) {
      p << "<<MISSING OVERLOAD TYPE>>";
      continue;
    }
    TypeAttr tattr = op.overload_types()[it.index()].cast<TypeAttr>();
    FunctionType overloadType = tattr.getValue().dyn_cast<FunctionType>();
    if (!overloadType) {
      p << "<<ILLEGAL OVERLOAD TYPE>>";
      continue;
    }
    if (overloadType.getNumInputs() != entryBlock.getNumArguments()) {
      p << "<<OVERLOAD ARG MISMATCH>>";
      continue;
    }
    for (unsigned i = 0, e = entryBlock.getNumArguments(); i < e; ++i) {
      auto arg = entryBlock.getArgument(i);
      if (i > 0)
        p << ", ";
      p.printOperand(arg);
      p << ": ";
      p.printType(overloadType.getInputs()[i]);
    }
    p << ")";
    p.printArrowTypeList(overloadType.getResults());
    p.printRegion(*region, false, true);
  }
  p << ")";
}
#define GET_OP_CLASSES
#include "npcomp/Dialect/Numpy/IR/NumpyOps.cpp.inc"
} // namespace Numpy

View File

@@ -1,12 +1,16 @@
+# RUN: %PYTHON %s | FileCheck %s --dump-input=fail
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-"""Test for the MLIR IR Python bindings"""
+"""Test for the MLIR IR Python bindings.
+
+TODO: These tests were just for bootstrapping and are not authoritative at this
+point.
+"""

from _npcomp.mlir import ir
-from npcomp.utils import test_utils
-test_utils.start_filecheck_test()

c = ir.MLIRContext()
# CHECK-LABEL: module @parseSuccess
@@ -37,5 +41,3 @@ try:
except ValueError as e:
  # CHECK: [ERROR]: expected operation name in quotes
  print(e)
-test_utils.end_filecheck_test(__file__)

View File

@@ -1,3 +1,5 @@
+# RUN: %PYTHON %s | FileCheck %s --dump-input=fail
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
@@ -5,9 +7,7 @@
from _npcomp.mlir import ir
from _npcomp.mlir import passes
-from npcomp.utils import test_utils
-test_utils.start_filecheck_test()

c = ir.MLIRContext()
pm = passes.PassManager(c)
@@ -38,5 +38,3 @@ print("PASSES:", str(pm))
pm.run(m)
print(m.to_asm())
# CHECK-NOT: func @notUsed
-test_utils.end_filecheck_test(__file__)

View File

@@ -1,13 +1,15 @@
+# RUN: %PYTHON %s | FileCheck %s --dump-input=fail
# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
# See https://llvm.org/LICENSE.txt for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
-import os
-os.environ["NUMPY_EXPERIMENTAL_ARRAY_FUNCTION"] = "1"
from npcomp.types import *
from npcomp.exporter import *
from npcomp.tracing.mlir_trace import *
-from npcomp.utils import test_utils
-test_utils.start_filecheck_test()


def simple_mul(a: np.ndarray, b: np.ndarray) -> np.ndarray:
@@ -29,12 +31,10 @@ exp.simple_mul.sig.result += DType(np.float32)
mb = ModuleBuilder()
mb.trace(exp.simple_mul)
# CHECK: func @simple_mul(%arg0: tensor<?x4xf32>, %arg1: tensor<1xf32>) -> tensor<?x4xf32> {
-# CHECK: %0 = numpy.ufunc_call @numpy.multiply(%arg0, %arg1) : (tensor<?x4xf32>, tensor<1xf32>) -> tensor<*x!numpy.any_dtype>
-# CHECK: %1 = numpy.ufunc_call @numpy.add(%0, %arg0) : (tensor<*x!numpy.any_dtype>, tensor<?x4xf32>) -> tensor<*x!numpy.any_dtype>
-# CHECK: %2 = numpy.ufunc_call @numpy.add(%1, %arg1) : (tensor<*x!numpy.any_dtype>, tensor<1xf32>) -> tensor<*x!numpy.any_dtype>
-# CHECK: %3 = numpy.narrow %2 : (tensor<*x!numpy.any_dtype>) -> tensor<?x4xf32>
+# CHECK: %0 = numpy.builtin_ufunc_call<"numpy.multiply"> (%arg0, %arg1) : (tensor<?x4xf32>, tensor<1xf32>) -> tensor<*x!basicpy.UnknownType>
+# CHECK: %1 = numpy.builtin_ufunc_call<"numpy.add"> (%0, %arg0) : (tensor<*x!basicpy.UnknownType>, tensor<?x4xf32>) -> tensor<*x!basicpy.UnknownType>
+# CHECK: %2 = numpy.builtin_ufunc_call<"numpy.add"> (%1, %arg1) : (tensor<*x!basicpy.UnknownType>, tensor<1xf32>) -> tensor<*x!basicpy.UnknownType>
+# CHECK: %3 = numpy.narrow %2 : (tensor<*x!basicpy.UnknownType>) -> tensor<?x4xf32>
# CHECK: return %3 : tensor<?x4xf32>
# CHECK: }
print(mb.module.to_asm())
-test_utils.end_filecheck_test(__file__)

View File

@@ -1,15 +1,37 @@
# RUN: %PYTHON %s
+import os
+os.environ["NUMPY_EXPERIMENTAL_ARRAY_FUNCTION"] = "1"
+import traceback


def run_doctest(mod):
-  print("TESTING:", mod)
+  print("\n\nTESTING:", mod)
+  print("--------")
  import doctest
  import sys
  import importlib
-  m = importlib.import_module(mod)
+  try:
+    m = importlib.import_module(mod)
+  except:
+    print("ERROR IMPORTING MODULE:", mod)
+    sys.exit(1)
  fc, _ = doctest.testmod(m)
  if fc:
    sys.exit(1)


-run_doctest("npcomp.compiler.py_value_utils")
+TEST_MODULES = (
+    "npcomp.compiler.py_value_utils",
+    "npcomp.dialect.Basicpy",
+    "npcomp.dialect.Numpy",
+    "npcomp.tracing.context",
+    "npcomp.tracing.emitters",
+    "npcomp.tracing.mlir_trace",
+    "npcomp.types",
+    "npcomp.exporter",
+)
+
+for mname in TEST_MODULES:
+  run_doctest(mname)

View File

@@ -17,24 +17,6 @@ class DialectHelper(Basicpy.DialectHelper):
    >>> c = ir.MLIRContext()
    >>> h = DialectHelper(c, ir.OpBuilder(c))
    >>> m = c.new_module()
    >>> tensor_type = h.tensor_type(h.f32_type)
    >>> h.builder.insert_block_start(m.first_block)
    >>> f = h.func_op("foobar", h.function_type(
    ...     [tensor_type, tensor_type], [tensor_type]),
    ...     create_entry_block=True)
    >>> uf = h.numpy_ufunc_call_op("numpy.add", tensor_type,
    ...     *f.first_block.args)
    >>> _ = h.return_op(uf.results)
    >>> print(m.to_asm())
    <BLANKLINE>
    <BLANKLINE>
    module {
      func @foobar(%arg0: tensor<*xf32>, %arg1: tensor<*xf32>) -> tensor<*xf32> {
        %0 = numpy.ufunc_call @numpy.add(%arg0, %arg1) : (tensor<*xf32>, tensor<*xf32>) -> tensor<*xf32>
        return %0 : tensor<*xf32>
      }
    }

  DenseElementsAttrs:
    >>> c.dense_elements_attr(np.asarray([1, 2, 3, 4]))
@@ -61,7 +43,7 @@ class DialectHelper(Basicpy.DialectHelper):
    tensor<*xf32>
    >>> t.function_type([t.i32_type], [t.f32_type])
    (i32) -> f32
-    >>> t.unknown_tensor_type
+    >>> t.numpy_unknown_tensor_type
    tensor<*x!basicpy.UnknownType>
  """
@@ -84,13 +66,6 @@ class DialectHelper(Basicpy.DialectHelper):
    attrs = c.dictionary_attr({"qualified_name": c.string_attr(qualified_name)})
    return self.op("numpy.builtin_ufunc_call", [result_type], args, attrs)

  def numpy_ufunc_call_op(self, callee_symbol, result_type, *args):
    """Creates a numpy.ufunc_call op."""
    c = self.context
    attrs = c.dictionary_attr(
        {"ufunc_ref": c.flat_symbol_ref_attr(callee_symbol)})
    return self.op("numpy.ufunc_call", [result_type], args, attrs)

  def numpy_narrow_op(self, result_type, operand):
    """Creates a numpy.narrow op."""
    return self.op("numpy.narrow", [result_type], [operand])
@@ -100,34 +75,6 @@ class DialectHelper(Basicpy.DialectHelper):
                   [array] + list(slice_elements))


def load_builtin_module(context=None):
  """Loads a module populated with numpy built-ins.

  This is not a long-term solution but overcomes some bootstrapping
  issues.

  >>> m = load_builtin_module()
  >>> op = m.region(0).blocks.front.operations.front
  >>> op.is_registered
  True
  >>> op.name
  'numpy.builtin_ufunc'

  Args:
    context: The MLIRContext to use (None to create a new one).
  Returns:
    A ModuleOp.
  """
  if context is None:
    context = ir.MLIRContext()
  return context.parse_asm(_BUILTIN_MODULE_ASM)


_BUILTIN_MODULE_ASM = r"""
numpy.builtin_ufunc @numpy.add
numpy.builtin_ufunc @numpy.multiply
"""

if __name__ == "__main__":
  import doctest
  doctest.testmod()

View File

@@ -58,9 +58,9 @@ class TraceValueMap(
      ["input_trace_values", "result_trace_value_types", "extra"],
      defaults=(None,))):
  """The result of mapping an invocation to corresponding op structure.

  This type associates:
    - Python (object, TraceValueType) representing invocation inputs that
      correspond to SSA values in the IR.
    - TraceValueTypes that are the expected logical result types from the
      invocation.
@@ -92,7 +92,7 @@ class FuncEmitter:
  def map_results(self, py_results, extra):
    """Maps a list of python results to actual function return values.

    Args:
      py_results: List of python results corresponding to the emitted op
        results.
@@ -105,7 +105,7 @@ class FuncEmitter:
  def emit(self, request: EmissionRequest):
    """Emits IR using the provided ops and types factories.

    Args:
      emission_inputs: An EmissionRequest produced by tracing each TraceValue
        from a previous call to map_invocation and the corresponding extra
@@ -120,7 +120,7 @@
class GenericCallUfuncEmitter(FuncEmitter):
  """A FuncEmitter for generic ufuncs requiring no special behavior.

  Representation:
  >>> emitter = GenericCallUfuncEmitter("numpy.add")
  >>> emitter
@@ -168,8 +168,9 @@ class GenericCallUfuncEmitter(FuncEmitter):
  def emit(self, request: EmissionRequest):
    h = request.dialect_helper
    op_result_type = h.tensor_type(h.numpy_any_dtype)
-    call_op = h.numpy_ufunc_call_op(self._ufunc_name, op_result_type,
-                                    *request.input_ssa_values)
+    call_op = h.numpy_builtin_ufunc_call_op(*request.input_ssa_values,
+                                            qualified_name=self._ufunc_name,
+                                            result_type=op_result_type)
    return call_op.results
@@ -219,7 +220,7 @@ class GenericArrayFuncEmitter(FuncEmitter):
class EmitterRegistry:
  """Registry of known Emitter instances mapped to source function.

  >>> r = EmitterRegistry.create_default()
  >>> r.lookup_ufunc(np.add, "__call__")
  <ufunc emitter 'numpy.add'>

View File

@@ -20,9 +20,7 @@ class ModuleBuilder:
  def __init__(self, mlir_context=None, emitter_registry=None):
    self.context = mlir_context if mlir_context else ir.MLIRContext()
-    # TODO: Instead of bootstrapping a large module, populate imports
-    # dynamically.
-    self.module = Numpy.load_builtin_module(self.context)
+    self.module = self.context.new_module()
    self.helper = Numpy.DialectHelper(self.context, ir.OpBuilder(self.context))
    self.emitters = (emitter_registry
                     if emitter_registry else EmitterRegistry.create_default())

View File

@@ -1,61 +0,0 @@
#!/usr/bin/env python3
import os
import subprocess
import sys

TEST_MODULES = (
    "npcomp.mlir_ir_test",
    "npcomp.mlir_pass_test",
    "npcomp.dialect.Basicpy",
    "npcomp.dialect.Numpy",
    "npcomp.tracing.context",
    "npcomp.tracing.mlir_trace",
    "npcomp.types",
    "npcomp.exporter",
    "npcomp.tracing.mlir_trace_test",
)

# Compute PYTHONPATH for sub processes.
DIRSEP = os.path.pathsep
LOCAL_PYTHONPATH_COMPONENTS = [
    # This directory.
    os.path.abspath(os.path.dirname(__file__)),
    # The parallel python_native directory (assuming in the build tree).
    os.path.abspath(
        os.path.join(os.path.dirname(__file__), "..", "python_native"))
]
PYTHONPATH = DIRSEP.join(LOCAL_PYTHONPATH_COMPONENTS)
if "PYTHONPATH" in os.environ:
  PYTHONPATH = PYTHONPATH + DIRSEP + os.environ["PYTHONPATH"]
CHILD_ENVIRON = dict(os.environ)
CHILD_ENVIRON["PYTHONPATH"] = PYTHONPATH

# Configure filecheck.
FILECHECK_BINARY = os.path.abspath(
    os.path.join(os.path.dirname(__file__), "..", "..", "..", "bin",
                 "FileCheck"))
if os.path.exists(FILECHECK_BINARY):
  CHILD_ENVIRON["FILECHECK_BINARY"] = FILECHECK_BINARY
else:
  print("WARNING! Built FileCheck not found. Leaving to path resolution")

passed = []
failed = []
for test_module in TEST_MODULES:
  print("--------====== RUNNING %s ======--------" % test_module)
  try:
    subprocess.check_call([sys.executable, "-Wignore", "-m", test_module],
                          env=CHILD_ENVIRON)
    print("--------====== DONE %s ======--------\n" % test_module)
    passed.append(test_module)
  except subprocess.CalledProcessError:
    print("!!!!!!!!====== ERROR %s ======!!!!!!!!\n" % test_module)
    failed.append(test_module)

print("Done: %d passed, %d failed" % (len(passed), len(failed)))
if failed:
  for test_module in failed:
    print(" %s: FAILED" % test_module)
  sys.exit(1)

View File

@@ -1,52 +1,8 @@
// RUN: npcomp-opt -split-input-file %s | npcomp-opt | FileCheck --dump-input=fail %s
-// CHECK-LABEL: @any_dtype
-func @any_dtype(%arg0: tensor<*x!numpy.any_dtype>) -> (tensor<*x!numpy.any_dtype>) {
-  return %arg0 : tensor<*x!numpy.any_dtype>
-}
// -----
// CHECK-LABEL: @builtin_ufunc
-module @builtin_ufunc {
-  // CHECK: numpy.builtin_ufunc @numpy.add
-  numpy.builtin_ufunc @numpy.add
-  // CHECK: numpy.builtin_ufunc @numpy.custom_sub {some_attr = "foobar"}
-  numpy.builtin_ufunc @numpy.custom_sub { some_attr = "foobar" }
-}
+func @builtin_ufunc(%arg0 : tensor<3xf64>, %arg1 : tensor<3xf64>) -> tensor<3xf64> {
+  %0 = numpy.builtin_ufunc_call<"numpy.add"> (%arg0, %arg1) : (tensor<3xf64>, tensor<3xf64>) -> tensor<3xf64>
+  return %0 : tensor<3xf64>
-// -----
-// CHECK-LABEL: @example_generic_ufunc
-module @example_generic_ufunc {
-  // CHECK: numpy.generic_ufunc @numpy.add(
-  numpy.generic_ufunc @numpy.add (
-    // CHECK-SAME: overload(%arg0: i32, %arg1: i32) -> i32 {
-    overload(%arg0: i32, %arg1: i32) -> i32 {
-      // CHECK: addi
-      %0 = addi %arg0, %arg1 : i32
-      numpy.ufunc_return %0 : i32
-    },
-    // CHECK: overload(%arg0: f32, %arg1: f32) -> f32 {
-    overload(%arg0: f32, %arg1: f32) -> f32 {
-      // CHECK: addf
-      %0 = addf %arg0, %arg1 : f32
-      numpy.ufunc_return %0 : f32
-    }
-  )
-}
-// -----
-// CHECK-LABEL: @ufunc_apply_ops
-module @ufunc_apply_ops {
-  numpy.generic_ufunc @numpy.add (
-    overload(%arg0: i32, %arg1: i32) -> i32 {
-      %0 = addi %arg0, %arg1 : i32
-      numpy.ufunc_return %0 : i32
-    }
-  )
-  func @example(%arg0: tensor<*xi32>, %arg1: tensor<*xi32>) -> tensor<*xi32> {
-    %0 = numpy.ufunc_call @numpy.add(%arg0, %arg1) : (tensor<*xi32>, tensor<*xi32>)
-        -> tensor<*xi32>
-    return %0 : tensor<*xi32>
-  }
-}
}