//===- NumpyOps.cpp - Core numpy dialect ops --------------------*- C++ -*-===//
//
// This file is licensed under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "npcomp/Dialect/Numpy/IR/NumpyOps.h"

#include "mlir/IR/Builders.h"
#include "mlir/IR/FunctionImplementation.h"
#include "mlir/IR/OpImplementation.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/TypeUtilities.h"
#include "npcomp/Dialect/Basicpy/IR/BasicpyDialect.h"
#include "npcomp/Dialect/Numpy/IR/NumpyDialect.h"

using namespace mlir;
using namespace mlir::NPCOMP;
using namespace mlir::NPCOMP::Numpy;

//----------------------------------------------------------------------------//
// Type inference
//----------------------------------------------------------------------------//

/// Adds constraints relating a unary op that accepts and returns either
/// tensor or ndarray types, where the dtype should be the same.
/// Type constraints are added on the dtype, not the outer object type.
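///
/// For example (illustrative only; CPA type-variable naming is internal), for
///   %arr = numpy.create_array_from_tensor(%t)
/// this joins the dtype slot of %t's tensor type with the dtype slot of
/// %arr's ndarray type, so a dtype inferred for one propagates to the other.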
static void constrainUnaryDtypeInvariantOp(Typing::CPA::Context &context,
                                           Value source, Value dest,
                                           Operation *op) {
  auto &env = context.getCurrentEnvironment();
  auto *sourceTn =
      llvm::dyn_cast<Typing::CPA::ObjectValueType>(env.mapValueToType(source));
  auto *destTn =
      llvm::dyn_cast<Typing::CPA::ObjectValueType>(env.mapValueToType(dest));
  if (sourceTn && destTn && sourceTn->getFieldCount() == 1 &&
      destTn->getFieldCount() == 1) {
    context.getConstraint(sourceTn->getFieldTypes().front(),
                          destTn->getFieldTypes().front());
  }
}

void CreateArrayFromTensorOp::addCPAConstraints(Typing::CPA::Context &context) {
  constrainUnaryDtypeInvariantOp(context, source(), dest(), *this);
}

void CopyToTensorOp::addCPAConstraints(Typing::CPA::Context &context) {
  constrainUnaryDtypeInvariantOp(context, source(), dest(), *this);
}

void BuiltinUfuncCallOp::addCPAConstraints(Typing::CPA::Context &context) {
  // TODO: This should really be modeled as a function call chosen so as to
  // promote the arguments. For now, though, we just say that the result is
  // constrained to the inputs. Note that not all ufuncs transfer types like
  // this; we pretend the call is a set of unary functions, one per input,
  // each writing into the output.
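  //
  // Illustrative (op name per this dialect; exact asm syntax approximate):
  // for %out = numpy.builtin_ufunc_call(%a, %b), this relates dtype(%a) to
  // dtype(%out) and dtype(%b) to dtype(%out) in the CPA environment.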
  for (auto input : inputs()) {
    constrainUnaryDtypeInvariantOp(context, input, output(), *this);
  }
}

//----------------------------------------------------------------------------//
// StaticInfoCast
//----------------------------------------------------------------------------//

bool StaticInfoCastOp::areCastCompatible(mlir::TypeRange inputs,
                                         mlir::TypeRange outputs) {
  auto input = inputs[0].cast<NdArrayType>();
  auto output = outputs[0].cast<NdArrayType>();
  if (input.getOptionalShape() && output.getOptionalShape()) {
    if (failed(verifyCompatibleShape(*input.getOptionalShape(),
                                     *output.getOptionalShape())))
      return false;
  }
  return input.getDtype() == output.getDtype() ||
         input.getDtype().isa<AnyDtypeType>() ||
         output.getDtype().isa<AnyDtypeType>();
}
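
// Illustrative (ndarray type syntax approximate): a cast between
// !numpy.ndarray<*:?> and !numpy.ndarray<[2,3]:f32> is compatible in either
// direction (it only adds or erases static information), whereas
// !numpy.ndarray<[2]:f32> to !numpy.ndarray<[3]:f32> is rejected because the
// static shapes conflict.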

void StaticInfoCastOp::getCanonicalizationPatterns(RewritePatternSet &patterns,
                                                   MLIRContext *context) {
  // static_info_cast(oneUse@create_array_from_tensor(%tensor))
  // -->
  // create_array_from_tensor(tensor_static_info_cast(%tensor))
  //
  // This pattern tends to create more tensor code and less array code.
  // This form is considered more canonical because it has the same number of
  // ops but is more analyzable.
  //
  // TODO: Consider a world where numpy.ndarray can track an "immutable" bit
  // which makes it tensor-like. Is that useful?
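  //
  // Illustrative IR for the rewrite (asm and type syntax approximate):
  //   %a = numpy.create_array_from_tensor(%t) : (tensor<2xf32>) -> !numpy.ndarray<[2]:f32>
  //   %c = numpy.static_info_cast %a : !numpy.ndarray<[2]:f32> to !numpy.ndarray<*:f32>
  // becomes:
  //   %tc = numpy.tensor_static_info_cast %t : tensor<2xf32> to tensor<*xf32>
  //   %c = numpy.create_array_from_tensor(%tc) : (tensor<*xf32>) -> !numpy.ndarray<*:f32>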
  patterns.add(+[](StaticInfoCastOp op, PatternRewriter &rewriter) {
    auto createArray = op.getOperand().getDefiningOp<CreateArrayFromTensorOp>();
    if (!createArray || !createArray.getResult().hasOneUse())
      return failure();
    auto tensorCast = rewriter.create<TensorStaticInfoCastOp>(
        op.getLoc(), op.getType().cast<NdArrayType>().toTensorType(),
        createArray.getOperand());
    rewriter.replaceOpWithNewOp<CreateArrayFromTensorOp>(op, op.getType(),
                                                         tensorCast);
    rewriter.eraseOp(createArray);
    return success();
  });
}

//----------------------------------------------------------------------------//
// TensorStaticInfoCast
//----------------------------------------------------------------------------//

bool TensorStaticInfoCastOp::areCastCompatible(mlir::TypeRange inputs,
                                               mlir::TypeRange outputs) {
  auto input = inputs[0].cast<TensorType>();
  auto output = outputs[0].cast<TensorType>();
  if (input.hasRank() && output.hasRank()) {
    if (failed(verifyCompatibleShape(input.getShape(), output.getShape())))
      return false;
  }
  return input.getElementType() == output.getElementType() ||
         input.getElementType().isa<AnyDtypeType>() ||
         output.getElementType().isa<AnyDtypeType>();
}
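
// Illustrative: tensor<?x3xf32> is compatible with tensor<2x3xf32> (a dynamic
// dimension may be refined to a static one), but tensor<2x3xf32> and
// tensor<4x3xf32> conflict; an unranked tensor is compatible with any shape.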

//----------------------------------------------------------------------------//
// CreateArrayFromTensorOp
//----------------------------------------------------------------------------//

namespace {
/// Match create_array_from_tensor -> copy_to_tensor and elide in favor
/// of the original tensor.
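///
/// Illustrative IR (type syntax approximate):
///   %arr = numpy.create_array_from_tensor(%t) : (tensor<2xf32>) -> !numpy.ndarray<[2]:f32>
///   %u = numpy.copy_to_tensor(%arr) : (!numpy.ndarray<[2]:f32>) -> tensor<2xf32>
/// folds to just %t, provided %arr has no other uses.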
class ElideCreateRedundantArrayFromTensor
    : public OpRewritePattern<CopyToTensorOp> {
public:
  using OpRewritePattern::OpRewritePattern;
  LogicalResult matchAndRewrite(CopyToTensorOp op,
                                PatternRewriter &rewriter) const override {
    auto createArrayOp =
        dyn_cast_or_null<CreateArrayFromTensorOp>(op.source().getDefiningOp());
    if (createArrayOp && createArrayOp.dest().hasOneUse()) {
      rewriter.replaceOp(op, createArrayOp.source());
      return success();
    }
    // No match: return failure so the rewrite driver does not treat
    // unchanged IR as a successful rewrite.
    return failure();
  }
};
} // namespace

void CopyToTensorOp::getCanonicalizationPatterns(RewritePatternSet &patterns,
                                                 MLIRContext *context) {
  patterns.add<ElideCreateRedundantArrayFromTensor>(context);
}

#define GET_OP_CLASSES
#include "npcomp/Dialect/Numpy/IR/NumpyOps.cpp.inc"