//===------------------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
// Also available under a BSD-style license. See LICENSE.
//
//===----------------------------------------------------------------------===//

#include "mlir/Transforms/DialectConversion.h"

namespace mlir {
namespace torch {
namespace torch_to_linalg {

struct ReductionOpInfo {
  bool keepDim;
  Value tensorOperand;
  DenseSet<int64_t> dimSet;
};

// Helper function to get the padded tensor given the padding int values.
Value getPaddedTensor(Operation *op, OpBuilder &b, Value &input,
                      SmallVectorImpl<int64_t> &lowPaddingInts,
                      SmallVectorImpl<int64_t> &highPaddingInts, Value pad);
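//
// A minimal usage sketch (illustrative only; `op`, `b`, `input`, and the
// zero-valued `pad` element are assumed to exist in the caller's scope, and
// the pad amounts are hypothetical):
//
//   SmallVector<int64_t> lowPad{0, 0, 1, 1};  // pad H and W on the low side
//   SmallVector<int64_t> highPad{0, 0, 1, 1}; // pad H and W on the high side
//   Value padded = getPaddedTensor(op, b, input, lowPad, highPad, pad);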

// Helper function to get the zero-padded tensor given the padding int values.
// It is assumed that the padding on the low end and the high end of each
// dimension is the same, and that the padding value is zero.
Value getZeroPaddedTensor(Operation *op, OpBuilder &b, Value &input,
                          SmallVectorImpl<int64_t> &paddingInts);

// Helper function that adds dynamic padding to a tensor, leaving the first
// `unpaddedDims` dimensions unpadded. The high and low padding are the same,
// and the padding value is zero. For example, with `unpaddedDims = 2`, an
// NCHW input has only its H and W dimensions padded.
Value getDynamicZeroPaddedTensor(Operation *op, OpBuilder &b, Value &input,
                                 SmallVectorImpl<Value> &padding,
                                 int unpaddedDims = 0);

// Helper function to calculate the output tensor dims for convolution-like
// ops. Along each dim:
// dim_out =
//   floor((dim_in + 2 * padding - dilation * (kernelSize - 1) - 1) / stride) + 1
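//
// Worked example (hypothetical values, ceilMode = false): dim_in = 7,
// padding = 1, dilation = 1, kernelSize = 3, stride = 2 gives
//   dim_out = floor((7 + 2*1 - 1*(3 - 1) - 1) / 2) + 1 = floor(6 / 2) + 1 = 4.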
Value getOutputDimForConvOps(OpBuilder &b, Location loc, Value in,
                             Value paddingInt, Value dilationInt,
                             Value kernelSizeInt, Value strideInt,
                             bool ceilMode = false);

// As above, but for transposed convolution ops.
// Along each dim:
// dim_out =
//   (dim_in - 1) * stride - 2 * padding + dilation * (kernelSize - 1) + 1
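//
// Worked example (hypothetical values): dim_in = 4, stride = 2, padding = 1,
// dilation = 1, kernelSize = 3 gives
//   dim_out = (4 - 1) * 2 - 2*1 + 1*(3 - 1) + 1 = 6 - 2 + 2 + 1 = 7.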
Value getOutputDimForConvTransposeOps(OpBuilder &b, Location loc, Value in,
                                      Value paddingInt, Value dilationInt,
                                      Value kernelSizeInt, Value strideInt);

// Create a reduction of `opInfo.tensorOperand`, reducing along the dimensions
// in `opInfo.dimSet`. If `opInfo.keepDim` is true, the output tensor has the
// same rank as `opInfo.tensorOperand`, with the reduced dimensions set to
// size 1. `initElem` is the element used to initialize the output tensor
// where the reduction will be stored.
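//
// A minimal usage sketch for a sum reduction over dimension 0 (illustrative
// only; `b`, `loc`, `input`, and its element type `elemTy` are assumed to be
// in scope, and the payload convention of `bodyBuild` receiving the input
// element followed by the accumulator is an assumption):
//
//   ReductionOpInfo opInfo{/*keepDim=*/false, /*tensorOperand=*/input,
//                          /*dimSet=*/DenseSet<int64_t>{0}};
//   Value zero = b.create<arith::ConstantOp>(loc, b.getZeroAttr(elemTy));
//   Value sum = createReductionLinalgGeneric(
//       b, loc, opInfo, /*initElem=*/zero,
//       [&](OpBuilder &nb, Location nloc, ValueRange args) {
//         Value updated = nb.create<arith::AddFOp>(nloc, args[0], args[1]);
//         nb.create<linalg::YieldOp>(nloc, updated);
//       });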
Value createReductionLinalgGeneric(
    OpBuilder &b, Location loc, const ReductionOpInfo &opInfo, Value initElem,
    function_ref<void(OpBuilder &, Location, ValueRange)> bodyBuild);

// Create a pointwise operation that uses values in `tensorOperands`, such that
// the element type of the resulting tensor is `resultElementType`.
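//
// A minimal usage sketch for an elementwise add of two tensors (illustrative
// only; `b`, `loc`, `lhs`, `rhs`, and the element type `elemTy` are assumed
// to be in scope, and the payload convention of `bodyBuild` receiving one
// element per operand is an assumption):
//
//   Value added = createElementwiseLinalgGeneric(
//       b, loc, ValueRange{lhs, rhs}, elemTy,
//       [&](OpBuilder &nb, Location nloc, ValueRange args) {
//         Value sum = nb.create<arith::AddFOp>(nloc, args[0], args[1]);
//         nb.create<linalg::YieldOp>(nloc, sum);
//       });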
Value createElementwiseLinalgGeneric(
    OpBuilder &b, Location loc, ValueRange tensorOperands,
    Type resultElementType,
    function_ref<void(OpBuilder &, Location, ValueRange)> bodyBuild);

// Broadcasts the input tensor to the shape given by `broadcastToShape`,
// returning the broadcasted tensor in `result`.
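//
// A minimal usage sketch (illustrative only; `op`, `rewriter`, `input`, and
// the extent values making up `targetShape` are assumed to be in scope):
//
//   SmallVector<Value> targetShape = {batchSize, seqLen, hiddenDim};
//   Value broadcasted;
//   if (failed(broadcastToGivenShape(op, rewriter, input, targetShape,
//                                    broadcasted)))
//     return failure();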
LogicalResult broadcastToGivenShape(Operation *op, PatternRewriter &rewriter,
                                    Value input,
                                    SmallVector<Value> broadcastToShape,
                                    Value &result);

// Cast a tensor to a rank-equivalent tensor of unknown size, e.g. <1x2xf32> ->
// <?x?xf32>.
Value removeSizeInformation(OpBuilder &b, Location loc, Value tensor);

} // namespace torch_to_linalg
} // namespace torch
} // namespace mlir