[tosa] Add missing overrides to fix compiler warnings (#514)

Signed-off-by: Suraj Sudhir <suraj.sudhir@arm.com>
pull/513/head
Suraj Sudhir 2022-01-07 10:57:54 -08:00 committed by GitHub
parent 732a76f45c
commit d6b6c0268c
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
1 changed file with 14 additions and 11 deletions

View File

@@ -117,8 +117,9 @@ class ConvertAtenAddSubOp : public OpConversionPattern<AtenOpT> {
public: public:
using OpConversionPattern<AtenOpT>::OpConversionPattern; using OpConversionPattern<AtenOpT>::OpConversionPattern;
using OpAdaptor = typename AtenOpT::Adaptor; using OpAdaptor = typename AtenOpT::Adaptor;
LogicalResult matchAndRewrite(AtenOpT op, OpAdaptor adaptor, LogicalResult
ConversionPatternRewriter &rewriter) const { matchAndRewrite(AtenOpT op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
Value lhs = adaptor.self(); Value lhs = adaptor.self();
auto lhsTy = lhs.getType().cast<TensorType>(); auto lhsTy = lhs.getType().cast<TensorType>();
Value rhs = adaptor.other(); Value rhs = adaptor.other();
@@ -312,8 +313,9 @@ public:
// Common rewriter for all reduction ops, calls the specific implementation of // Common rewriter for all reduction ops, calls the specific implementation of
// readReduceDimsAndKeepDims() needed for the op variant. // readReduceDimsAndKeepDims() needed for the op variant.
LogicalResult matchAndRewrite(AtenOpT op, OpAdaptor adaptor, LogicalResult
ConversionPatternRewriter &rewriter) const { matchAndRewrite(AtenOpT op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
Value self = adaptor.self(); Value self = adaptor.self();
auto selfTy = self.getType().cast<TensorType>(); auto selfTy = self.getType().cast<TensorType>();
@@ -360,7 +362,7 @@ class ConvertAtenMultipleDimsReductionOp
LogicalResult readReduceDimsAndKeepDims(AtenOpT op, OpAdaptor adaptor, LogicalResult readReduceDimsAndKeepDims(AtenOpT op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter, ConversionPatternRewriter &rewriter,
ElementsAttr &reduceDimsAttr, ElementsAttr &reduceDimsAttr,
bool &keepDims) const { bool &keepDims) const override {
SmallVector<int64_t, 4> reduceDims; SmallVector<int64_t, 4> reduceDims;
if (!matchPattern(op.dim(), m_TorchConstantIntList(reduceDims))) if (!matchPattern(op.dim(), m_TorchConstantIntList(reduceDims)))
return rewriter.notifyMatchFailure(op, return rewriter.notifyMatchFailure(op,
@@ -391,7 +393,7 @@ class ConvertAtenOneDimReductionOp
LogicalResult readReduceDimsAndKeepDims(AtenOpT op, OpAdaptor adaptor, LogicalResult readReduceDimsAndKeepDims(AtenOpT op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter, ConversionPatternRewriter &rewriter,
ElementsAttr &reduceDimsAttr, ElementsAttr &reduceDimsAttr,
bool &keepDims) const { bool &keepDims) const override {
int64_t reduceDim; int64_t reduceDim;
if (!matchPattern(op.dim(), m_TorchConstantInt(&reduceDim))) if (!matchPattern(op.dim(), m_TorchConstantInt(&reduceDim)))
return rewriter.notifyMatchFailure(op, return rewriter.notifyMatchFailure(op,
@@ -421,7 +423,7 @@ public:
LogicalResult readReduceDimsAndKeepDims(AtenOpT op, OpAdaptor adaptor, LogicalResult readReduceDimsAndKeepDims(AtenOpT op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter, ConversionPatternRewriter &rewriter,
ElementsAttr &reduceDimsAttr, ElementsAttr &reduceDimsAttr,
bool &keepDims) const { bool &keepDims) const override {
auto self = adaptor.self(); auto self = adaptor.self();
auto selfTy = self.getType().template cast<RankedTensorType>(); auto selfTy = self.getType().template cast<RankedTensorType>();
@@ -543,8 +545,9 @@ public:
// Common rewriter for all squeeze ops, calls the specific implementation of // Common rewriter for all squeeze ops, calls the specific implementation of
// generateSqueezedShape() needed for the op variant. // generateSqueezedShape() needed for the op variant.
LogicalResult matchAndRewrite(AtenOpT op, OpAdaptor adaptor, LogicalResult
ConversionPatternRewriter &rewriter) const { matchAndRewrite(AtenOpT op, OpAdaptor adaptor,
ConversionPatternRewriter &rewriter) const override {
Value self = adaptor.self(); Value self = adaptor.self();
auto selfTy = self.getType().template cast<RankedTensorType>(); auto selfTy = self.getType().template cast<RankedTensorType>();
@@ -585,7 +588,7 @@ class ConvertAtenSqueezeOneDimOp : public ConvertAtenSqueezeOp<AtenOpT> {
LogicalResult LogicalResult
generateSqueezedShape(AtenOpT op, RankedTensorType selfTy, generateSqueezedShape(AtenOpT op, RankedTensorType selfTy,
ConversionPatternRewriter &rewriter, ConversionPatternRewriter &rewriter,
SmallVector<int64_t> &squeezedShape) const { SmallVector<int64_t> &squeezedShape) const override {
int64_t squeezeDim; int64_t squeezeDim;
if (!matchPattern(op.dim(), m_TorchConstantInt(&squeezeDim))) if (!matchPattern(op.dim(), m_TorchConstantInt(&squeezeDim)))
return rewriter.notifyMatchFailure(op, return rewriter.notifyMatchFailure(op,
@@ -619,7 +622,7 @@ class ConvertAtenSqueezeAllDimsOp : public ConvertAtenSqueezeOp<AtenOpT> {
LogicalResult LogicalResult
generateSqueezedShape(AtenOpT op, RankedTensorType selfTy, generateSqueezedShape(AtenOpT op, RankedTensorType selfTy,
ConversionPatternRewriter &rewriter, ConversionPatternRewriter &rewriter,
SmallVector<int64_t> &squeezedShape) const { SmallVector<int64_t> &squeezedShape) const override {
auto selfShape = selfTy.getShape(); auto selfShape = selfTy.getShape();
// Dims that may dynamically resolve to 1 are not reduced here. Only // Dims that may dynamically resolve to 1 are not reduced here. Only