[tosa] Add missing overrides to fix compiler warnings (#514)

Signed-off-by: Suraj Sudhir <suraj.sudhir@arm.com>
Suraj Sudhir 2022-01-07 10:57:54 -08:00 committed by GitHub
parent 732a76f45c
commit d6b6c0268c
1 changed file with 14 additions and 11 deletions
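The change is mechanical: each of these conversion-pattern hooks overrides a virtual function declared in its base class (OpConversionPattern's matchAndRewrite, or the virtual hooks on the shared base patterns in this file), so a compiler running with something like Clang's -Winconsistent-missing-override or GCC's -Wsuggest-override warns until the declarations are marked override. A minimal standalone sketch of the warning and the fix, using hypothetical class names rather than the actual torch-mlir patterns:

    // Hypothetical stand-ins for the real pattern classes; compile with
    // -Winconsistent-missing-override (Clang) or -Wsuggest-override (GCC).
    struct PatternBase {
      virtual ~PatternBase() = default;
      virtual bool matchAndRewrite(int op) const { return false; }
    };

    struct WithoutOverride : PatternBase {
      // Warns roughly: 'matchAndRewrite' overrides a member function but is
      // not marked 'override'.
      bool matchAndRewrite(int op) const { return true; }
    };

    struct WithOverride : PatternBase {
      // Adding 'override' (as this commit does) silences the warning and makes
      // the compiler check that the signature matches the base declaration.
      bool matchAndRewrite(int op) const override { return true; }
    };

Besides silencing the warning, marking overriding functions turns an accidental signature mismatch (which would otherwise silently declare a new virtual) into a hard compile error.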

@@ -117,8 +117,9 @@ class ConvertAtenAddSubOp : public OpConversionPattern<AtenOpT> {
 public:
   using OpConversionPattern<AtenOpT>::OpConversionPattern;
   using OpAdaptor = typename AtenOpT::Adaptor;
-  LogicalResult matchAndRewrite(AtenOpT op, OpAdaptor adaptor,
-                                ConversionPatternRewriter &rewriter) const {
+  LogicalResult
+  matchAndRewrite(AtenOpT op, OpAdaptor adaptor,
+                  ConversionPatternRewriter &rewriter) const override {
     Value lhs = adaptor.self();
     auto lhsTy = lhs.getType().cast<TensorType>();
     Value rhs = adaptor.other();
@@ -312,8 +313,9 @@ public:
   // Common rewriter for all reduction ops, calls the specific implementation of
   // readReduceDimsAndKeepDims() needed for the op variant.
-  LogicalResult matchAndRewrite(AtenOpT op, OpAdaptor adaptor,
-                                ConversionPatternRewriter &rewriter) const {
+  LogicalResult
+  matchAndRewrite(AtenOpT op, OpAdaptor adaptor,
+                  ConversionPatternRewriter &rewriter) const override {
     Value self = adaptor.self();
     auto selfTy = self.getType().cast<TensorType>();
@@ -360,7 +362,7 @@ class ConvertAtenMultipleDimsReductionOp
   LogicalResult readReduceDimsAndKeepDims(AtenOpT op, OpAdaptor adaptor,
                                           ConversionPatternRewriter &rewriter,
                                           ElementsAttr &reduceDimsAttr,
-                                          bool &keepDims) const {
+                                          bool &keepDims) const override {
     SmallVector<int64_t, 4> reduceDims;
     if (!matchPattern(op.dim(), m_TorchConstantIntList(reduceDims)))
       return rewriter.notifyMatchFailure(op,
@@ -391,7 +393,7 @@ class ConvertAtenOneDimReductionOp
   LogicalResult readReduceDimsAndKeepDims(AtenOpT op, OpAdaptor adaptor,
                                           ConversionPatternRewriter &rewriter,
                                           ElementsAttr &reduceDimsAttr,
-                                          bool &keepDims) const {
+                                          bool &keepDims) const override {
     int64_t reduceDim;
     if (!matchPattern(op.dim(), m_TorchConstantInt(&reduceDim)))
       return rewriter.notifyMatchFailure(op,
@@ -421,7 +423,7 @@ public:
   LogicalResult readReduceDimsAndKeepDims(AtenOpT op, OpAdaptor adaptor,
                                           ConversionPatternRewriter &rewriter,
                                           ElementsAttr &reduceDimsAttr,
-                                          bool &keepDims) const {
+                                          bool &keepDims) const override {
     auto self = adaptor.self();
     auto selfTy = self.getType().template cast<RankedTensorType>();
@@ -543,8 +545,9 @@ public:
   // Common rewriter for all squeeze ops, calls the specific implementation of
   // generateSqueezedShape() needed for the op variant.
-  LogicalResult matchAndRewrite(AtenOpT op, OpAdaptor adaptor,
-                                ConversionPatternRewriter &rewriter) const {
+  LogicalResult
+  matchAndRewrite(AtenOpT op, OpAdaptor adaptor,
+                  ConversionPatternRewriter &rewriter) const override {
     Value self = adaptor.self();
     auto selfTy = self.getType().template cast<RankedTensorType>();
@@ -585,7 +588,7 @@ class ConvertAtenSqueezeOneDimOp : public ConvertAtenSqueezeOp<AtenOpT> {
   LogicalResult
   generateSqueezedShape(AtenOpT op, RankedTensorType selfTy,
                         ConversionPatternRewriter &rewriter,
-                        SmallVector<int64_t> &squeezedShape) const {
+                        SmallVector<int64_t> &squeezedShape) const override {
     int64_t squeezeDim;
     if (!matchPattern(op.dim(), m_TorchConstantInt(&squeezeDim)))
       return rewriter.notifyMatchFailure(op,
@@ -619,7 +622,7 @@ class ConvertAtenSqueezeAllDimsOp : public ConvertAtenSqueezeOp<AtenOpT> {
   LogicalResult
   generateSqueezedShape(AtenOpT op, RankedTensorType selfTy,
                         ConversionPatternRewriter &rewriter,
-                        SmallVector<int64_t> &squeezedShape) const {
+                        SmallVector<int64_t> &squeezedShape) const override {
     auto selfShape = selfTy.getShape();
     // Dims that may dynamically resolve to 1 are not reduced here. Only