From a38b7b72b2697eb1f625e884943ffb9e3d48bb97 Mon Sep 17 00:00:00 2001 From: Stanley Winata Date: Fri, 5 Feb 2021 17:57:38 -0800 Subject: [PATCH] adapt acap_dispatch to latest pytorch nightly ("1.9.0.dev20210215+cpu") Modify ACAP_Dispatch to work with latest pytorch -Remove impl_UNBOXED from convolution's m.impl -Use redispatch and constrained keyset to replace deprecated callWithDispatchKey --- .../pytorch/csrc/builder/acap_dispatch.cpp | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/frontends/pytorch/csrc/builder/acap_dispatch.cpp b/frontends/pytorch/csrc/builder/acap_dispatch.cpp index 24eae502a..ed8fb4485 100644 --- a/frontends/pytorch/csrc/builder/acap_dispatch.cpp +++ b/frontends/pytorch/csrc/builder/acap_dispatch.cpp @@ -210,7 +210,7 @@ at::Tensor AcapController::convolutionKernel( auto current = getCurrentThreadAcapController(); if (!current) { - return opTyped.callWithDispatchKey(c10::DispatchKey::AutogradOther, input, + return opTyped.redispatch(c10::DispatchKeySet({c10::DispatchKey::AutogradOther}), input, weight, bias, stride, padding, dilation, transposed, output_padding, groups); } @@ -240,8 +240,8 @@ at::Tensor AcapController::convolutionKernel( callBuilder.addOperand(IValue(output_padding)); callBuilder.addOperand(IValue(groups)); - auto result = opTyped.callWithDispatchKey( - c10::DispatchKey::AutogradOther, input, weight, bias, stride, padding, + auto result = opTyped.redispatch( + c10::DispatchKeySet({c10::DispatchKey::AutogradOther}), input, weight, bias, stride, padding, dilation, transposed, output_padding, groups); callBuilder.addResult(result); callBuilder.create(); @@ -277,7 +277,7 @@ AcapController::mklConvolutionBackward( auto current = getCurrentThreadAcapController(); if (!current) { - return opTyped.callWithDispatchKey(c10::DispatchKey::AutogradOther, input, + return opTyped.redispatch(c10::DispatchKeySet({c10::DispatchKey::AutogradOther}), input, grad_output, weight, padding, stride, dilation, groups, 
output_mask); } @@ -313,8 +313,8 @@ AcapController::mklConvolutionBackward( callBuilder.addOperand(IValue(groups)); callBuilder.addOperand(IValue(output_mask)); - auto results = opTyped.callWithDispatchKey( - c10::DispatchKey::AutogradCPU, input, grad_output, weight, padding, + auto results = opTyped.redispatch( + c10::DispatchKeySet({c10::DispatchKey::AutogradCPU}), input, grad_output, weight, padding, stride, dilation, groups, output_mask); callBuilder.addResult(std::get<0>(results)); @@ -346,7 +346,7 @@ at::Tensor &AcapController::copyUnderKernel(at::Tensor &self, auto current = getCurrentThreadAcapController(); if (!current) { - return opTyped.callWithDispatchKey(c10::DispatchKey::AutogradOther, self, + return opTyped.redispatch(c10::DispatchKeySet({c10::DispatchKey::AutogradOther}), self, src, non_blocking); } @@ -356,7 +356,7 @@ at::Tensor &AcapController::copyUnderKernel(at::Tensor &self, callBuilder.addOperand(IValue(self)); callBuilder.addOperand(IValue(src)); - auto &result = opTyped.callWithDispatchKey(c10::DispatchKey::CPU, self, src, + auto &result = opTyped.redispatch(c10::DispatchKeySet({c10::DispatchKey::CPU}), self, src, non_blocking); callBuilder.addResult(result); callBuilder.create(); @@ -383,7 +383,7 @@ at::Tensor AcapController::arangeBackendSelectKernel( at::Scalar end, c10::optional dtype, c10::optional layout, c10::optional device, c10::optional pin_memory)>(); - return opTyped.callWithDispatchKey(targetDk, end, dtype, layout, device, + return opTyped.redispatch(c10::DispatchKeySet({targetDk}), end, dtype, layout, device, pin_memory); } @@ -593,7 +593,7 @@ TORCH_LIBRARY_IMPL(aten, ACAP_GRAD_DISPATCH_KEY, m) { // Presumably this is on someone's list to adapt to the dispatch machinery // in a more appropriate way, but as the core of what the framework is, // perhaps people are reticent to touch it. Maybe someday, this can go away. 
- m.impl_UNBOXED("convolution", &AcapController::convolutionKernel); + m.impl("convolution", &AcapController::convolutionKernel); // Sadly, there is no easy intercept point for the backwards convolution // kernel which allows for chaining to an existing backend. And convolution