diff --git a/backend_ops/tensorrt/instance_norm/trt_instance_norm.cpp b/backend_ops/tensorrt/instance_norm/trt_instance_norm.cpp
index cb710e1c3..226967f2b 100644
--- a/backend_ops/tensorrt/instance_norm/trt_instance_norm.cpp
+++ b/backend_ops/tensorrt/instance_norm/trt_instance_norm.cpp
@@ -62,15 +62,15 @@ int TRTInstanceNormalization::enqueue(
   int elem_size = getElementSize(inputDesc[1].type);

   void* n_scales = (void*)workspace;
-  void* n_bias = (void*)(workspace + getAlignedSize(n * c * elem_size));
+  void* n_bias = (void*)((char*)workspace + getAlignedSize(n * c * elem_size));

   const void* scales = (const void*)inputs[1];
   const void* bias = (const void*)inputs[2];

   for (int i = 0; i < n; ++i) {
-    cudaMemcpyAsync(n_scales + i * c * elem_size, scales, c * elem_size,
+    cudaMemcpyAsync((char*)n_scales + i * c * elem_size, scales, c * elem_size,
                     cudaMemcpyDeviceToDevice, stream);
-    cudaMemcpyAsync(n_bias + i * c * elem_size, bias, c * elem_size,
+    cudaMemcpyAsync((char*)n_bias + i * c * elem_size, bias, c * elem_size,
                     cudaMemcpyDeviceToDevice, stream);
   }

diff --git a/backend_ops/tensorrt/roi_align/trt_roi_align.cpp b/backend_ops/tensorrt/roi_align/trt_roi_align.cpp
index da2263119..4bec0ce86 100644
--- a/backend_ops/tensorrt/roi_align/trt_roi_align.cpp
+++ b/backend_ops/tensorrt/roi_align/trt_roi_align.cpp
@@ -113,7 +113,7 @@ int TRTRoIAlign::enqueue(const nvinfer1::PluginTensorDesc *inputDesc,
   switch (mPoolMode) {
     case 0:  // max
       argmax_y = workSpace;
-      argmax_x = argmax_y + output_size * word_size;
+      argmax_x = (char *)argmax_y + output_size * word_size;
       break;
     case 1:  // avg
       break;
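
Both hunks fix the same problem: pointer arithmetic on void* is a GNU extension rather than standard C++, so byte offsets into the plugin workspace must go through a char* cast. The sketch below illustrates the pattern on the host side; aligned_size and fill_workspace are hypothetical names introduced for the example, and std::memcpy stands in for cudaMemcpyAsync so the snippet compiles without CUDA.

// Minimal host-side sketch of the workspace-partitioning pattern patched above.
// Assumptions: aligned_size() mimics the plugin's getAlignedSize(); std::memcpy
// replaces cudaMemcpyAsync; float data is used for concreteness.
#include <cstddef>
#include <cstring>
#include <vector>

// Hypothetical stand-in for getAlignedSize(): round bytes up to a 16-byte boundary.
static size_t aligned_size(size_t bytes, size_t alignment = 16) {
  return (bytes + alignment - 1) / alignment * alignment;
}

// Broadcast per-channel scale/bias (c elements each) across n batches inside one
// raw workspace buffer, using char* casts so offsets are counted in bytes.
void fill_workspace(void* workspace, const float* scales, const float* bias, int n, int c) {
  const size_t chunk = static_cast<size_t>(n) * c * sizeof(float);
  void* n_scales = workspace;
  void* n_bias = static_cast<char*>(workspace) + aligned_size(chunk);
  for (int i = 0; i < n; ++i) {
    std::memcpy(static_cast<char*>(n_scales) + static_cast<size_t>(i) * c * sizeof(float),
                scales, c * sizeof(float));
    std::memcpy(static_cast<char*>(n_bias) + static_cast<size_t>(i) * c * sizeof(float),
                bias, c * sizeof(float));
  }
}

int main() {
  const int n = 2, c = 4;
  std::vector<float> scales(c, 1.f), bias(c, 0.f);
  std::vector<char> workspace(2 * aligned_size(n * c * sizeof(float)));
  fill_workspace(workspace.data(), scales.data(), bias.data(), n, c);
}

Keeping the second region at an offset produced by the alignment helper (getAlignedSize in the plugins) preserves the original buffer layout; the casts only change how the compiler interprets the arithmetic, not the addresses being computed.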