Skip to content

Commit deb9f74

Browse files
committed
fix: add at::adaptive_avg_pool1d in interpolate plugin and fix #791
Signed-off-by: Ruoqian Guo <[email protected]>
1 parent f6f5e3e commit deb9f74

File tree

3 files changed

+34
-18
lines changed

3 files changed

+34
-18
lines changed

core/conversion/converters/impl/pooling.cpp

+6-18
Original file line numberDiff line numberDiff line change
@@ -37,7 +37,7 @@ bool AdaptivePoolingConverter(
3737
ConversionCtx* ctx,
3838
const torch::jit::Node* n,
3939
args& args,
40-
nvinfer1::PoolingType pool_type) {
40+
nvinfer1::PoolingType pool_type, const std::string& mode) {
4141
auto in = args[0].ITensorOrFreeze(ctx);
4242
auto out_size = util::toDims(args[1].unwrapToIntList());
4343

@@ -48,15 +48,7 @@ bool AdaptivePoolingConverter(
4848
}
4949

5050
auto orig_dims = in->getDimensions();
51-
bool expandDims = (orig_dims.nbDims < 4);
52-
TORCHTRT_CHECK(orig_dims.nbDims > 2, "Unable to create pooling layer from node: " << *n);
53-
if (expandDims) {
54-
in = addPadding(ctx, n, in, 4, false, false);
55-
}
56-
57-
if (out_size.nbDims == 1) {
58-
out_size = util::unsqueezeDims(out_size, 0, 1);
59-
}
51+
TORCHTRT_CHECK(orig_dims.nbDims > 1, "Unable to create pooling layer from node: " << *n);
6052

6153
auto in_shape = util::toVec(in->getDimensions());
6254
nvinfer1::ILayer* new_layer = nullptr;
@@ -90,10 +82,6 @@ bool AdaptivePoolingConverter(
9082
int32_t use_scales_casted = 0;
9183
f.emplace_back(nvinfer1::PluginField("use_scales", &use_scales_casted, nvinfer1::PluginFieldType::kINT32, 1));
9284

93-
std::string mode = "adaptive_avg_pool2d";
94-
if (pool_type == nvinfer1::PoolingType::kMAX) {
95-
mode = "adaptive_max_pool2d";
96-
}
9785
f.emplace_back(nvinfer1::PluginField("mode", &mode, nvinfer1::PluginFieldType::kCHAR, 1));
9886

9987
fc.nbFields = f.size();
@@ -110,7 +98,7 @@ bool AdaptivePoolingConverter(
11098
TORCHTRT_CHECK(new_layer, "Unable to create pooling (interpolation) plugin from node" << *n);
11199

112100
new_layer->setName(util::node_info(n).c_str());
113-
auto layer_output = addUnpadding(ctx, n, new_layer->getOutput(0), orig_dims.nbDims, false, false);
101+
auto layer_output = new_layer->getOutput(0);
114102

115103
ctx->AssociateValueAndTensor(n->outputs()[0], layer_output);
116104
LOG_DEBUG("Output tensor shape: " << layer_output->getDimensions());
@@ -238,15 +226,15 @@ auto pooling_registrations TORCHTRT_UNUSED =
238226
}})
239227
.pattern({"aten::adaptive_avg_pool1d(Tensor self, int[1] output_size) -> (Tensor)",
240228
[](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
241-
return AdaptivePoolingConverter(ctx, n, args, nvinfer1::PoolingType::kAVERAGE);
229+
return AdaptivePoolingConverter(ctx, n, args, nvinfer1::PoolingType::kAVERAGE, "adaptive_avg_pool1d");
242230
}})
243231
.pattern({"aten::adaptive_avg_pool2d(Tensor self, int[2] output_size) -> (Tensor)",
244232
[](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
245-
return AdaptivePoolingConverter(ctx, n, args, nvinfer1::PoolingType::kAVERAGE);
233+
return AdaptivePoolingConverter(ctx, n, args, nvinfer1::PoolingType::kAVERAGE, "adaptive_avg_pool2d");
246234
}})
247235
.pattern({"aten::adaptive_max_pool2d(Tensor self, int[2] output_size) -> (Tensor, Tensor)",
248236
[](ConversionCtx* ctx, const torch::jit::Node* n, args& args) -> bool {
249-
return AdaptivePoolingConverter(ctx, n, args, nvinfer1::PoolingType::kMAX);
237+
return AdaptivePoolingConverter(ctx, n, args, nvinfer1::PoolingType::kMAX, "adaptive_max_pool2d");
250238
}});
251239
} // namespace
252240
} // namespace impl

core/plugins/impl/interpolate_plugin.cpp

+2
Original file line numberDiff line numberDiff line change
@@ -289,6 +289,8 @@ int InterpolatePlugin::enqueue(
289289
out = at::upsample_bilinear2d(input, {size_[0], size_[1]}, align_corners_);
290290
} else if (mode_ == "trilinear") {
291291
out = at::upsample_trilinear3d(input, {size_[0], size_[1], size_[2]}, align_corners_);
292+
} else if(mode_ == "adaptive_avg_pool1d"){
293+
out = at::adaptive_avg_pool1d(input, {size_[0]});
292294
} else if (mode_ == "adaptive_avg_pool2d") {
293295
out = at::adaptive_avg_pool2d(input, {size_[0], size_[1]});
294296
} else if (mode_ == "adaptive_max_pool2d") {

tests/core/conversion/converters/test_pooling.cpp

+26
Original file line numberDiff line numberDiff line change
@@ -540,6 +540,32 @@ TEST(Converters, ATenAdaptiveAvgPool1DGlobalPoolingConvertsCorrectly) {
540540
ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0], 2e-6));
541541
}
542542

543+
TEST(Converters, ATenAdaptiveAvgPool1DUsingPluginConvertsCorrectly) {
544+
const auto graph =
545+
R"IR(
546+
graph(%0 : Tensor):
547+
%2 : int = prim::Constant[value=3]()
548+
%6 : int[] = prim::ListConstruct(%2)
549+
%10 : Tensor = aten::adaptive_avg_pool1d(%0, %6)
550+
return (%10))IR";
551+
552+
auto g = std::make_shared<torch::jit::Graph>();
553+
torch::jit::parseIR(graph, g.get());
554+
555+
// PyTorch's adaptive_avg_pool1d accepts only 2D or 3D inputs, so feed a 3D tensor here
556+
auto in = at::randint(-5, 5, {1, 3, 16}, at::kCUDA);
557+
558+
auto jit_in = at::clone(in);
559+
auto params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
560+
auto jit_results = torch_tensorrt::tests::util::RunGraph(g, params, {jit_in});
561+
562+
auto trt_in = at::clone(in);
563+
params = torch_tensorrt::core::ir::get_static_params(g->inputs(), {});
564+
auto trt_results = torch_tensorrt::tests::util::RunGraphEngine(g, params, {trt_in});
565+
566+
ASSERT_TRUE(torch_tensorrt::tests::util::almostEqual(jit_results[0], trt_results[0], 2e-6));
567+
}
568+
543569
TEST(Converters, ATenAdaptiveMaxPool2DConvertsCorrectly) {
544570
const auto graph = R"IR(
545571
graph(%0 : Tensor):

0 commit comments

Comments
 (0)