@@ -125,77 +125,6 @@ DEFINE_TWO_INPUT_SIMPLE_EVALUATOR(
     int64_t,
     {"aten::__round_to_zero_floordiv(int a, int b) -> (int)"});
 
-std::pair<std::vector<int64_t>, torch::TensorOptions> newTensorImplementation(const torch::jit::Node* n, kwargs& args) {
-  auto options = torch::TensorOptions().layout(torch::kStrided).device(torch::kCUDA);
-
-  // Input 2 is the dtype
-  if (!args.at(n->input(2)).isNone() && !args.at(n->input(2)).IValue()->isNone()) {
-    options = options.dtype(c10::ScalarType(args.at(n->input(2)).unwrapToInt()));
-  } else {
-    auto tensor_var = args.at(n->input(0));
-    if (tensor_var.isITensor()) {
-      auto tensor = tensor_var.ITensor();
-      options = options.dtype(scalarTypeToTypeMeta(util::TRTDataTypeToScalarType(tensor->getType())));
-    } else {
-      auto tensor = tensor_var.unwrapToTensor();
-      options = options.dtype(tensor.dtype());
-    }
-  }
-  return std::make_pair(args.at(n->input(1)).unwrapToIntList().vec(), options);
-}
-
-c10::optional<torch::jit::IValue> newTensorLikeImplementation(
-    ConversionCtx* ctx,
-    const torch::jit::Node* n,
-    kwargs& args,
-    const std::function<torch::Tensor(const std::vector<int64_t>&, const torch::TensorOptions&)>& tensor_builder) {
-  auto options = torch::TensorOptions().layout(torch::kStrided).device(torch::kCUDA);
-  auto tensor_var = args.at(n->input(0));
-
-  if (tensor_var.isITensor()) {
-    auto tensor = tensor_var.ITensor();
-    auto dtype = util::TRTDataTypeToScalarType(tensor->getType());
-    options = options.dtype(dtype);
-  } else {
-    auto tensor = tensor_var.unwrapToTensor();
-    options = options.dtype(tensor.dtype());
-  }
-
-  // Input 1 is the dtype
-  if (!args.at(n->input(1)).isNone() && !args.at(n->input(1)).IValue()->isNone()) {
-    options = options.dtype(c10::ScalarType(args.at(n->input(1)).unwrapToInt()));
-  }
-  std::vector<int64_t> tensor_dims;
-  if (tensor_var.isITensor()) {
-    auto tensor = tensor_var.ITensor();
-    tensor_dims = util::toVec(tensor->getDimensions());
-  } else {
-    auto tensor = tensor_var.unwrapToTensor();
-    tensor_dims = tensor.sizes().vec();
-  }
-  if (ctx->input_is_dynamic) {
-    auto self = args.at(n->input(0)).ITensorOrFreeze(ctx);
-    std::vector<int64_t> dims_vec(self->getDimensions().nbDims, 1);
-    auto constant = tensor_builder(dims_vec, options);
-    auto constant_itensor = converters::tensor_to_const(ctx, constant);
-    // broadcast constant to output shape
-    std::vector<int64_t> start_vec(self->getDimensions().nbDims, 0);
-    auto start_offset = util::toDims(c10::IntArrayRef(start_vec));
-    auto shape_layer = ctx->net->addShape(*self);
-    TORCHTRT_CHECK(shape_layer, "Unable to create shape layer from node: " << *n);
-    shape_layer->setName((util::node_info(n) + "_shape").c_str());
-    // slice implements expand
-    auto slice_layer = ctx->net->addSlice(*constant_itensor, start_offset, self->getDimensions(), start_offset);
-    TORCHTRT_CHECK(slice_layer, "Unable to create slice layer from node: " << *n);
-    slice_layer->setInput(2, *shape_layer->getOutput(0));
-    slice_layer->setName((util::node_info(n) + "_slice").c_str());
-    auto out_tensor = ctx->AssociateValueAndTensor(n->outputs()[0], slice_layer->getOutput(0));
-    LOG_DEBUG("Output tensor shape: " << out_tensor->getDimensions());
-    return {};
-  }
-  return tensor_builder(tensor_dims, options);
-}
-
 auto aten_registrations TORCHTRT_UNUSED =
     RegisterNodeEvaluators()
         .evaluator(