"Not all inputs provided are torch.tensors. Please provide torch.tensors as inputs"
422
+
)
423
+
ifoutput_formatnotinaccepted_formats:
424
+
raiseValueError(
425
+
f"Provided output_format {output_format} is not supported. Supported options are exported_program | torchscript"
426
+
)
427
+
ifnotfile_path:
428
+
raiseValueError("File path cannot be empty. Please provide a valid file path")
429
+
430
+
ifmodule_type==_ModuleType.nn:
431
+
raiseValueError(
432
+
"Input model is of type nn.Module. Saving nn.Module directly is not supported. Supported model types torch.jit.ScriptModule | torch.fx.GraphModule | torch.export.ExportedProgram."
433
+
)
434
+
elifmodule_type==_ModuleType.ts:
435
+
ifoutput_format=="exported_program":
436
+
raiseValueError(
437
+
"Provided model is a torch.jit.ScriptModule but the output_format specified is exported_program. Please verify the output_format"
438
+
)
439
+
else:
440
+
torch.jit.save(module, file_path)
441
+
elifmodule_type==_ModuleType.ep:
442
+
ifoutput_format=="torchscript":
443
+
raiseValueError(
444
+
"Provided model is a torch.export.ExportedProgram but the output_format specified is torchscript. Please verify the output_format"
445
+
)
446
+
else:
447
+
torch.export.save(module, file_path)
448
+
elifmodule_type==_ModuleType.fx:
449
+
ifinputsisNone:
450
+
raiseValueError(
451
+
"Provided model is a torch.fx.GraphModule however the inputs are empty. Please provide valid torch.tensors as inputs to trace and save the model"
"""Compile a TorchScript module for NVIDIA GPUs using TensorRT
80
78
81
79
Takes a existing TorchScript module and a set of settings to configure the compiler
@@ -132,7 +130,6 @@ def compile(
         enable_experimental_decompositions (bool): Use the full set of operator decompositions. These decompositions may not be tested but serve to make the graph easier to convert to TensorRT, potentially increasing the number of graphs run in TensorRT.
         dryrun (bool): Toggle for "Dryrun" mode, running everything except conversion to TRT and logging outputs
         hardware_compatible (bool): Build the TensorRT engines compatible with GPU architectures other than that of the GPU on which the engine was built (currently works for NVIDIA Ampere and newer)
-        output_format (str): Output format of the result of TRT compilation. Options include "exported_program" (or) "ep" | "torchscript" (or) "ts" | "graph_module" (or) "fx". Default is "exported_program"
         **kwargs: Any,
     Returns:
         torch.fx.GraphModule: Compiled FX Module, when run it will execute via TensorRT
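Since output_format is dropped from compile() in this hunk, serialization of the returned torch.fx.GraphModule would presumably go through torch_tensorrt.save instead; a hedged sketch under that assumption (MyModel, shapes, and file names are again placeholders):

import torch
import torch_tensorrt

model = MyModel().eval().cuda()  # placeholder nn.Module, assumed to exist
example_inputs = [torch.randn(1, 3, 224, 224).cuda()]  # placeholder inputs

# compile() returns a torch.fx.GraphModule that runs through TensorRT engines at execution time.
trt_gm = torch_tensorrt.compile(model, ir="dynamo", inputs=example_inputs)

# The on-disk format is chosen at save time rather than via compile(..., output_format=...).
torch_tensorrt.save(trt_gm, "trt_model.ep", inputs=example_inputs)  # ExportedProgram (default)
torch_tensorrt.save(trt_gm, "trt_model.ts", output_format="torchscript", inputs=example_inputs)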