Skip to content

Commit ba569aa

Browse files
gs-oliveperi044
and authored
feat: Merge dynamo additions into release/1.4 (#1884)
Signed-off-by: Dheeraj Peri <[email protected]> Co-authored-by: Dheeraj Peri <[email protected]>
1 parent 76d879c commit ba569aa

Some content is hidden

Large commits have some content hidden by default. Use the search box below for content that may be hidden.

42 files changed

+3844
-159
lines changed

.circleci/config.yml

+117
Original file line numberDiff line numberDiff line change
@@ -521,6 +521,7 @@ commands:
521521
- store_artifacts:
522522
path: /tmp/testlogs
523523

524+
# =================== FX tests start ======================== #
524525
test-fx_core:
525526
description: "Test the fx core"
526527
steps:
@@ -720,6 +721,61 @@ commands:
720721
- store_artifacts:
721722
path: /tmp/testlogs
722723

724+
# =================== FX tests end ======================== #
725+
726+
# =================== Dynamo tests start ======================== #
727+
test-dynamo-fx_ts:
728+
description: "Test the Dynamo fx_ts_compat path"
729+
steps:
730+
- run:
731+
name: Run Dynamo fx_ts_compat core tests
732+
command: |
733+
cd py/torch_tensorrt/dynamo/fx_ts_compat/test
734+
pushd core/
735+
pytest --junitxml=/tmp/artifacts/test_results/dynamo/fx_ts_compat/test_results.xml
736+
popd
737+
738+
- store_test_results:
739+
path: /tmp/artifacts
740+
- store_artifacts:
741+
path: /tmp/testlogs
742+
743+
test-dynamo-torch_compile-core:
744+
description: "Test the Dynamo torch_compile path"
745+
steps:
746+
- run:
747+
name: Run Dynamo torch_compile core tests
748+
command: |
749+
cd py/torch_tensorrt/dynamo/torch_compile
750+
pushd test/
751+
pytest --junitxml=/tmp/artifacts/test_results/dynamo/torch_compile/test_results.xml
752+
popd
753+
754+
- store_test_results:
755+
path: /tmp/artifacts
756+
- store_artifacts:
757+
path: /tmp/testlogs
758+
759+
test-dynamo-torch_compile:
760+
description: "Test the Dynamo torch_compile path"
761+
steps:
762+
- run:
763+
name: Run Dynamo torch_compile E2E tests
764+
command: |
765+
cd py/torch_tensorrt/dynamo/
766+
pushd test/
767+
pip3 install timm
768+
pip3 install transformers
769+
pytest --junitxml=/tmp/artifacts/test_results/dynamo/torch_compile/test_results.xml --ir torch_compile
770+
popd
771+
772+
- store_test_results:
773+
path: /tmp/artifacts
774+
- store_artifacts:
775+
path: /tmp/testlogs
776+
777+
# =================== Dynamo tests end ======================== #
778+
723779
# Define a job to be invoked later in a workflow.
724780
# See: https://circleci.com/docs/2.0/configuration-reference/#jobs
725781
jobs:
@@ -911,6 +967,43 @@ jobs:
911967
- dump-test-env
912968
- test-fx-no-aten
913969

970+
test-py-dynamo-x86_64-linux:
971+
parameters:
972+
torch-build:
973+
type: string
974+
torch-build-index:
975+
type: string
976+
trt-version-long:
977+
type: string
978+
python-version:
979+
type: string
980+
machine:
981+
image: linux-cuda-11:2023.02.1
982+
resource_class: gpu.nvidia.large
983+
steps:
984+
- checkout
985+
- setup-py-version:
986+
python-version: << parameters.python-version >>
987+
- attach_workspace:
988+
at: /tmp/dist/
989+
- install-torch-from-index:
990+
torch-build: << parameters.torch-build >>
991+
torch-build-index: << parameters.torch-build-index >>
992+
- create-py-env:
993+
trt-version-long: << parameters.trt-version-long >>
994+
- install-cudnn
995+
# - run:
996+
# name: "Set LD_LIBRARY_PATH path to include the installed CUDNN"
997+
# command: export LD_LIBRARY_PATH=/usr/lib/x86_64-linux-gnu/:$LD_LIBRARY_PATH
998+
- run:
999+
name: "Install torch-tensorrt"
1000+
command: pip3 install --pre /tmp/dist/x86_64-linux/*cp39-cp39*.whl
1001+
# We install torch after torch-trt because pip automatically enforces the version constraint otherwise
1002+
- dump-test-env
1003+
- test-dynamo-torch_compile
1004+
- test-dynamo-torch_compile-core
1005+
- test-dynamo-fx_ts
1006+
9141007
package-x86_64-linux:
9151008
parameters:
9161009
enabled:
@@ -1300,6 +1393,14 @@ workflows:
13001393
requires:
13011394
- build-x86_64-linux
13021395

1396+
- test-py-dynamo-x86_64-linux:
1397+
torch-build: << pipeline.parameters.torch-build >>
1398+
torch-build-index: << pipeline.parameters.torch-build-index >>
1399+
trt-version-long: << pipeline.parameters.trt-version-long >>
1400+
python-version: << pipeline.parameters.python-version >>
1401+
requires:
1402+
- build-x86_64-linux
1403+
13031404
- build-x86_64-linux:
13041405
name: build-x86_64-linux-legacy
13051406
torch-build: << pipeline.parameters.torch-build-legacy >>
@@ -1374,6 +1475,14 @@ workflows:
13741475
requires:
13751476
- package-x86_64-linux
13761477

1478+
- test-py-dynamo-x86_64-linux:
1479+
torch-build: << pipeline.parameters.torch-build >>
1480+
torch-build-index: << pipeline.parameters.torch-build-index >>
1481+
trt-version-long: << pipeline.parameters.trt-version-long >>
1482+
python-version: << pipeline.parameters.python-version >>
1483+
requires:
1484+
- package-x86_64-linux
1485+
13771486
on-push:
13781487
jobs:
13791488
- build-x86_64-linux:
@@ -1407,6 +1516,14 @@ workflows:
14071516
requires:
14081517
- build-x86_64-linux
14091518

1519+
- test-py-dynamo-x86_64-linux:
1520+
torch-build: << pipeline.parameters.torch-build >>
1521+
torch-build-index: << pipeline.parameters.torch-build-index >>
1522+
trt-version-long: << pipeline.parameters.trt-version-long >>
1523+
python-version: << pipeline.parameters.python-version >>
1524+
requires:
1525+
- build-x86_64-linux
1526+
14101527
- build-x86_64-linux-cmake:
14111528
torch-build: << pipeline.parameters.torch-build >>
14121529
torch-build-index: << pipeline.parameters.torch-build-index >>

py/setup.py

+41-26
Original file line numberDiff line numberDiff line change
@@ -362,6 +362,10 @@ def run(self):
362362
"torch_tensorrt.fx.tools",
363363
"torch_tensorrt.fx.tracer.acc_tracer",
364364
"torch_tensorrt.fx.tracer.dispatch_tracer",
365+
"torch_tensorrt.dynamo",
366+
"torch_tensorrt.dynamo.fx_ts_compat",
367+
"torch_tensorrt.dynamo.fx_ts_compat.passes",
368+
"torch_tensorrt.dynamo.fx_ts_compat.tools",
365369
]
366370
package_dir = {
367371
"torch_tensorrt.fx": "torch_tensorrt/fx",
@@ -370,11 +374,47 @@ def run(self):
370374
"torch_tensorrt.fx.tools": "torch_tensorrt/fx/tools",
371375
"torch_tensorrt.fx.tracer.acc_tracer": "torch_tensorrt/fx/tracer/acc_tracer",
372376
"torch_tensorrt.fx.tracer.dispatch_tracer": "torch_tensorrt/fx/tracer/dispatch_tracer",
377+
"torch_tensorrt.dynamo": "torch_tensorrt/dynamo",
378+
"torch_tensorrt.dynamo.fx_ts_compat": "torch_tensorrt/dynamo/fx_ts_compat",
379+
"torch_tensorrt.dynamo.fx_ts_compat.passes": "torch_tensorrt/dynamo/fx_ts_compat/passes",
380+
"torch_tensorrt.dynamo.fx_ts_compat.tools": "torch_tensorrt/dynamo/fx_ts_compat/tools",
373381
}
374382

375383
with open("README.md", "r", encoding="utf-8") as fh:
376384
long_description = fh.read()
377385

386+
if FX_ONLY:
387+
package_data_list = [
388+
"_Input.py",
389+
]
390+
else:
391+
package_data_list = [
392+
"lib/*",
393+
"include/torch_tensorrt/*.h",
394+
"include/torch_tensorrt/core/*.h",
395+
"include/torch_tensorrt/core/conversion/*.h",
396+
"include/torch_tensorrt/core/conversion/conversionctx/*.h",
397+
"include/torch_tensorrt/core/conversion/converters/*.h",
398+
"include/torch_tensorrt/core/conversion/evaluators/*.h",
399+
"include/torch_tensorrt/core/conversion/tensorcontainer/*.h",
400+
"include/torch_tensorrt/core/conversion/var/*.h",
401+
"include/torch_tensorrt/core/ir/*.h",
402+
"include/torch_tensorrt/core/lowering/*.h",
403+
"include/torch_tensorrt/core/lowering/passes/*.h",
404+
"include/torch_tensorrt/core/partitioning/*.h",
405+
"include/torch_tensorrt/core/partitioning/segmentedblock/*.h",
406+
"include/torch_tensorrt/core/partitioning/partitioninginfo/*.h",
407+
"include/torch_tensorrt/core/partitioning/partitioningctx/*.h",
408+
"include/torch_tensorrt/core/plugins/*.h",
409+
"include/torch_tensorrt/core/plugins/impl/*.h",
410+
"include/torch_tensorrt/core/runtime/*.h",
411+
"include/torch_tensorrt/core/util/*.h",
412+
"include/torch_tensorrt/core/util/logging/*.h",
413+
"bin/*",
414+
"BUILD",
415+
"WORKSPACE",
416+
]
417+
378418
setup(
379419
name="torch_tensorrt",
380420
version=__version__,
@@ -418,32 +458,7 @@ def run(self):
418458
python_requires=">=3.8",
419459
include_package_data=True,
420460
package_data={
421-
"torch_tensorrt": [
422-
"lib/*",
423-
"include/torch_tensorrt/*.h",
424-
"include/torch_tensorrt/core/*.h",
425-
"include/torch_tensorrt/core/conversion/*.h",
426-
"include/torch_tensorrt/core/conversion/conversionctx/*.h",
427-
"include/torch_tensorrt/core/conversion/converters/*.h",
428-
"include/torch_tensorrt/core/conversion/evaluators/*.h",
429-
"include/torch_tensorrt/core/conversion/tensorcontainer/*.h",
430-
"include/torch_tensorrt/core/conversion/var/*.h",
431-
"include/torch_tensorrt/core/ir/*.h",
432-
"include/torch_tensorrt/core/lowering/*.h",
433-
"include/torch_tensorrt/core/lowering/passes/*.h",
434-
"include/torch_tensorrt/core/partitioning/*.h",
435-
"include/torch_tensorrt/core/partitioning/segmentedblock/*.h",
436-
"include/torch_tensorrt/core/partitioning/partitioninginfo/*.h",
437-
"include/torch_tensorrt/core/partitioning/partitioningctx/*.h",
438-
"include/torch_tensorrt/core/plugins/*.h",
439-
"include/torch_tensorrt/core/plugins/impl/*.h",
440-
"include/torch_tensorrt/core/runtime/*.h",
441-
"include/torch_tensorrt/core/util/*.h",
442-
"include/torch_tensorrt/core/util/logging/*.h",
443-
"bin/*",
444-
"BUILD",
445-
"WORKSPACE",
446-
],
461+
"torch_tensorrt": package_data_list,
447462
},
448463
exclude_package_data={
449464
"": ["*.cpp"],

py/torch_tensorrt/_Device.py

+24-10
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,17 @@
11
import torch
22

3-
from torch_tensorrt import _enums
3+
# from torch_tensorrt import _enums
4+
import tensorrt as trt
45
from torch_tensorrt import logging
5-
from torch_tensorrt import _C
6-
76
import warnings
87

8+
try:
9+
from torch_tensorrt import _C
10+
except:
11+
warnings.warn(
12+
"Unable to import torchscript frontend core and torch-tensorrt runtime. Some dependent features may be unavailable."
13+
)
14+
915

1016
class Device(object):
1117
"""
@@ -51,7 +57,7 @@ def __init__(self, *args, **kwargs):
5157
)
5258
else:
5359
(self.device_type, id) = Device._parse_device_str(args[0])
54-
if self.device_type == _enums.DeviceType.GPU:
60+
if self.device_type == trt.DeviceType.GPU:
5561
self.gpu_id = id
5662
else:
5763
self.dla_core = id
@@ -64,7 +70,7 @@ def __init__(self, *args, **kwargs):
6470
elif len(args) == 0:
6571
if "gpu_id" in kwargs or "dla_core" in kwargs:
6672
if "dla_core" in kwargs:
67-
self.device_type = _enums.DeviceType.DLA
73+
self.device_type = trt.DeviceType.DLA
6874
self.dla_core = kwargs["dla_core"]
6975
if "gpu_id" in kwargs:
7076
self.gpu_id = kwargs["gpu_id"]
@@ -76,7 +82,7 @@ def __init__(self, *args, **kwargs):
7682
)
7783
else:
7884
self.gpu_id = kwargs["gpu_id"]
79-
self.device_type = _enums.DeviceType.GPU
85+
self.device_type = trt.DeviceType.GPU
8086
else:
8187
raise ValueError(
8288
"Either gpu_id or dla_core or both must be defined if no string with device specs is provided as an arg"
@@ -97,15 +103,23 @@ def __init__(self, *args, **kwargs):
97103
def __str__(self) -> str:
98104
return (
99105
"Device(type={}, gpu_id={}".format(self.device_type, self.gpu_id) + ")"
100-
if self.device_type == _enums.DeviceType.GPU
106+
if self.device_type == trt.DeviceType.GPU
101107
else ", dla_core={}, allow_gpu_fallback={}".format(
102108
self.dla_core, self.allow_gpu_fallback
103109
)
104110
)
105111

106112
def _to_internal(self) -> _C.Device:
107113
internal_dev = _C.Device()
108-
internal_dev.device_type = self.device_type
114+
if self.device_type == trt.DeviceType.GPU:
115+
internal_dev.device_type = _C.DeviceType.GPU
116+
elif self.device_type == trt.DeviceType.DLA:
117+
internal_dev.device_type = _C.DeviceType.DLA
118+
else:
119+
raise ValueError(
120+
"Invalid DeviceType detected while parsing the Device class"
121+
)
122+
109123
internal_dev.gpu_id = self.gpu_id
110124
internal_dev.dla_core = self.dla_core
111125
internal_dev.allow_gpu_fallback = self.allow_gpu_fallback
@@ -136,6 +150,6 @@ def _parse_device_str(s):
136150
s = s.lower()
137151
spec = s.split(":")
138152
if spec[0] == "gpu" or spec[0] == "cuda":
139-
return (_enums.DeviceType.GPU, int(spec[1]))
153+
return (trt.DeviceType.GPU, int(spec[1]))
140154
elif spec[0] == "dla":
141-
return (_enums.DeviceType.DLA, int(spec[1]))
155+
return (trt.DeviceType.DLA, int(spec[1]))

0 commit comments

Comments
 (0)