@@ -36,89 +36,89 @@ RUN pyenv install -v ${PYTHON_VERSION}
RUN pyenv global ${PYTHON_VERSION}

# Install CUDNN + TensorRT + dependencies
- RUN wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-ubuntu2204.pin
- RUN mv cuda-ubuntu2204.pin /etc/apt/preferences.d/cuda-repository-pin-600
- RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/7fa2af80.pub
- RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 536F8F1DE80F6A35
- RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A4B469963BF863CC
- RUN add-apt-repository "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/ /"
- RUN apt-get update
- RUN apt-get install -y libcudnn8=${CUDNN_VERSION}* libcudnn8-dev=${CUDNN_VERSION}*
-
- RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/3bf863cc.pub
- RUN add-apt-repository "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/ /"
- RUN apt-get update
-
- RUN apt-get install -y libnvinfer8=${TENSORRT_VERSION}.* libnvinfer-plugin8=${TENSORRT_VERSION}.* libnvinfer-dev=${TENSORRT_VERSION}.* libnvinfer-plugin-dev=${TENSORRT_VERSION}.* libnvonnxparsers8=${TENSORRT_VERSION}.* libnvonnxparsers-dev=${TENSORRT_VERSION}.* libnvparsers8=${TENSORRT_VERSION}.* libnvparsers-dev=${TENSORRT_VERSION}.*
-
- # Setup Bazel via Bazelisk
- RUN wget -q https://github.com/bazelbuild/bazelisk/releases/download/v1.17.0/bazelisk-linux-amd64 -O /usr/bin/bazel &&\
- chmod a+x /usr/bin/bazel
-
- # Build Torch-TensorRT in an auxillary container
- FROM base as torch-tensorrt-builder-base
-
- ARG ARCH="x86_64"
- ARG TARGETARCH="amd64"
-
- RUN apt-get update
- RUN apt-get install -y python3-setuptools
- RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/3bf863cc.pub
-
- RUN apt-get update &&\
- apt-get install -y --no-install-recommends locales ninja-build &&\
- rm -rf /var/lib/apt/lists/* &&\
- locale-gen en_US.UTF-8
-
- FROM torch-tensorrt-builder-base as torch-tensorrt-builder
-
- COPY . /workspace/torch_tensorrt/src
- WORKDIR /workspace/torch_tensorrt/src
- RUN cp ./docker/WORKSPACE.docker WORKSPACE
-
- # Symlink the path pyenv is using for python with the /opt directory for package sourcing
- RUN mkdir -p "/opt/python3/" &&\
- ln -s "`pyenv which python | xargs dirname | xargs dirname`/lib/python$PYTHON_VERSION/site-packages" "/opt/python3/"
-
- # Extract base image cuda version (everything after :, before -, before final ., in BASE_IMG)
- # Ensure the default cuda folder agrees with the version in the base image
- RUN CUDA_BASE_IMG_VERSION_INTERMEDIATE=`echo ${BASE_IMG#*:}` &&\
- CUDA_BASE_IMG_VERSION=`echo ${CUDA_BASE_IMG_VERSION_INTERMEDIATE%%-*}` &&\
- CUDA_MAJOR_MINOR_VERSION=`echo ${CUDA_BASE_IMG_VERSION%.*}` &&\
- rm -fr /usr/local/cuda &&\
- ln -s /usr/local/cuda-${CUDA_MAJOR_MINOR_VERSION} /usr/local/cuda
-
- ENV CUDA_HOME=/usr/local/cuda
-
- # This script builds both libtorchtrt bin/lib/include tarball and the Python wheel, in dist/
- RUN bash ./docker/dist-build.sh
-
- # Copy and install Torch-TRT into the main container
- FROM base as torch-tensorrt
-
- COPY . /opt/torch_tensorrt
-
- # Symlink the path pyenv is using for python with the /opt directory for package sourcing
- RUN mkdir -p "/opt/python3/" &&\
- ln -s "`pyenv which python | xargs dirname | xargs dirname`/lib/python$PYTHON_VERSION/site-packages" "/opt/python3/"
-
- COPY --from=torch-tensorrt-builder /workspace/torch_tensorrt/src/dist/ .
-
- RUN cp /opt/torch_tensorrt/docker/WORKSPACE.docker /opt/torch_tensorrt/WORKSPACE &&\
- pip install -r /opt/torch_tensorrt/py/requirements.txt &&\
- # Install all dependency wheel files and user-specified TensorRT
- pip install *.whl &&\
- pip install tensorrt==${TENSORRT_VERSION}.* &&\
- # Add the Torch-TensorRT wheel file to the dist directory and delete all other .whl files
- rm -fr /workspace/torch_tensorrt/dist/* &&\
- mkdir -p /opt/torch_tensorrt/dist/ && mv torch_tensorrt*.whl /opt/torch_tensorrt/dist/ &&\
- rm -fr *.whl &&\
- # Remove other cache files if present
- pip cache purge && rm -rf /opt/torch_tensorrt/.mypy_cache
-
- WORKDIR /opt/torch_tensorrt
-
- ENV LD_LIBRARY_PATH /opt/python3/site-packages/torch/lib:/opt/python3/site-packages/torch_tensorrt/lib:/usr/lib/x86_64-linux-gnu:${LD_LIBRARY_PATH}
- ENV PATH /opt/python3/site-packages/torch_tensorrt/bin:${PATH}
+ # RUN wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-ubuntu2204.pin
+ # RUN mv cuda-ubuntu2204.pin /etc/apt/preferences.d/cuda-repository-pin-600
+ # RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/7fa2af80.pub
+ # RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys 536F8F1DE80F6A35
+ # RUN apt-key adv --keyserver keyserver.ubuntu.com --recv-keys A4B469963BF863CC
+ # RUN add-apt-repository "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/ /"
+ # RUN apt-get update
+ # RUN apt-get install -y libcudnn8=${CUDNN_VERSION}* libcudnn8-dev=${CUDNN_VERSION}*
+
+ # RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/3bf863cc.pub
+ # RUN add-apt-repository "deb https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/ /"
+ # RUN apt-get update
+
+ # RUN apt-get install -y libnvinfer8=${TENSORRT_VERSION}.* libnvinfer-plugin8=${TENSORRT_VERSION}.* libnvinfer-dev=${TENSORRT_VERSION}.* libnvinfer-plugin-dev=${TENSORRT_VERSION}.* libnvonnxparsers8=${TENSORRT_VERSION}.* libnvonnxparsers-dev=${TENSORRT_VERSION}.* libnvparsers8=${TENSORRT_VERSION}.* libnvparsers-dev=${TENSORRT_VERSION}.*
+
+ # # Setup Bazel via Bazelisk
+ # RUN wget -q https://github.com/bazelbuild/bazelisk/releases/download/v1.17.0/bazelisk-linux-amd64 -O /usr/bin/bazel &&\
+ # chmod a+x /usr/bin/bazel
+
+ # # Build Torch-TensorRT in an auxillary container
+ # FROM base as torch-tensorrt-builder-base
+
+ # ARG ARCH="x86_64"
+ # ARG TARGETARCH="amd64"
+
+ # RUN apt-get update
+ # RUN apt-get install -y python3-setuptools
+ # RUN apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/3bf863cc.pub
+
+ # RUN apt-get update &&\
+ # apt-get install -y --no-install-recommends locales ninja-build &&\
+ # rm -rf /var/lib/apt/lists/* &&\
+ # locale-gen en_US.UTF-8
+
+ # FROM torch-tensorrt-builder-base as torch-tensorrt-builder
+
+ # COPY . /workspace/torch_tensorrt/src
+ # WORKDIR /workspace/torch_tensorrt/src
+ # RUN cp ./docker/WORKSPACE.docker WORKSPACE
+
+ # # Symlink the path pyenv is using for python with the /opt directory for package sourcing
+ # RUN mkdir -p "/opt/python3/" &&\
+ # ln -s "`pyenv which python | xargs dirname | xargs dirname`/lib/python$PYTHON_VERSION/site-packages" "/opt/python3/"
+
+ # # Extract base image cuda version (everything after :, before -, before final ., in BASE_IMG)
+ # # Ensure the default cuda folder agrees with the version in the base image
+ # RUN CUDA_BASE_IMG_VERSION_INTERMEDIATE=`echo ${BASE_IMG#*:}` &&\
+ # CUDA_BASE_IMG_VERSION=`echo ${CUDA_BASE_IMG_VERSION_INTERMEDIATE%%-*}` &&\
+ # CUDA_MAJOR_MINOR_VERSION=`echo ${CUDA_BASE_IMG_VERSION%.*}` &&\
+ # rm -fr /usr/local/cuda &&\
+ # ln -s /usr/local/cuda-${CUDA_MAJOR_MINOR_VERSION} /usr/local/cuda
+
+ # ENV CUDA_HOME=/usr/local/cuda
+
+ # # This script builds both libtorchtrt bin/lib/include tarball and the Python wheel, in dist/
+ # RUN bash ./docker/dist-build.sh
+
+ # # Copy and install Torch-TRT into the main container
+ # FROM base as torch-tensorrt
+
+ # COPY . /opt/torch_tensorrt
+
+ # # Symlink the path pyenv is using for python with the /opt directory for package sourcing
+ # RUN mkdir -p "/opt/python3/" &&\
+ # ln -s "`pyenv which python | xargs dirname | xargs dirname`/lib/python$PYTHON_VERSION/site-packages" "/opt/python3/"
+
+ # COPY --from=torch-tensorrt-builder /workspace/torch_tensorrt/src/dist/ .
+
+ # RUN cp /opt/torch_tensorrt/docker/WORKSPACE.docker /opt/torch_tensorrt/WORKSPACE &&\
+ # pip install -r /opt/torch_tensorrt/py/requirements.txt &&\
+ # # Install all dependency wheel files and user-specified TensorRT
+ # pip install *.whl &&\
+ # pip install tensorrt==${TENSORRT_VERSION}.* &&\
+ # # Add the Torch-TensorRT wheel file to the dist directory and delete all other .whl files
+ # rm -fr /workspace/torch_tensorrt/dist/* &&\
+ # mkdir -p /opt/torch_tensorrt/dist/ && mv torch_tensorrt*.whl /opt/torch_tensorrt/dist/ &&\
+ # rm -fr *.whl &&\
+ # # Remove other cache files if present
+ # pip cache purge && rm -rf /opt/torch_tensorrt/.mypy_cache
+
+ # WORKDIR /opt/torch_tensorrt
+
+ # ENV LD_LIBRARY_PATH /opt/python3/site-packages/torch/lib:/opt/python3/site-packages/torch_tensorrt/lib:/usr/lib/x86_64-linux-gnu:${LD_LIBRARY_PATH}
+ # ENV PATH /opt/python3/site-packages/torch_tensorrt/bin:${PATH}

CMD /bin/bash
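A side note on the CUDA-version step that this commit comments out: it relies only on standard shell parameter expansion. A minimal sketch of what those expansions do, assuming a hypothetical BASE_IMG value (the real value is supplied to the Dockerfile at build time):

    #!/usr/bin/env bash
    # Hypothetical example value; the actual BASE_IMG is provided as a Docker build argument.
    BASE_IMG="nvidia/cuda:12.1.1-devel-ubuntu22.04"

    # Keep everything after the first ':'   -> 12.1.1-devel-ubuntu22.04
    CUDA_BASE_IMG_VERSION_INTERMEDIATE=${BASE_IMG#*:}
    # Keep everything before the first '-'  -> 12.1.1
    CUDA_BASE_IMG_VERSION=${CUDA_BASE_IMG_VERSION_INTERMEDIATE%%-*}
    # Drop the final '.patch' component     -> 12.1
    CUDA_MAJOR_MINOR_VERSION=${CUDA_BASE_IMG_VERSION%.*}

    echo "${CUDA_MAJOR_MINOR_VERSION}"   # prints: 12.1

The resulting major.minor string is what the builder stage used to point /usr/local/cuda at the matching /usr/local/cuda-X.Y directory.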
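For rebuilding the image from this Dockerfile, an invocation along the following lines should work. Only the variable names (BASE_IMG, PYTHON_VERSION, CUDNN_VERSION, TENSORRT_VERSION) come from the file above and are presumably declared as build ARGs earlier in it; the Dockerfile path, image tag, and version numbers here are placeholders, not taken from the commit:

    # Hypothetical build command; adjust versions, path, and tag to your setup.
    docker build \
      --build-arg BASE_IMG=nvidia/cuda:12.1.1-devel-ubuntu22.04 \
      --build-arg PYTHON_VERSION=3.10.12 \
      --build-arg CUDNN_VERSION=8.9 \
      --build-arg TENSORRT_VERSION=8.6 \
      -f docker/Dockerfile \
      -t torch_tensorrt:latest \
      .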