"""
.. _data_parallel_stable_diffusion:

Torch-TensorRT Distributed Inference
======================================================

This interactive script is intended as a sample of distributed inference using
data parallelism with the Accelerate library and the Torch-TensorRT workflow on
a Stable Diffusion model.
"""

# %%
# Imports and Model Definition
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
import torch
from accelerate import PartialState
from diffusers import DiffusionPipeline

import torch_tensorrt

model_id = "CompVis/stable-diffusion-v1-4"

# Instantiate the Stable Diffusion pipeline with FP16 weights.
# NOTE(review): recent diffusers releases select half-precision weights via
# `variant="fp16"` rather than `revision="fp16"` — confirm against the
# diffusers version this example is pinned to before changing.
pipe = DiffusionPipeline.from_pretrained(
    model_id, revision="fp16", torch_dtype=torch.float16
)

# One PartialState per process; Accelerate assigns each process its own device.
distributed_state = PartialState()
pipe = pipe.to(distributed_state.device)

backend = "torch_tensorrt"

# Optimize the UNet portion with Torch-TensorRT.
pipe.unet = torch.compile(
    pipe.unet,
    backend=backend,
    options={
        "truncate_long_and_double": True,
        "precision": torch.float16,
        "debug": True,
        "use_python_runtime": True,
    },
    dynamic=False,
)
# Enable safe concurrent use of TensorRT engines across multiple devices.
torch_tensorrt.runtime.set_multi_device_safe_mode(True)


# %%
# Inference
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

# Assume there are 2 processes (2 devices): each process receives its share of
# the prompt list and generates its image independently.
prompts = ["a dog", "a cat"]
with distributed_state.split_between_processes(prompts) as prompt:
    print("before \n")
    result = pipe(prompt).images[0]
    print("after ")
    result.save(f"result_{distributed_state.process_index}.png")