from typing import Dict, Tuple
import torch
from torch._custom_op.impl import custom_op
from torch.fx.node import Argument, Target

from torch_tensorrt.fx.converter_registry import tensorrt_converter
from torch_tensorrt.fx.converters import acc_ops_converters
from torch_tensorrt.fx.types import TRTNetwork, TRTTensor

from torch_tensorrt.dynamo.backend.lowering import module_substitution


# This file serves as an example and a tutorial for excluding custom modules from
# torch.compile tracing. Each required step is labeled with a number indicating the
# preferred implementation order.


# 1. The Placeholder
#
# Specify the schema and namespace of the operator, as well as a placeholder function
# representing the schema. The schema should be written in TorchScript syntax, indicating
# input and output types. The namespace, such as tensorrt, will cause the op to be
# registered as torch.ops.tensorrt.your_op. Then, create a placeholder function with no
# operations, but having the same schema and naming as that used in the decorator.
@custom_op(
    qualname="tensorrt::maxpool1d",
    manual_schema="(Tensor x, int[1] kernel_size, int[1] stride, int[1] padding, int[1] dilation, bool ceil_mode) -> Tensor",
)
def maxpool1d(x, kernel_size, stride, padding, dilation, ceil_mode):
    # Defines operator schema, name, namespace, and function header
    ...


# 2. The Generic Implementation
#
# Define the default implementation of the operator in torch syntax. This is used for
# autograd and other tracing functionality. Generally, the torch.nn.functional analog
# of the operator to replace is desirable. If the operator to replace is a custom module
# you've written, then add its torch implementation here. Note that the generic function's
# header can take specific, named arguments, as in the placeholder above.
@maxpool1d.impl("cpu")
@maxpool1d.impl("cuda")
@maxpool1d.impl_abstract()
def maxpool1d_generic(
    *args,
    **kwargs,
):
    # Defines an implementation for AOT Autograd to use for shape analysis/propagation
    return torch.nn.functional.max_pool1d(
        *args,
        **kwargs,
    )

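# A quick sanity check (an illustrative sketch, not part of the original tutorial):
# after the two registrations above, the op is callable eagerly under the namespace
# from step 1 and dispatches to the generic implementation:
#
#     x = torch.randn(1, 4, 16)
#     out = torch.ops.tensorrt.maxpool1d(x, [3], [1], [0], [1], False)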


# 3. The Module Substitution Function
#
# Define a function which can intercept a node of the kind to be replaced, extract
# the relevant data from that node/submodule, and then re-package the information
# for use by an accelerated implementation (to be implemented in step 4). This function
# should use the operator defined in step 1 (for example torch.ops.tensorrt.maxpool1d).
# It should refactor the args and kwargs as needed by the accelerated implementation.
#
# If the submodule has weights or other Tensor fields which the accelerated implementation
# needs, the function should insert the necessary nodes to access those weights. For example,
# if the weight Tensor of a submodule is needed, one could write:
#
#     weights = gm.graph.get_attr(node.target + ".weight", torch.Tensor)
#     bias = gm.graph.get_attr(node.target + ".bias", torch.Tensor)
#     ...
#     kwargs={"weight": weights,
#             "bias": bias,
#             ...}
#
@module_substitution(torch.nn.MaxPool1d, torch.ops.tensorrt.maxpool1d)
def maxpool1d_insertion_fn(
    gm: torch.fx.GraphModule, submodule: torch.nn.Module, node: torch.fx.Node
) -> torch.fx.Node:
    # Defines insertion function for new node
    new_node = gm.graph.call_function(
        torch.ops.tensorrt.maxpool1d,
        args=node.args,
        kwargs={
            "kernel_size": submodule.kernel_size,
            "stride": submodule.stride,
            "padding": submodule.padding,
            "dilation": submodule.dilation,
            "ceil_mode": submodule.ceil_mode,
        },
    )

    return new_node

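# For illustration (a hypothetical FX graph printout; the module name "pool" is assumed),
# the substitution rewrites a call_module node:
#
#     %pool : [...] = call_module[target=pool](args = (%x,), kwargs = {})
#
# into a call_function node targeting the custom operator:
#
#     %maxpool1d : [...] = call_function[target=tensorrt.maxpool1d](
#         args = (%x,), kwargs = {kernel_size: 3, stride: 1, padding: 0, dilation: 1, ceil_mode: False})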


# 4. The Accelerated Implementation
#
# Define an accelerated implementation of the operator, and register it as necessary.
# This accelerated implementation should consume the args/kwargs specified in step 3.
# One should expect that torch.compile will flatten all kwargs into the args field, in
# the order specified by the schema written in step 1.
@tensorrt_converter(torch.ops.tensorrt.maxpool1d.default)
def tensorrt_maxpool1d(
    network: TRTNetwork,
    target: Target,
    args: Tuple[Argument, ...],
    kwargs: Dict[str, Argument],
    name: str,
) -> TRTTensor:
    # Defines the converter for this operator, re-packaging the flattened args into
    # the kwargs expected by the existing acc_ops max_pool1d converter
    kwargs_new = {
        "input": args[0],
        "kernel_size": args[1],
        "stride": args[2],
        "padding": args[3],
        "dilation": args[4],
        # Default ceil_mode to False if it was not provided positionally
        "ceil_mode": False if len(args) < 6 else args[5],
    }

    return acc_ops_converters.acc_ops_max_pool1d(
        network, target, None, kwargs_new, name
    )


# 5. Add Imports
#
# Add your accelerated module file to the __init__.py in this directory, to ensure
# all registrations are run. For instance, if the new module file is called new_mod.py,
# one should add `from .new_mod import *` to the __init__.py.
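

# End-to-end sketch (illustrative, not part of the original tutorial). Eagerly, the
# custom op should match the torch.nn.functional reference; under torch.compile, a
# model containing nn.MaxPool1d should route through the substitution and converter
# above. The "torch_tensorrt" backend name is an assumption about the surrounding
# package, and the compiled path requires a CUDA device with TensorRT installed.
if __name__ == "__main__":
    x = torch.randn(1, 4, 16)
    out = torch.ops.tensorrt.maxpool1d(x, [3], [1], [0], [1], False)
    ref = torch.nn.functional.max_pool1d(x, 3, 1, 0, 1, False)
    assert torch.equal(out, ref)

    # model = torch.nn.Sequential(torch.nn.MaxPool1d(kernel_size=3)).eval().cuda()
    # compiled = torch.compile(model, backend="torch_tensorrt")
    # compiled(x.cuda())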