Skip to content

Commit 5377e43

Browse files
committed
fix: Update logging scheme for converters
- Add debug statements for converter registrations in both FX and Dynamo
- Print operator counts along with registry support message
1 parent cb6c965 commit 5377e43

File tree

5 files changed: +37 −15 lines changed

py/torch_tensorrt/dynamo/__init__.py

+1-1
Original file line numberDiff line numberDiff line change
@@ -3,9 +3,9 @@
33

44
if version.parse(sanitized_torch_version()) >= version.parse("2.1.dev"):
55
from ._settings import *
6-
from .compile import compile
76
from .aten_tracer import trace
87
from .converter_registry import (
98
DYNAMO_CONVERTERS,
109
dynamo_tensorrt_converter,
1110
)
11+
from .compile import compile

py/torch_tensorrt/dynamo/converter_registry.py

+8
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
1+
import logging
12
from dataclasses import dataclass, field
23
from typing import Any, Callable, Dict, Optional, Sequence, Union
34
from enum import Enum, auto
@@ -6,6 +7,9 @@
67
from torch_tensorrt.fx.converter_registry import CONVERTERS
78

89

10+
logger = logging.getLogger(__name__)
11+
12+
913
class ConverterPriority(Enum):
1014
"""Enum to set a converter's priority in the registry"""
1115

@@ -85,6 +89,10 @@ def register_converter(converter):
8589
else:
8690
DYNAMO_ATEN_CONVERTERS[key] = [converter_support]
8791

92+
logger.debug(
93+
f"Converter for {key} added to Dynamo ATen Converter Registry with priority: {priority}"
94+
)
95+
8896
return converter
8997

9098
def disable_converter(converter):

py/torch_tensorrt/dynamo/lowering/_partition.py

+17-14
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,5 @@
11
import logging
2-
from typing import Callable, Dict, List, Optional, Sequence, Set
2+
from typing import Dict, List, Optional, Sequence, Set
33

44
import torch
55

@@ -55,10 +55,6 @@ def __init__(
5555
)
5656

5757
self.min_block_size = min_block_size
58-
logger.debug(
59-
"Initialized Capability-Based Partitioner with available Converters:\n"
60-
+ f"{CONVERTERS.display_all_available_converters()}"
61-
)
6258

6359
def propose_partitions(self) -> List[Partition]:
6460
# Propose partitions using the default, then refine the results
@@ -114,8 +110,8 @@ def __init__(self, support_dict=None, torch_executed_ops=set()):
114110
super().__init__(support_dict)
115111

116112
# Initialize sets of supported/unsupported operators
117-
self.supported_operators = set()
118-
self.unsupported_operators = set()
113+
self.supported_operators = {}
114+
self.unsupported_operators = {}
119115
self.torch_executed_ops = torch_executed_ops
120116

121117
def is_node_supported(
@@ -130,12 +126,18 @@ def is_node_supported(
130126
if node in CONVERTERS and node_name not in self.torch_executed_ops:
131127
# If node is a proper, supported computational node, store the operator
132128
if not node.is_impure():
133-
self.supported_operators.add(node_name)
129+
if node_name not in self.supported_operators:
130+
self.supported_operators[node_name] = 1
131+
else:
132+
self.supported_operators[node_name] += 1
134133

135134
return True
136135
else:
137136
if not node.is_impure():
138-
self.unsupported_operators.add(node_name)
137+
if node_name not in self.unsupported_operators:
138+
self.unsupported_operators[node_name] = 1
139+
else:
140+
self.unsupported_operators[node_name] += 1
139141

140142
return False
141143

@@ -147,15 +149,16 @@ def print_support_overview(self, num_trt_blocks: Optional[int] = None):
147149

148150
# Reformat support messages for debugger to print node overview as a single string
149151
supported_nodes_str = "\nSupported Nodes:\n"
150-
for node_name in self.supported_operators:
151-
supported_nodes_str += f"- {node_name}\n"
152+
for node_name, count in self.supported_operators.items():
153+
supported_nodes_str += f"- {node_name} + Operator Count: {count}\n"
152154

153155
logger.debug(supported_nodes_str)
154156

155-
if len(self.unsupported_operators) != 0:
157+
if self.unsupported_operators:
156158
unsupported_nodes_str = "\nUnsupported or Excluded Nodes:\n"
157-
for node_name in self.unsupported_operators:
158-
unsupported_nodes_str += f"- {node_name}\n"
159+
for node_name, count in self.unsupported_operators.items():
160+
unsupported_nodes_str += f"- {node_name} + Operator Count: {count}\n"
161+
159162
logger.debug(unsupported_nodes_str)
160163
else:
161164
logger.debug("\nAll Nodes Supported\n")

py/torch_tensorrt/fx/converter_registry.py

+11
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
1+
import logging
12
from typing import Any, Callable, Dict
23

34
from torch.fx.node import Target
@@ -7,6 +8,9 @@
78
NO_EXPLICIT_BATCH_DIM_SUPPORT = {}
89

910

11+
logger = logging.getLogger(__name__)
12+
13+
1014
def tensorrt_converter(
1115
key: Target,
1216
no_implicit_batch_dim: bool = False,
@@ -19,6 +23,13 @@ def register_converter(converter):
1923
NO_IMPLICIT_BATCH_DIM_SUPPORT[key] = converter
2024
if no_explicit_batch_dim:
2125
NO_EXPLICIT_BATCH_DIM_SUPPORT[key] = converter
26+
27+
logger.debug(
28+
f"Converter for {key} added to FX Converter Registry "
29+
+ f"{'without' if no_explicit_batch_dim else 'with'} Explicit Batch Dim Support + "
30+
+ f"{'without' if no_implicit_batch_dim else 'with'} Implicit Batch Dim Support"
31+
)
32+
2233
return converter
2334

2435
def disable_converter(converter):

Comments (0)