@@ -1111,7 +1111,8 @@ def compute_hash(self) -> str:
         factors: list[Any] = []
         factors.append(self.cache_dtype)
         # `cpu_offload_gb` does not use `torch.compile` yet.
-        hash_str = hashlib.md5(str(factors).encode()).hexdigest()
+        hash_str = hashlib.md5(str(factors).encode(),
+                               usedforsecurity=False).hexdigest()
         return hash_str
 
     def __init__(
@@ -1243,7 +1244,8 @@ def compute_hash(self) -> str:
         # no factors to consider.
         # this config will not affect the computation graph.
         factors: list[Any] = []
-        hash_str = hashlib.md5(str(factors).encode()).hexdigest()
+        hash_str = hashlib.md5(str(factors).encode(),
+                               usedforsecurity=False).hexdigest()
         return hash_str
 
     def __post_init__(self):
@@ -1354,7 +1356,8 @@ def compute_hash(self) -> str:
         # no factors to consider.
         # this config will not affect the computation graph.
         factors: list[Any] = []
-        hash_str = hashlib.md5(str(factors).encode()).hexdigest()
+        hash_str = hashlib.md5(str(factors).encode(),
+                               usedforsecurity=False).hexdigest()
         return hash_str
 
     def __post_init__(self):
@@ -1674,7 +1677,8 @@ def compute_hash(self) -> str:
         # no factors to consider.
         # this config will not affect the computation graph.
         factors: list[Any] = []
-        hash_str = hashlib.md5(str(factors).encode()).hexdigest()
+        hash_str = hashlib.md5(str(factors).encode(),
+                               usedforsecurity=False).hexdigest()
         return hash_str
 
     def __post_init__(self) -> None:
@@ -1810,7 +1814,8 @@ def compute_hash(self) -> str:
         # the device/platform information will be summarized
         # by torch/vllm automatically.
         factors: list[Any] = []
-        hash_str = hashlib.md5(str(factors).encode()).hexdigest()
+        hash_str = hashlib.md5(str(factors).encode(),
+                               usedforsecurity=False).hexdigest()
         return hash_str
 
     def __init__(self, device: str = "auto") -> None:
@@ -1983,7 +1988,8 @@ def compute_hash(self) -> str:
         # no factors to consider.
         # spec decode does not use `torch.compile` yet.
         factors: list[Any] = []
-        hash_str = hashlib.md5(str(factors).encode()).hexdigest()
+        hash_str = hashlib.md5(str(factors).encode(),
+                               usedforsecurity=False).hexdigest()
         return hash_str
 
     @classmethod
@@ -2358,7 +2364,8 @@ def compute_hash(self) -> str:
         factors.append(self.lora_extra_vocab_size)
         factors.append(self.long_lora_scaling_factors)
         factors.append(self.bias_enabled)
-        hash_str = hashlib.md5(str(factors).encode()).hexdigest()
+        hash_str = hashlib.md5(str(factors).encode(),
+                               usedforsecurity=False).hexdigest()
         return hash_str
 
     def __post_init__(self):
@@ -2424,7 +2431,8 @@ def compute_hash(self) -> str:
         # no factors to consider.
         # this config will not affect the computation graph.
         factors: list[Any] = []
-        hash_str = hashlib.md5(str(factors).encode()).hexdigest()
+        hash_str = hashlib.md5(str(factors).encode(),
+                               usedforsecurity=False).hexdigest()
         return hash_str
 
     def __post_init__(self):
@@ -2469,7 +2477,8 @@ def compute_hash(self) -> str:
         # no factors to consider.
         # this config will not affect the computation graph.
         factors: list[Any] = []
-        hash_str = hashlib.md5(str(factors).encode()).hexdigest()
+        hash_str = hashlib.md5(str(factors).encode(),
+                               usedforsecurity=False).hexdigest()
         return hash_str
 
     def get_limit_per_prompt(self, modality: str) -> int:
@@ -2535,7 +2544,8 @@ def compute_hash(self) -> str:
         # no factors to consider.
         # this config will not affect the computation graph.
         factors: list[Any] = []
-        hash_str = hashlib.md5(str(factors).encode()).hexdigest()
+        hash_str = hashlib.md5(str(factors).encode(),
+                               usedforsecurity=False).hexdigest()
         return hash_str
 
     @staticmethod
@@ -2816,7 +2826,8 @@ def compute_hash(self) -> str:
         # no factors to consider.
         # this config will not affect the computation graph.
         factors: list[Any] = []
-        hash_str = hashlib.md5(str(factors).encode()).hexdigest()
+        hash_str = hashlib.md5(str(factors).encode(),
+                               usedforsecurity=False).hexdigest()
         return hash_str
 
     def __post_init__(self):
@@ -2866,7 +2877,8 @@ def compute_hash(self) -> str:
         # no factors to consider.
         # this config will not affect the computation graph.
         factors: list[Any] = []
-        hash_str = hashlib.md5(str(factors).encode()).hexdigest()
+        hash_str = hashlib.md5(str(factors).encode(),
+                               usedforsecurity=False).hexdigest()
         return hash_str
 
     def __post_init__(self):
@@ -2928,7 +2940,8 @@ def compute_hash(self) -> str:
         # no factors to consider.
         # this config will not affect the computation graph.
         factors: list[Any] = []
-        hash_str = hashlib.md5(str(factors).encode()).hexdigest()
+        hash_str = hashlib.md5(str(factors).encode(),
+                               usedforsecurity=False).hexdigest()
         return hash_str
 
     @classmethod
@@ -3425,7 +3438,8 @@ def compute_hash(self) -> str:
             vllm_factors.append("None")
         factors.append(vllm_factors)
 
-        hash_str = hashlib.md5(str(factors).encode()).hexdigest()[:10]
+        hash_str = hashlib.md5(str(factors).encode(),
+                               usedforsecurity=False).hexdigest()[:10]
         return hash_str
 
     def pad_for_cudagraph(self, batch_size: int) -> int:
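Every hunk above applies the same one-line change: pass usedforsecurity=False to hashlib.md5. That keyword is accepted by the hashlib constructors on Python 3.9+ and marks the digest as a non-cryptographic fingerprint, so MD5 remains usable on FIPS-restricted OpenSSL builds where security uses of MD5 are rejected. A minimal standalone sketch of the pattern; the function name and example factors below are illustrative, not taken from the diff:

import hashlib
from typing import Any

def compute_hash_sketch(factors: list[Any]) -> str:
    # usedforsecurity=False (Python 3.9+) tells the OpenSSL backend this
    # MD5 call is a cache key / fingerprint, not a security primitive,
    # so it is not blocked on FIPS-enabled systems.
    return hashlib.md5(str(factors).encode(),
                       usedforsecurity=False).hexdigest()

# Usage: a stable 32-character hex digest for a list of config factors.
print(compute_hash_sketch(["float16", True, 4096]))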