Skip to content

Commit c3d64a0

Browse files
committed
py : try to fix flake stuff
1 parent fe25223 commit c3d64a0

File tree

1 file changed

+3
-3
lines changed

1 file changed

+3
-3
lines changed

convert-hf-to-gguf.py

Lines changed: 3 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -25,7 +25,6 @@
2525

2626
# check for any of the given keys in the dictionary and return the value of the first key found
2727
def get_key_opts(d, keys):
28-
vals = []
2928
for k in keys:
3029
if k in d:
3130
return d[k]
@@ -267,7 +266,6 @@ def _set_vocab_gpt2(self):
267266
toktypes.append(gguf.TokenType.USER_DEFINED)
268267
elif reverse_vocab[i] in added_vocab:
269268
tokens.append(reverse_vocab[i])
270-
# check if tokenizer has added_tokens_decoder
271269
if hasattr(tokenizer, "added_tokens_decoder"):
272270
if tokenizer.added_tokens_decoder[i].special:
273271
toktypes.append(gguf.TokenType.CONTROL)
@@ -1092,7 +1090,9 @@ def set_gguf_parameters(self):
10921090
self.gguf_writer.add_head_count_kv(get_key_opts(self.hparams, ["n_head", "num_attention_heads"]))
10931091
self.gguf_writer.add_layer_norm_eps(get_key_opts(self.hparams, ["layer_norm_epsilon", "layer_norm_eps"]))
10941092
self.gguf_writer.add_rope_dimension_count(
1095-
int(get_key_opts(self.hparams, ["partial_rotary_factor"]) * get_key_opts(self.hparams, ["n_embd", "hidden_size"])) // get_key_opts(self.hparams, ["n_head", "num_attention_heads"]))
1093+
int(get_key_opts(self.hparams, ["partial_rotary_factor"]) *
1094+
get_key_opts(self.hparams, ["n_embd", "hidden_size"])) //
1095+
get_key_opts(self.hparams, ["n_head", "num_attention_heads"]))
10961096
self.gguf_writer.add_file_type(self.ftype)
10971097
self.gguf_writer.add_add_bos_token(False)
10981098

0 commit comments

Comments (0)