@@ -64,34 +64,34 @@ def transpose_and_reshape(x, shape):
 
         ## Query
         loader.port_weight(
-            keras_variable=decoder_layer._self_attention_layer._query_dense.kernel,
+            keras_variable=decoder_layer._self_attention_layer.query_dense.kernel,
             hf_weight_key=f"model.layers.{i}.self_attn.q_proj.weight",
             hook_fn=transpose_and_reshape,
         )
         loader.port_weight(
-            keras_variable=decoder_layer._self_attention_layer._query_dense.bias,
+            keras_variable=decoder_layer._self_attention_layer.query_dense.bias,
             hf_weight_key=f"model.layers.{i}.self_attn.q_proj.bias",
             hook_fn=transpose_and_reshape,
         )
         ## Key
         loader.port_weight(
-            keras_variable=decoder_layer._self_attention_layer._key_dense.kernel,
+            keras_variable=decoder_layer._self_attention_layer.key_dense.kernel,
             hf_weight_key=f"model.layers.{i}.self_attn.k_proj.weight",
             hook_fn=transpose_and_reshape,
         )
         loader.port_weight(
-            keras_variable=decoder_layer._self_attention_layer._key_dense.bias,
+            keras_variable=decoder_layer._self_attention_layer.key_dense.bias,
             hf_weight_key=f"model.layers.{i}.self_attn.k_proj.bias",
             hook_fn=transpose_and_reshape,
         )
         ## Value
         loader.port_weight(
-            keras_variable=decoder_layer._self_attention_layer._value_dense.kernel,
+            keras_variable=decoder_layer._self_attention_layer.value_dense.kernel,
             hf_weight_key=f"model.layers.{i}.self_attn.v_proj.weight",
             hook_fn=transpose_and_reshape,
         )
         loader.port_weight(
-            keras_variable=decoder_layer._self_attention_layer._value_dense.bias,
+            keras_variable=decoder_layer._self_attention_layer.value_dense.bias,
             hf_weight_key=f"model.layers.{i}.self_attn.v_proj.bias",
             hook_fn=transpose_and_reshape,
         )
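Every `port_weight` call in this hunk routes the Hugging Face projection tensor through the `transpose_and_reshape` hook named in the `@@` context line. The hook's body is not shown in this diff, so the snippet below is only a minimal sketch of what such a hook would do, assuming the loader calls `hook_fn(hf_tensor, keras_variable_shape)`: transpose the HF `(out_features, in_features)` matrix, then reshape it to the Keras variable's shape (for example, a per-head layout such as `(hidden_dim, num_heads, head_dim)` for the query kernel).

import numpy as np

def transpose_and_reshape(x, shape):
    # Assumed hook signature: `x` is the raw HF tensor, `shape` is the
    # target Keras variable's shape supplied by the loader.
    return np.reshape(np.transpose(x), shape)

A hook written this way also works for the bias ports above, since a 1-D bias is unchanged by the transpose and is simply reshaped to the per-head layout, which is presumably why all six calls share the same `hook_fn`.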