@@ -215,7 +215,7 @@ enum llm_arch {
     LLM_ARCH_UNKNOWN,
 };
 
-static std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
+static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
     { LLM_ARCH_LLAMA,           "llama"      },
     { LLM_ARCH_FALCON,          "falcon"     },
     { LLM_ARCH_GPT2,            "gpt2"       },
@@ -240,6 +240,7 @@ static std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
     { LLM_ARCH_MINICPM,         "minicpm"    },
     { LLM_ARCH_GEMMA,           "gemma"      },
     { LLM_ARCH_STARCODER2,      "starcoder2" },
+    { LLM_ARCH_UNKNOWN,         "(unknown)"  },
 };
 
 enum llm_kv {
@@ -300,7 +301,7 @@ enum llm_kv {
     LLM_KV_TOKENIZER_RWKV,
 };
 
-static std::map<llm_kv, const char *> LLM_KV_NAMES = {
+static const std::map<llm_kv, const char *> LLM_KV_NAMES = {
     { LLM_KV_GENERAL_ARCHITECTURE,          "general.architecture"          },
     { LLM_KV_GENERAL_QUANTIZATION_VERSION,  "general.quantization_version"  },
     { LLM_KV_GENERAL_ALIGNMENT,             "general.alignment"             },
@@ -364,7 +365,7 @@ struct LLM_KV {
     llm_arch arch;
 
     std::string operator()(llm_kv kv) const {
-        return ::format(LLM_KV_NAMES[kv], LLM_ARCH_NAMES[arch]);
+        return ::format(LLM_KV_NAMES.at(kv), LLM_ARCH_NAMES.at(arch));
     }
 };
 
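The `.at()` change above is the heart of the fix. On a non-const `std::map`, `operator[]` silently default-inserts a value for any missing key (here a null `const char *`), and it cannot be called on a `const` map at all; `.at()` never mutates the map and throws `std::out_of_range` on a miss. A minimal standalone sketch of the difference (the map contents are illustrative, not taken from llama.cpp):

```cpp
#include <cstdio>
#include <map>
#include <stdexcept>

int main() {
    std::map<int, const char *> names = { { 0, "llama" } };

    // operator[] on a missing key default-inserts a nullptr value and
    // returns it; formatting that nullptr later is undefined behavior.
    const char * bad = names[1];
    printf("size after []: %zu, value: %p\n", names.size(), (void *) bad);

    // .at() leaves the map untouched; a missing key throws instead.
    try {
        names.at(2);
    } catch (const std::out_of_range &) {
        printf("at(2) threw; size still %zu\n", names.size());
    }
    return 0;
}
```

Making the maps `const` and routing every lookup through `.at()` thus turns a silent insertion (and a later null-pointer format) into an immediate, catchable error.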
@@ -399,7 +400,7 @@ enum llm_tensor {
     LLM_TENSOR_LAYER_OUT_NORM,
 };
 
-static std::map<llm_arch, std::map<llm_tensor, std::string>> LLM_TENSOR_NAMES = {
+static const std::map<llm_arch, const std::map<llm_tensor, std::string>> LLM_TENSOR_NAMES = {
     {
         LLM_ARCH_LLAMA,
         {
@@ -810,13 +811,6 @@ static std::map<llm_arch, std::map<llm_tensor, std::string>> LLM_TENSOR_NAMES =
 static llm_arch llm_arch_from_string(const std::string & name) {
     for (const auto & kv : LLM_ARCH_NAMES) { // NOLINT
         if (kv.second == nullptr) {
-            // LLM_ARCH_UNKNOWN does not have a name,
-            // but is somehow still in the LLM_ARCH_NAMES map.
-            if (kv.first == LLM_ARCH_UNKNOWN) {
-                // skip string comparison
-                continue;
-            }
-            // known architectures should always have a name
             GGML_ASSERT(false && "missing architecture in LLM_ARCH_NAMES");
         }
         if (kv.second == name) {
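With the "(unknown)" entry added earlier, every enumerator in LLM_ARCH_NAMES now carries a non-null name, so the special case for `LLM_ARCH_UNKNOWN` is dead code and can be dropped. A self-contained sketch of the resulting lookup behavior (simplified two-value enum, a plain `assert` standing in for `GGML_ASSERT`, and the fall-through `return LLM_ARCH_UNKNOWN` assumed, since the hunk cuts off before the end of the function):

```cpp
#include <cassert>
#include <map>
#include <string>

enum llm_arch { LLM_ARCH_LLAMA, LLM_ARCH_UNKNOWN };

static const std::map<llm_arch, const char *> LLM_ARCH_NAMES = {
    { LLM_ARCH_LLAMA,   "llama"     },
    { LLM_ARCH_UNKNOWN, "(unknown)" },
};

static llm_arch llm_arch_from_string(const std::string & name) {
    for (const auto & kv : LLM_ARCH_NAMES) {
        // every architecture is now expected to have a name
        assert(kv.second != nullptr && "missing architecture in LLM_ARCH_NAMES");
        if (kv.second == name) {
            return kv.first;
        }
    }
    return LLM_ARCH_UNKNOWN;
}

int main() {
    assert(llm_arch_from_string("llama")       == LLM_ARCH_LLAMA);
    // unrecognized names fall through to LLM_ARCH_UNKNOWN instead of
    // triggering a nullptr lookup further down the line
    assert(llm_arch_from_string("not-an-arch") == LLM_ARCH_UNKNOWN);
    return 0;
}
```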
@@ -842,46 +836,46 @@ struct LLM_TN {
     llm_arch arch;
 
     std::string operator()(llm_tensor tensor) const {
-        if (LLM_TENSOR_NAMES[arch].find(tensor) == LLM_TENSOR_NAMES[arch].end()) {
+        if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
             return "__missing__";
         }
-        return LLM_TENSOR_NAMES[arch].at(tensor);
+        return LLM_TENSOR_NAMES.at(arch).at(tensor);
     }
 
     std::string operator()(llm_tensor tensor, const std::string & suffix) const {
-        if (LLM_TENSOR_NAMES[arch].find(tensor) == LLM_TENSOR_NAMES[arch].end()) {
+        if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
             return "__missing__";
         }
-        return LLM_TENSOR_NAMES[arch].at(tensor) + "." + suffix;
+        return LLM_TENSOR_NAMES.at(arch).at(tensor) + "." + suffix;
     }
 
     std::string operator()(llm_tensor tensor, int bid) const {
-        if (LLM_TENSOR_NAMES[arch].find(tensor) == LLM_TENSOR_NAMES[arch].end()) {
+        if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
             return "__missing__";
         }
-        return ::format(LLM_TENSOR_NAMES[arch].at(tensor).c_str(), bid);
+        return ::format(LLM_TENSOR_NAMES.at(arch).at(tensor).c_str(), bid);
     }
 
     std::string operator()(llm_tensor tensor, const std::string & suffix, int bid) const {
-        if (LLM_TENSOR_NAMES[arch].find(tensor) == LLM_TENSOR_NAMES[arch].end()) {
+        if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
             return "__missing__";
         }
-        return ::format(LLM_TENSOR_NAMES[arch].at(tensor).c_str(), bid) + "." + suffix;
+        return ::format(LLM_TENSOR_NAMES.at(arch).at(tensor).c_str(), bid) + "." + suffix;
     }
 
     std::string operator()(llm_tensor tensor, const std::string & suffix, int bid, int xid) const {
-        if (LLM_TENSOR_NAMES[arch].find(tensor) == LLM_TENSOR_NAMES[arch].end()) {
+        if (LLM_TENSOR_NAMES.at(arch).find(tensor) == LLM_TENSOR_NAMES.at(arch).end()) {
             return "__missing__";
         }
-        return ::format(LLM_TENSOR_NAMES[arch].at(tensor).c_str(), bid, xid) + "." + suffix;
+        return ::format(LLM_TENSOR_NAMES.at(arch).at(tensor).c_str(), bid, xid) + "." + suffix;
     }
 };
 
 //
 // gguf helpers
 //
 
-static std::map<int32_t, const char *> LLAMA_ROPE_SCALING_TYPES = {
+static const std::map<int32_t, const char *> LLAMA_ROPE_SCALING_TYPES = {
     { LLAMA_ROPE_SCALING_TYPE_NONE,   "none"   },
     { LLAMA_ROPE_SCALING_TYPE_LINEAR, "linear" },
     { LLAMA_ROPE_SCALING_TYPE_YARN,   "yarn"   },
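All five `LLM_TN::operator()` overloads above follow the same pattern after the change: probe the per-architecture sub-map with `find()`, return "__missing__" on a miss, and otherwise read through `.at()` so neither the outer nor the inner map is ever mutated. A condensed sketch of one overload (a single flat map instead of the nested one, `snprintf` standing in for llama.cpp's `::format`, and the "blk.%d.attn_q" template assumed for illustration):

```cpp
#include <cstdio>
#include <map>
#include <string>

enum llm_tensor { TENSOR_ATTN_Q, TENSOR_NOT_MAPPED };

static const std::map<llm_tensor, std::string> TENSOR_NAMES = {
    { TENSOR_ATTN_Q, "blk.%d.attn_q" },
};

// mirrors operator()(tensor, suffix, bid): a tensor missing from the
// (const) map yields "__missing__" rather than inserting or throwing
static std::string tensor_name(llm_tensor tensor, const std::string & suffix, int bid) {
    if (TENSOR_NAMES.find(tensor) == TENSOR_NAMES.end()) {
        return "__missing__";
    }
    char buf[256];
    snprintf(buf, sizeof(buf), TENSOR_NAMES.at(tensor).c_str(), bid);
    return std::string(buf) + "." + suffix;
}

int main() {
    // prints "blk.0.attn_q.weight"
    printf("%s\n", tensor_name(TENSOR_ATTN_Q,     "weight", 0).c_str());
    // prints "__missing__"
    printf("%s\n", tensor_name(TENSOR_NOT_MAPPED, "weight", 0).c_str());
    return 0;
}
```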