Skip to content

Commit 57d016b

Browse files
authored
llama : add additional suffixes for model params (#4834)
* llm_load_print_meta: Add additional suffixes for model params. * Update llama.cpp model param log: remove unneeded comments and convert from > to >=
1 parent 329ff61 commit 57d016b

File tree

1 file changed

+9
-1
lines changed

1 file changed

+9
-1
lines changed

llama.cpp

Lines changed: 9 additions & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -3146,7 +3146,15 @@ static void llm_load_print_meta(llama_model_loader & ml, llama_model & model) {
31463146
LLAMA_LOG_INFO("%s: rope_finetuned = %s\n", __func__, hparams.rope_finetuned ? "yes" : "unknown");
31473147
LLAMA_LOG_INFO("%s: model type = %s\n", __func__, llama_model_type_name(model.type));
31483148
LLAMA_LOG_INFO("%s: model ftype = %s\n", __func__, llama_model_ftype_name(model.ftype).c_str());
3149-
LLAMA_LOG_INFO("%s: model params = %.2f B\n", __func__, ml.n_elements*1e-9);
3149+
if (ml.n_elements >= 1e12) {
3150+
LLAMA_LOG_INFO("%s: model params = %.2f T\n", __func__, ml.n_elements*1e-12);
3151+
} else if (ml.n_elements >= 1e9) {
3152+
LLAMA_LOG_INFO("%s: model params = %.2f B\n", __func__, ml.n_elements*1e-9);
3153+
} else if (ml.n_elements >= 1e6) {
3154+
LLAMA_LOG_INFO("%s: model params = %.2f M\n", __func__, ml.n_elements*1e-6);
3155+
} else {
3156+
LLAMA_LOG_INFO("%s: model params = %.2f K\n", __func__, ml.n_elements*1e-3);
3157+
}
31503158
if (ml.n_bytes < GiB) {
31513159
LLAMA_LOG_INFO("%s: model size = %.2f MiB (%.2f BPW) \n", __func__, ml.n_bytes/1024.0/1024.0, ml.n_bytes*8.0/ml.n_elements);
31523160
} else {

0 commit comments

Comments
 (0)