@@ -3784,7 +3784,7 @@ static size_t llama_model_max_nodes(const llama_model & /*model*/) {
     // return 32768;
     //}

-    return 8192;
+    return 65536;
 }

 struct llama_model_loader {
@@ -8879,7 +8879,8 @@ struct llm_build_context {
         llama_context & lctx,
         const llama_batch & batch,
         const llm_build_cb & cb,
-        bool worst_case) :
+        bool worst_case,
+        bool warmup) :
         model            (lctx.model),
         lctx             (lctx),
         hparams          (model.hparams),
@@ -8897,7 +8898,7 @@ struct llm_build_context {
         n_embd_head_v    (hparams.n_embd_head_v),
         n_embd_v_gqa     (hparams.n_embd_v_gqa()),
         n_expert         (hparams.n_expert),
-        n_expert_used    (hparams.n_expert_used),
+        n_expert_used    (warmup ? hparams.n_expert : hparams.n_expert_used),
         freq_base        (cparams.rope_freq_base),
         freq_scale       (cparams.rope_freq_scale),
         ext_factor       (cparams.yarn_ext_factor),
@@ -14433,7 +14434,7 @@ static struct ggml_cgraph * llama_build_graph_defrag(llama_context & lctx, const

     llm_build_cb cb = [&](struct ggml_tensor * , const char * , int ) { };

-    struct llm_build_context llm(lctx, dummy, cb, false);
+    struct llm_build_context llm(lctx, dummy, cb, false, false);

     llm.init();
@@ -14450,7 +14451,7 @@ static struct ggml_cgraph * llama_build_graph_k_shift(llama_context & lctx) {

     llm_build_cb cb = [&](struct ggml_tensor * , const char * , int ) { };

-    struct llm_build_context llm(lctx, dummy, cb, false);
+    struct llm_build_context llm(lctx, dummy, cb, false, false);

     llm.init();
@@ -14467,7 +14468,7 @@ static struct ggml_cgraph * llama_build_graph_s_copy(llama_context & lctx) {

     llm_build_cb cb = [&](struct ggml_tensor * , const char * , int ) { };

-    struct llm_build_context llm(lctx, dummy, cb, false);
+    struct llm_build_context llm(lctx, dummy, cb, false, false);

     llm.init();
@@ -14517,7 +14518,11 @@ static struct ggml_cgraph * llama_build_graph(

     struct ggml_cgraph * result = NULL;

-    struct llm_build_context llm(lctx, batch, cb, worst_case);
+    const llama_vocab * vocab = llama_get_vocab(&lctx);
+    llama_token bos = llama_token_bos_impl(*vocab);
+    llama_token eos = llama_token_eos_impl(*vocab);
+    bool is_warming_up = (batch.n_tokens == 1 && batch.token[0] == bos);
+    struct llm_build_context llm(lctx, batch, cb, worst_case, is_warming_up);

     llm.init();
0 commit comments