@@ -509,15 +509,15 @@ struct llama_server_context
509
509
bool load_model (const common_params &params_)
510
510
{
511
511
params = params_;
512
- if (!params.mmproj .empty ()) {
512
+ if (!params.mmproj .path . empty ()) {
513
513
multimodal = true ;
514
514
LOG_INFO (" Multi Modal Mode Enabled" , {});
515
- clp_ctx = clip_init (params.mmproj .c_str (), clip_context_params {
515
+ clp_ctx = clip_init (params.mmproj .path . c_str (), clip_context_params {
516
516
/* use_gpu */ has_gpu,
517
517
/* verbosity=*/ 1 ,
518
518
});
519
519
if (clp_ctx == nullptr ) {
520
- LOG_ERR (" unable to load clip model: %s" , params.mmproj .c_str ());
520
+ LOG_ERR (" unable to load clip model: %s" , params.mmproj .path . c_str ());
521
521
return false ;
522
522
}
523
523
@@ -531,7 +531,7 @@ struct llama_server_context
531
531
ctx = common_init.context .release ();
532
532
if (model == nullptr )
533
533
{
534
- LOG_ERR (" unable to load model: %s" , params.model .c_str ());
534
+ LOG_ERR (" unable to load model: %s" , params.model .path . c_str ());
535
535
return false ;
536
536
}
537
537
@@ -2326,11 +2326,11 @@ static void params_parse(const backend::ModelOptions* request,
2326
2326
2327
2327
// this is comparable to: https://github.com/ggerganov/llama.cpp/blob/d9b33fe95bd257b36c84ee5769cc048230067d6f/examples/server/server.cpp#L1809
2328
2328
2329
- params.model = request->modelfile ();
2329
+ params.model . path = request->modelfile ();
2330
2330
if (!request->mmproj ().empty ()) {
2331
2331
// get the directory of modelfile
2332
- std::string model_dir = params.model .substr (0 , params.model .find_last_of (" /\\ " ));
2333
- params.mmproj = model_dir + " /" + request->mmproj ();
2332
+ std::string model_dir = params.model .path . substr (0 , params.model . path .find_last_of (" /\\ " ));
2333
+ params.mmproj . path = model_dir + " /" + request->mmproj ();
2334
2334
}
2335
2335
// params.model_alias ??
2336
2336
params.model_alias = request->modelfile ();
@@ -2405,7 +2405,7 @@ static void params_parse(const backend::ModelOptions* request,
2405
2405
scale_factor = request->lorascale ();
2406
2406
}
2407
2407
// get the directory of modelfile
2408
- std::string model_dir = params.model .substr (0 , params.model .find_last_of (" /\\ " ));
2408
+ std::string model_dir = params.model .path . substr (0 , params.model . path .find_last_of (" /\\ " ));
2409
2409
params.lora_adapters .push_back ({ model_dir + " /" +request->loraadapter (), scale_factor });
2410
2410
}
2411
2411
params.use_mlock = request->mlock ();
0 commit comments