We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
1 parent 12568c7 · commit 2bf140c (Copy full SHA for 2bf140c)
backend/cpp/llama/grpc-server.cpp
@@ -511,7 +511,10 @@ struct llama_server_context
         if (!params.mmproj.empty()) {
             multimodal = true;
             LOG_INFO("Multi Modal Mode Enabled", {});
-            clp_ctx = clip_model_load(params.mmproj.c_str(), /*verbosity=*/ 1);
+            clp_ctx = clip_init(params.mmproj.c_str(), clip_context_params {
+                /* use_gpu */ false,
+                /*verbosity=*/ 1,
+            });
             if(clp_ctx == nullptr) {
                 LOG_ERR("unable to load clip model: %s", params.mmproj.c_str());
                 return false;
0 commit comments