This repository was archived by the owner on Jul 4, 2025. It is now read-only.

Commit d830ad3

Merge pull request #390 from janhq/389-feat-refactor-some-repetitions
refactor: remove the repetition
2 parents f33d6f4 + 055d003 commit d830ad3

File tree

1 file changed: +18 −19 lines changed


controllers/llamaCPP.cc

Lines changed: 18 additions & 19 deletions
@@ -21,6 +21,20 @@ std::shared_ptr<inferenceState> create_inference_state(llamaCPP *instance) {
 
 // --------------------------------------------
 
+// Function to check if the model is loaded
+void check_model_loaded(llama_server_context &llama, const HttpRequestPtr &req,
+                        std::function<void(const HttpResponsePtr &)> &callback) {
+  if (!llama.model_loaded_external) {
+    Json::Value jsonResp;
+    jsonResp["message"] =
+        "Model has not been loaded, please load model into nitro";
+    auto resp = nitro_utils::nitroHttpJsonResponse(jsonResp);
+    resp->setStatusCode(drogon::k409Conflict);
+    callback(resp);
+    return;
+  }
+}
+
 Json::Value create_embedding_payload(const std::vector<float> &embedding,
                                      int prompt_tokens) {
   Json::Value dataItem;
@@ -136,15 +150,8 @@ void llamaCPP::chatCompletion(
     const HttpRequestPtr &req,
     std::function<void(const HttpResponsePtr &)> &&callback) {
 
-  if (!llama.model_loaded_external) {
-    Json::Value jsonResp;
-    jsonResp["message"] =
-        "Model has not been loaded, please load model into nitro";
-    auto resp = nitro_utils::nitroHttpJsonResponse(jsonResp);
-    resp->setStatusCode(drogon::k409Conflict);
-    callback(resp);
-    return;
-  }
+  // Check if model is loaded
+  check_model_loaded(llama, req, callback);
 
   const auto &jsonBody = req->getJsonObject();
   std::string formatted_output = pre_prompt;
@@ -402,15 +409,7 @@ void llamaCPP::chatCompletion(
 void llamaCPP::embedding(
     const HttpRequestPtr &req,
     std::function<void(const HttpResponsePtr &)> &&callback) {
-  if (!llama.model_loaded_external) {
-    Json::Value jsonResp;
-    jsonResp["message"] =
-        "Model has not been loaded, please load model into nitro";
-    auto resp = nitro_utils::nitroHttpJsonResponse(jsonResp);
-    resp->setStatusCode(drogon::k409Conflict);
-    callback(resp);
-    return;
-  }
+  check_model_loaded(llama, req, callback);
 
   const auto &jsonBody = req->getJsonObject();
 
@@ -623,4 +622,4 @@ void llamaCPP::stopBackgroundTask() {
       backgroundThread.join();
     }
   }
-}
+}
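
Note on the extracted helper: its early `return` exits only `check_model_loaded` itself, so the calling handler continues running even when the model is not loaded. A minimal sketch of a hypothetical variant (not part of this commit) that reports the result back to the caller:

// Hypothetical variant (not in this commit): report whether the model is
// loaded so the caller can bail out of the handler itself.
bool check_model_loaded(llama_server_context &llama, const HttpRequestPtr &req,
                        std::function<void(const HttpResponsePtr &)> &callback) {
  if (!llama.model_loaded_external) {
    Json::Value jsonResp;
    jsonResp["message"] =
        "Model has not been loaded, please load model into nitro";
    auto resp = nitro_utils::nitroHttpJsonResponse(jsonResp);
    resp->setStatusCode(drogon::k409Conflict);
    callback(resp);
    return false; // model missing: 409 response already sent
  }
  return true; // model present: caller may proceed
}

// Usage in a handler (sketch):
//   if (!check_model_loaded(llama, req, callback))
//     return;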
