Skip to content
This repository was archived by the owner on Jul 4, 2025. It is now read-only.

Commit 30d51d2

Browse files
authored
Merge pull request #329 from janhq/327-feat-turn-on-log-collection
feat: add manually specified log folder
2 parents d7d75c1 + 2d47e4f commit 30d51d2

File tree

2 files changed

+7
-2
lines changed

2 files changed

+7
-2
lines changed

controllers/llamaCPP.cc

Lines changed: 6 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,5 +1,6 @@
11
#include "llamaCPP.h"
22
#include "llama.h"
3+
#include "log.h"
34
#include "utils/nitro_utils.h"
45

56
using namespace inferences;
@@ -441,7 +442,6 @@ bool llamaCPP::loadModelImpl(const Json::Value &jsonBody) {
441442
jsonBody.get("cpu_threads", std::thread::hardware_concurrency())
442443
.asInt();
443444
params.cont_batching = jsonBody.get("cont_batching", false).asBool();
444-
445445
this->clean_cache_threshold =
446446
jsonBody.get("clean_cache_threshold", 5).asInt();
447447
this->caching_enabled = jsonBody.get("caching_enabled", false).asBool();
@@ -451,6 +451,11 @@ bool llamaCPP::loadModelImpl(const Json::Value &jsonBody) {
451451
jsonBody.get("system_prompt", "ASSISTANT's RULE: ").asString();
452452
this->pre_prompt = jsonBody.get("pre_prompt", "").asString();
453453
this->repeat_last_n = jsonBody.get("repeat_last_n", 32).asInt();
454+
455+
// Set folder for llama log
456+
std::string llama_log_folder =
457+
jsonBody.get("llama_log_folder", "log/").asString();
458+
log_set_target(llama_log_folder + "llama.log");
454459
}
455460
#ifdef GGML_USE_CUBLAS
456461
LOG_INFO << "Setting up GGML CUBLAS PARAMS";

controllers/llamaCPP.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2486,7 +2486,7 @@ class llamaCPP : public drogon::HttpController<llamaCPP> {
24862486
public:
24872487
llamaCPP() {
24882488
// Some default values for now below
2489-
// log_disable(); // Disable the log to file feature, reduce bloat for
2489+
log_enable(); // Enable the log-to-file feature so llama.cpp logs are
24902490
// target
24912491
// system ()
24922492
std::vector<std::string> llama_models =

0 commit comments

Comments
 (0)