This repository was archived by the owner on Jul 4, 2025. It is now read-only.
2 files changed: +7 −2 lines changed
llamaCPP.cc
@@ -1,5 +1,6 @@
 #include "llamaCPP.h"
 #include "llama.h"
+#include "log.h"
 #include "utils/nitro_utils.h"

 using namespace inferences;
@@ -441,7 +442,6 @@ bool llamaCPP::loadModelImpl(const Json::Value &jsonBody) {
       jsonBody.get("cpu_threads", std::thread::hardware_concurrency())
           .asInt();
   params.cont_batching = jsonBody.get("cont_batching", false).asBool();
-
   this->clean_cache_threshold =
       jsonBody.get("clean_cache_threshold", 5).asInt();
   this->caching_enabled = jsonBody.get("caching_enabled", false).asBool();
@@ -451,6 +451,11 @@ bool llamaCPP::loadModelImpl(const Json::Value &jsonBody) {
       jsonBody.get("system_prompt", "ASSISTANT's RULE: ").asString();
   this->pre_prompt = jsonBody.get("pre_prompt", "").asString();
   this->repeat_last_n = jsonBody.get("repeat_last_n", 32).asInt();
+
+  // Set folder for llama log
+  std::string llama_log_folder =
+      jsonBody.get("llama_log_folder", "log/").asString();
+  log_set_target(llama_log_folder + "llama.log");
   }
 #ifdef GGML_USE_CUBLAS
   LOG_INFO << "Setting up GGML CUBLAS PARAMS";
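For reference, here is a minimal, self-contained sketch of what the new llama_log_folder option does at load time. It mirrors the parsing added above using jsoncpp (the library behind Json::Value); the request body and folder value are illustrative, not part of the commit:

#include <iostream>
#include <json/json.h>  // jsoncpp, the library behind Json::Value
#include <string>

int main() {
  // Hypothetical load-model request body; "llama_log_folder" is the only
  // field introduced by this change.
  Json::Value jsonBody;
  jsonBody["llama_log_folder"] = "/var/log/nitro/";

  // Same lookup as in loadModelImpl(): fall back to "log/" when the field
  // is absent, then append the fixed file name.
  std::string llama_log_folder =
      jsonBody.get("llama_log_folder", "log/").asString();
  std::cout << llama_log_folder + "llama.log" << "\n";  // /var/log/nitro/llama.log
  return 0;
}

Because the file name is appended by plain string concatenation, the configured folder needs to end in a path separator, as the "log/" default does.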
llamaCPP.h
@@ -2486,7 +2486,7 @@ class llamaCPP : public drogon::HttpController<llamaCPP> {
 public:
  llamaCPP() {
    // Some default values for now below
-    // log_disable(); // Disable the log to file feature, reduce bloat for
+    log_enable(); // Disable the log to file feature, reduce bloat for
    // target
    // system()
    std::vector<std::string> llama_models =
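Taken together, the two hunks turn llama.cpp's log-to-file feature on by default (log_enable() in the constructor, even though the inline comment carried over from the old log_disable() line still reads "Disable") and then point it at the configured folder in loadModelImpl(). A sketch of the resulting call order, assuming the log_enable, log_set_target, and LOG helpers from llama.cpp's common log.h of this period; the function name and folder value are illustrative:

#include <string>
#include "log.h"  // llama.cpp common logging helpers: log_enable, log_set_target, LOG

void setup_llama_logging(const std::string &llama_log_folder = "log/") {
  log_enable();                                    // constructor side: file logging on
  log_set_target(llama_log_folder + "llama.log");  // load side: redirect the target
  LOG("llama log target set\n");                   // later llama.cpp logs land in that file
}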