Skip to content

Commit 4fee0d2

Browse files
committed
format c++ code
1 parent 7bfb0dd commit 4fee0d2

File tree

3 files changed: +42 −26 lines changed

3 files changed: +42 −26 lines changed

src/main/cpp/jllama.cpp

Lines changed: 17 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -13,7 +13,7 @@
1313

1414
namespace
1515
{
16-
JavaVM* g_vm = nullptr;
16+
JavaVM *g_vm = nullptr;
1717

1818
// classes
1919
jclass c_llama_model = nullptr;
@@ -117,7 +117,8 @@ jobject log_level_to_jobject(ggml_log_level level)
117117
return o_log_level_error;
118118
case GGML_LOG_LEVEL_WARN:
119119
return o_log_level_warn;
120-
default: case GGML_LOG_LEVEL_INFO:
120+
default:
121+
case GGML_LOG_LEVEL_INFO:
121122
return o_log_level_info;
122123
case GGML_LOG_LEVEL_DEBUG:
123124
return o_log_level_debug;
@@ -127,9 +128,11 @@ jobject log_level_to_jobject(ggml_log_level level)
127128
/**
128129
* Returns the JNIEnv of the current thread.
129130
*/
130-
JNIEnv* get_jni_env() {
131-
JNIEnv* env = nullptr;
132-
if (g_vm == nullptr || g_vm->GetEnv(reinterpret_cast<void**>(&env), JNI_VERSION_1_6) != JNI_OK) {
131+
JNIEnv *get_jni_env()
132+
{
133+
JNIEnv *env = nullptr;
134+
if (g_vm == nullptr || g_vm->GetEnv(reinterpret_cast<void **>(&env), JNI_VERSION_1_6) != JNI_OK)
135+
{
133136
throw std::runtime_error("Thread is not attached to the JVM");
134137
}
135138
return env;
@@ -436,10 +439,12 @@ JNIEXPORT void JNICALL Java_de_kherud_llama_LlamaModel_loadModel(JNIEnv *env, jo
436439

437440
std::thread t([ctx_server]() {
438441
JNIEnv *env;
439-
jint res = g_vm->GetEnv((void**)&env, JNI_VERSION_1_6);
440-
if (res == JNI_EDETACHED) {
441-
res = g_vm->AttachCurrentThread((void**)&env, nullptr);
442-
if (res != JNI_OK) {
442+
jint res = g_vm->GetEnv((void **)&env, JNI_VERSION_1_6);
443+
if (res == JNI_EDETACHED)
444+
{
445+
res = g_vm->AttachCurrentThread((void **)&env, nullptr);
446+
if (res != JNI_OK)
447+
{
443448
throw std::runtime_error("Failed to attach thread to JVM");
444449
}
445450
}
@@ -459,7 +464,8 @@ JNIEXPORT jint JNICALL Java_de_kherud_llama_LlamaModel_requestCompletion(JNIEnv
459464
json json_params = json::parse(c_params);
460465
const bool infill = json_params.contains("input_prefix") || json_params.contains("input_suffix");
461466

462-
if (json_params.value("use_chat_template", false)) {
467+
if (json_params.value("use_chat_template", false))
468+
{
463469
json chat;
464470
chat.push_back({{"role", "system"}, {"content", ctx_server->system_prompt}});
465471
chat.push_back({{"role", "user"}, {"content", json_params["prompt"]}});
@@ -631,7 +637,7 @@ JNIEXPORT void JNICALL Java_de_kherud_llama_LlamaModel_setLogger(JNIEnv *env, jc
631637
{
632638
o_log_callback = env->NewGlobalRef(jcallback);
633639
log_callback = [](enum ggml_log_level level, const char *text, void *user_data) {
634-
JNIEnv* env = get_jni_env();
640+
JNIEnv *env = get_jni_env();
635641
jstring message = env->NewStringUTF(text);
636642
jobject log_level = log_level_to_jobject(level);
637643
env->CallVoidMethod(o_log_callback, m_biconsumer_accept, log_level, message);

src/main/cpp/server.hpp

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2139,7 +2139,8 @@ struct server_context
21392139
slot.command = SLOT_COMMAND_NONE;
21402140
slot.release();
21412141
slot.print_timings();
2142-
send_error(slot, "input is too large to process. increase the physical batch size", ERROR_TYPE_SERVER);
2142+
send_error(slot, "input is too large to process. increase the physical batch size",
2143+
ERROR_TYPE_SERVER);
21432144
continue;
21442145
}
21452146
}

src/main/cpp/utils.hpp

Lines changed: 23 additions & 14 deletions
Original file line numberDiff line numberDiff line change
@@ -70,20 +70,24 @@ template <typename T> static T json_value(const json &body, const std::string &k
7070
}
7171
}
7272

73-
static const char * log_level_to_string(ggml_log_level level) {
74-
switch (level) {
75-
case GGML_LOG_LEVEL_ERROR:
76-
return "ERROR";
77-
case GGML_LOG_LEVEL_WARN:
78-
return "WARN";
79-
default: case GGML_LOG_LEVEL_INFO:
80-
return "INFO";
81-
case GGML_LOG_LEVEL_DEBUG:
82-
return "DEBUG";
73+
static const char *log_level_to_string(ggml_log_level level)
74+
{
75+
switch (level)
76+
{
77+
case GGML_LOG_LEVEL_ERROR:
78+
return "ERROR";
79+
case GGML_LOG_LEVEL_WARN:
80+
return "WARN";
81+
default:
82+
case GGML_LOG_LEVEL_INFO:
83+
return "INFO";
84+
case GGML_LOG_LEVEL_DEBUG:
85+
return "DEBUG";
8386
}
8487
}
8588

86-
static inline void server_log(ggml_log_level level, const char *function, int line, const char *message, const json &extra)
89+
static inline void server_log(ggml_log_level level, const char *function, int line, const char *message,
90+
const json &extra)
8791
{
8892
std::stringstream ss_tid;
8993
ss_tid << std::this_thread::get_id();
@@ -110,7 +114,9 @@ static inline void server_log(ggml_log_level level, const char *function, int li
110114
if (log_callback == nullptr)
111115
{
112116
printf("%s\n", dump.c_str());
113-
} else {
117+
}
118+
else
119+
{
114120
log_callback(level, dump.c_str(), nullptr);
115121
}
116122
}
@@ -135,9 +141,12 @@ static inline void server_log(ggml_log_level level, const char *function, int li
135141
#endif
136142

137143
const std::string str = ss.str();
138-
if (log_callback == nullptr) {
144+
if (log_callback == nullptr)
145+
{
139146
printf("[%4s] %.*s\n", log_level_to_string(level), (int)str.size(), str.data());
140-
} else {
147+
}
148+
else
149+
{
141150
log_callback(level, str.c_str(), nullptr);
142151
}
143152
}

0 commit comments

Comments (0)