We read every piece of feedback, and take your input very seriously.
To see all available qualifiers, see our documentation.
There was an error while loading. Please reload this page.
1 parent dcbcdd6 · commit 6f9fa32 — Copy full SHA for 6f9fa32
llama-cpp-2/src/model.rs
@@ -424,6 +424,7 @@ impl LlamaModel {
424
acc + c.role.to_bytes().len() + c.content.to_bytes().len()
425
});
426
let mut buff: Vec<i8> = vec![0_i8; message_length * 2];
427
+
428
// Build our llama_cpp_sys_2 chat messages
429
let chat: Vec<llama_cpp_sys_2::llama_chat_message> = chat
430
.iter()
@@ -445,7 +446,7 @@ impl LlamaModel {
445
446
chat.as_ptr(),
447
chat.len(),
448
add_ass,
- buff.as_mut_ptr(),
449
+ buff.as_mut_ptr().cast::<std::os::raw::c_char>(),
450
buff.len() as i32,
451
);
452
// A buffer twice the size should be sufficient for all models, if this is not the case for a new model, we can increase it
0 commit comments