diff --git a/llama-cpp-2/src/llama_batch.rs b/llama-cpp-2/src/llama_batch.rs
index 8a2fd376..153f5d52 100644
--- a/llama-cpp-2/src/llama_batch.rs
+++ b/llama-cpp-2/src/llama_batch.rs
@@ -157,17 +157,13 @@ impl LlamaBatch {
     ///
     /// NOTE: this is a helper function to facilitate transition to the new batch API
     ///
-    pub fn get_one(
-        tokens: &[LlamaToken],
-        pos_0: llama_pos,
-        seq_id: llama_seq_id,
-    ) -> Result<Self, BatchAddError> {
+    pub fn get_one(tokens: &[LlamaToken]) -> Result<Self, BatchAddError> {
         if tokens.is_empty() {
             return Err(BatchAddError::EmptyBuffer);
         }
         let batch = unsafe {
             let ptr = tokens.as_ptr() as *mut i32;
-            llama_cpp_sys_2::llama_batch_get_one(ptr, tokens.len() as i32, pos_0, seq_id)
+            llama_cpp_sys_2::llama_batch_get_one(ptr, tokens.len() as i32)
         };
         let batch = Self {
             allocated: 0,
diff --git a/llama-cpp-sys-2/llama.cpp b/llama-cpp-sys-2/llama.cpp
index 0abc6a2c..64ed2091 160000
--- a/llama-cpp-sys-2/llama.cpp
+++ b/llama-cpp-sys-2/llama.cpp
@@ -1 +1 @@
-Subproject commit 0abc6a2c25272d5cf01384dda8ee8bfec4ba8745
+Subproject commit 64ed2091b24b2f9747148fdf49a34ed5938762c3
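The diff above drops the `pos_0` and `seq_id` parameters from `LlamaBatch::get_one`, matching the upstream `llama_batch_get_one` signature in the bumped llama.cpp submodule, which now takes only the token pointer and count. The sketch below shows how a call site might adapt; `make_prompt_batch` is a hypothetical helper, not part of the crate, and the import paths are assumed from the module layout visible in the diff.

```rust
use llama_cpp_2::llama_batch::LlamaBatch;
use llama_cpp_2::token::LlamaToken;

// Hypothetical call site illustrating the new signature: positions and the
// sequence id are no longer passed here; only the token slice is required.
fn make_prompt_batch(tokens: &[LlamaToken]) -> LlamaBatch {
    // Old: LlamaBatch::get_one(tokens, 0, 0)
    // New: the only failure mode surfaced here is an empty token slice.
    LlamaBatch::get_one(tokens).expect("token slice must not be empty")
}
```

Callers that previously relied on a non-zero `pos_0` or a specific `seq_id` will need to express that through the regular batch-building API instead, since this helper no longer carries that information.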