diff --git a/llama-cpp-2/src/model/params.rs b/llama-cpp-2/src/model/params.rs
index b4d5a25e..92a2ccdb 100644
--- a/llama-cpp-2/src/model/params.rs
+++ b/llama-cpp-2/src/model/params.rs
@@ -180,7 +180,10 @@ impl LlamaModelParams {
     /// ```
     /// # use llama_cpp_2::model::params::LlamaModelParams;
     /// let params = LlamaModelParams::default();
+    /// #[cfg(not(target_os = "macos"))]
     /// assert_eq!(params.n_gpu_layers(), 0, "n_gpu_layers should be 0");
+    /// #[cfg(target_os = "macos")]
+    /// assert_eq!(params.n_gpu_layers(), 999, "n_gpu_layers should be 999");
     /// assert_eq!(params.main_gpu(), 0, "main_gpu should be 0");
     /// assert_eq!(params.vocab_only(), false, "vocab_only should be false");
     /// assert_eq!(params.use_mmap(), true, "use_mmap should be true");
diff --git a/llama-cpp-sys-2/build.rs b/llama-cpp-sys-2/build.rs
index b7768514..5d14cea5 100644
--- a/llama-cpp-sys-2/build.rs
+++ b/llama-cpp-sys-2/build.rs
@@ -130,7 +130,7 @@ fn main() {
         llama_cpp.define("GGML_USE_ACCELERATE", None);
         llama_cpp.define("ACCELERATE_NEW_LAPACK", None);
         llama_cpp.define("ACCELERATE_LAPACK_ILP64", None);
-        println!("cargo:rustc-link-arg=framework=Accelerate");
+        println!("cargo:rustc-link-lib=framework=Accelerate");
 
         metal_hack(&mut ggml);
         ggml.include("./llama.cpp/ggml-metal.h");
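On the first hunk: a `#[cfg]` attribute in a doctest gates only the statement that follows it, so each `assert_eq!` is compiled solely for its matching target and the example passes on every platform. The 999-layer default on macOS comes straight from the diff; the usual explanation, that the crate offloads all layers by default on Apple hardware, is an assumption here. An equivalent formulation of the same check, as a sketch:

    // Sketch: the platform-dependent doctest written as one assertion.
    use llama_cpp_2::model::params::LlamaModelParams;

    fn main() {
        let params = LlamaModelParams::default();
        #[cfg(target_os = "macos")]
        let expected = 999; // macOS default per the diff above
        #[cfg(not(target_os = "macos"))]
        let expected = 0;
        assert_eq!(params.n_gpu_layers(), expected);
    }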
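On the second hunk: `cargo:rustc-link-lib=framework=NAME` is Cargo's documented directive for linking a macOS framework, whereas `cargo:rustc-link-arg` forwards its value to the linker verbatim, and the Apple linker does not accept the bare string `framework=Accelerate` as a flag; that mismatch is what the one-line change fixes. A minimal build-script sketch using the corrected directive (gating on `CARGO_CFG_TARGET_OS` is an assumption about structure, not a copy of the crate's actual build.rs):

    // build.rs sketch: link Accelerate only when compiling for macOS.
    use std::env;

    fn main() {
        // Build scripts should read CARGO_CFG_TARGET_OS rather than use cfg!,
        // which reflects the host toolchain and breaks cross-compilation.
        let target_os = env::var("CARGO_CFG_TARGET_OS").unwrap_or_default();
        if target_os == "macos" {
            println!("cargo:rustc-link-lib=framework=Accelerate");
        }
    }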