From 428790faf15a4e2f1172571a370f415eb3581d33 Mon Sep 17 00:00:00 2001 From: Meng Zhang Date: Sat, 22 Jun 2024 16:58:54 +0800 Subject: [PATCH] docs: add prompt template information to http based completion config. --- website/docs/administration/model.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/website/docs/administration/model.md b/website/docs/administration/model.md index b72a827feaa3..6faea4f552dd 100644 --- a/website/docs/administration/model.md +++ b/website/docs/administration/model.md @@ -23,6 +23,7 @@ The `llama.cpp` model can be configured with the following parameters: [model.completion.http] kind = "llama.cpp/completion" api_endpoint = "http://localhost:8888" +prompt_template = "<PRE> {prefix} <SUF>{suffix} <MID>"  # Example prompt template for CodeLlama model series.
 ```
 
 #### [ollama](https://github.com/ollama/ollama/blob/main/docs/api.md#generate-a-completion)
@@ -33,6 +34,7 @@ For setting up the `ollama` model, apply the configuration below:
 [model.completion.http]
 kind = "ollama/completion"
 api_endpoint = "http://localhost:8888"
+prompt_template = "<PRE> {prefix} <SUF>{suffix} <MID>"  # Example prompt template for CodeLlama model series.
 ```
 
 #### [mistral / codestral](https://docs.mistral.ai/api/#operation/createFIMCompletion)