add min_p sampling parameter (nomic-ai#2014)
Signed-off-by: Christopher Barrera <[email protected]>
Co-authored-by: Jared Van Bortel <[email protected]>
chrisbarrera and cebtenzzre authored Feb 24, 2024
1 parent a153cc5 commit f8b1069
Showing 28 changed files with 176 additions and 14 deletions.
4 changes: 3 additions & 1 deletion gpt4all-backend/llamamodel.cpp
@@ -64,6 +64,7 @@ static int llama_sample_top_p_top_k(
int last_n_tokens_size,
int top_k,
float top_p,
float min_p,
float temp,
float repeat_penalty,
int32_t pos) {
@@ -83,6 +84,7 @@ static int llama_sample_top_p_top_k(
llama_sample_tail_free(ctx, &candidates_p, 1.0f, 1);
llama_sample_typical(ctx, &candidates_p, 1.0f, 1);
llama_sample_top_p(ctx, &candidates_p, top_p, 1);
llama_sample_min_p(ctx, &candidates_p, min_p, 1);
llama_sample_temp(ctx, &candidates_p, temp);
return llama_sample_token(ctx, &candidates_p);
}
@@ -392,7 +394,7 @@ LLModel::Token LLamaModel::sampleToken(PromptContext &promptCtx) const
const size_t n_prev_toks = std::min((size_t) promptCtx.repeat_last_n, promptCtx.tokens.size());
return llama_sample_top_p_top_k(d_ptr->ctx,
promptCtx.tokens.data() + promptCtx.tokens.size() - n_prev_toks,
n_prev_toks, promptCtx.top_k, promptCtx.top_p, promptCtx.temp,
n_prev_toks, promptCtx.top_k, promptCtx.top_p, promptCtx.min_p, promptCtx.temp,
promptCtx.repeat_penalty, promptCtx.n_last_batch_tokens - 1);
}

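For reference, min-p sampling keeps only candidate tokens whose probability is at least min_p times the probability of the most likely token, so the cutoff scales with the model's confidence; in the chain above it runs after top-p and before the temperature step. A minimal sketch of the rule in Python/NumPy — an illustration of the technique, not the llama.cpp implementation called here:

import numpy as np

def min_p_filter(probs: np.ndarray, min_p: float) -> np.ndarray:
    # Keep tokens with probability >= min_p * (probability of the top token).
    # min_p = 0.0 keeps every token, matching the new default.
    keep = probs >= min_p * probs.max()
    filtered = np.where(keep, probs, 0.0)
    return filtered / filtered.sum()  # renormalize the surviving mass

# Example: probs = [0.50, 0.30, 0.15, 0.05] with min_p = 0.2 gives a cutoff
# of 0.2 * 0.50 = 0.10, so only the 0.05 token is discarded.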
1 change: 1 addition & 0 deletions gpt4all-backend/llmodel.h
@@ -66,6 +66,7 @@ class LLModel {
int32_t n_predict = 200;
int32_t top_k = 40;
float top_p = 0.9f;
float min_p = 0.0f;
float temp = 0.9f;
int32_t n_batch = 9;
float repeat_penalty = 1.10f;
2 changes: 2 additions & 0 deletions gpt4all-backend/llmodel_c.cpp
@@ -134,6 +134,7 @@ void llmodel_prompt(llmodel_model model, const char *prompt,
wrapper->promptContext.n_predict = ctx->n_predict;
wrapper->promptContext.top_k = ctx->top_k;
wrapper->promptContext.top_p = ctx->top_p;
wrapper->promptContext.min_p = ctx->min_p;
wrapper->promptContext.temp = ctx->temp;
wrapper->promptContext.n_batch = ctx->n_batch;
wrapper->promptContext.repeat_penalty = ctx->repeat_penalty;
@@ -156,6 +157,7 @@ void llmodel_prompt(llmodel_model model, const char *prompt,
ctx->n_predict = wrapper->promptContext.n_predict;
ctx->top_k = wrapper->promptContext.top_k;
ctx->top_p = wrapper->promptContext.top_p;
ctx->min_p = wrapper->promptContext.min_p;
ctx->temp = wrapper->promptContext.temp;
ctx->n_batch = wrapper->promptContext.n_batch;
ctx->repeat_penalty = wrapper->promptContext.repeat_penalty;
1 change: 1 addition & 0 deletions gpt4all-backend/llmodel_c.h
@@ -39,6 +39,7 @@ struct llmodel_prompt_context {
int32_t n_predict; // number of tokens to predict
int32_t top_k; // top k logits to sample from
float top_p; // nucleus sampling probability threshold
float min_p; // min p sampling probability threshold
float temp; // temperature to adjust model's output distribution
int32_t n_batch; // number of predictions to generate in parallel
float repeat_penalty; // penalty factor for repeated tokens
2 changes: 2 additions & 0 deletions gpt4all-bindings/cli/app.py
@@ -120,6 +120,7 @@ def _old_loop(gpt4all_instance):
n_predict=200,
top_k=40,
top_p=0.9,
min_p=0.0,
temp=0.9,
n_batch=9,
repeat_penalty=1.1,
@@ -156,6 +157,7 @@ def _new_loop(gpt4all_instance):
temp=0.9,
top_k=40,
top_p=0.9,
min_p=0.0,
repeat_penalty=1.1,
repeat_last_n=64,
n_batch=9,
9 changes: 9 additions & 0 deletions gpt4all-bindings/csharp/Gpt4All/Bindings/LLPromptContext.cs
@@ -64,6 +64,15 @@ public float TopP
set => _ctx.top_p = value;
}

/// <summary>
/// min p sampling probability threshold
/// </summary>
public float MinP
{
get => _ctx.min_p;
set => _ctx.min_p = value;
}

/// <summary>
/// temperature to adjust model's output distribution
/// </summary>
2 changes: 2 additions & 0 deletions gpt4all-bindings/csharp/Gpt4All/Bindings/NativeMethods.cs
@@ -29,6 +29,8 @@ public unsafe partial struct llmodel_prompt_context

public float top_p;

public float min_p;

public float temp;

[NativeTypeName("int32_t")]
@@ -16,6 +16,7 @@ public static string Dump(this LLModelPromptContext context)
n_predict = {ctx.n_predict}
top_k = {ctx.top_k}
top_p = {ctx.top_p}
min_p = {ctx.min_p}
temp = {ctx.temp}
n_batch = {ctx.n_batch}
repeat_penalty = {ctx.repeat_penalty}
@@ -12,6 +12,7 @@ public static LLModelPromptContext ToPromptContext(this PredictRequestOptions op
TokensSize = opts.TokensSize,
TopK = opts.TopK,
TopP = opts.TopP,
MinP = opts.MinP,
PastNum = opts.PastConversationTokensNum,
RepeatPenalty = opts.RepeatPenalty,
Temperature = opts.Temperature,
@@ -16,6 +16,8 @@ public record PredictRequestOptions

public float TopP { get; init; } = 0.9f;

public float MinP { get; init; } = 0.0f;

public float Temperature { get; init; } = 0.1f;

public int Batches { get; init; } = 8;
4 changes: 3 additions & 1 deletion gpt4all-bindings/golang/binding.cpp
@@ -36,7 +36,7 @@ std::string res = "";
void * mm;

void model_prompt( const char *prompt, void *m, char* result, int repeat_last_n, float repeat_penalty, int n_ctx, int tokens, int top_k,
float top_p, float temp, int n_batch,float ctx_erase)
float top_p, float min_p, float temp, int n_batch,float ctx_erase)
{
llmodel_model* model = (llmodel_model*) m;

@@ -69,6 +69,7 @@ void model_prompt( const char *prompt, void *m, char* result, int repeat_last_n,
.n_predict = 50,
.top_k = 10,
.top_p = 0.9,
.min_p = 0.0,
.temp = 1.0,
.n_batch = 1,
.repeat_penalty = 1.2,
@@ -83,6 +84,7 @@ void model_prompt( const char *prompt, void *m, char* result, int repeat_last_n,
prompt_context->top_k = top_k;
prompt_context->context_erase = ctx_erase;
prompt_context->top_p = top_p;
prompt_context->min_p = min_p;
prompt_context->temp = temp;
prompt_context->n_batch = n_batch;

4 changes: 2 additions & 2 deletions gpt4all-bindings/golang/binding.h
@@ -7,12 +7,12 @@ extern "C" {
void* load_model(const char *fname, int n_threads);

void model_prompt( const char *prompt, void *m, char* result, int repeat_last_n, float repeat_penalty, int n_ctx, int tokens, int top_k,
float top_p, float temp, int n_batch,float ctx_erase);
float top_p, float min_p, float temp, int n_batch,float ctx_erase);

void free_model(void *state_ptr);

extern unsigned char getTokenCallback(void *, char *);

#ifdef __cplusplus
}
#endif
#endif
4 changes: 2 additions & 2 deletions gpt4all-bindings/golang/gpt4all.go
@@ -7,7 +7,7 @@ package gpt4all
// #cgo LDFLAGS: -lgpt4all -lm -lstdc++ -ldl
// void* load_model(const char *fname, int n_threads);
// void model_prompt( const char *prompt, void *m, char* result, int repeat_last_n, float repeat_penalty, int n_ctx, int tokens, int top_k,
// float top_p, float temp, int n_batch,float ctx_erase);
// float top_p, float min_p, float temp, int n_batch,float ctx_erase);
// void free_model(void *state_ptr);
// extern unsigned char getTokenCallback(void *, char *);
// void llmodel_set_implementation_search_path(const char *path);
@@ -58,7 +58,7 @@ func (l *Model) Predict(text string, opts ...PredictOption) (string, error) {
out := make([]byte, po.Tokens)

C.model_prompt(input, l.state, (*C.char)(unsafe.Pointer(&out[0])), C.int(po.RepeatLastN), C.float(po.RepeatPenalty), C.int(po.ContextSize),
C.int(po.Tokens), C.int(po.TopK), C.float(po.TopP), C.float(po.Temperature), C.int(po.Batch), C.float(po.ContextErase))
C.int(po.Tokens), C.int(po.TopK), C.float(po.TopP), C.float(po.MinP), C.float(po.Temperature), C.int(po.Batch), C.float(po.ContextErase))

res := C.GoString((*C.char)(unsafe.Pointer(&out[0])))
res = strings.TrimPrefix(res, " ")
10 changes: 9 additions & 1 deletion gpt4all-bindings/golang/options.go
@@ -2,7 +2,7 @@ package gpt4all

type PredictOptions struct {
ContextSize, RepeatLastN, Tokens, TopK, Batch int
TopP, Temperature, ContextErase, RepeatPenalty float64
TopP, MinP, Temperature, ContextErase, RepeatPenalty float64
}

type PredictOption func(p *PredictOptions)
@@ -11,6 +11,7 @@ var DefaultOptions PredictOptions = PredictOptions{
Tokens: 200,
TopK: 10,
TopP: 0.90,
MinP: 0.0,
Temperature: 0.96,
Batch: 1,
ContextErase: 0.55,
@@ -50,6 +51,13 @@ func SetTopP(topp float64) PredictOption {
}
}

// SetMinP sets the value for min p sampling
func SetMinP(minp float64) PredictOption {
return func(p *PredictOptions) {
p.MinP = minp
}
}

// SetRepeatPenalty sets the repeat penalty.
func SetRepeatPenalty(ce float64) PredictOption {
return func(p *PredictOptions) {
@@ -32,6 +32,7 @@ private GenerationConfig() {
n_predict.set(128);
top_k.set(40);
top_p.set(0.95);
min_p.set(0.0);
temp.set(0.28);
n_batch.set(8);
repeat_penalty.set(1.1);
@@ -71,6 +72,11 @@ public Builder withTopP(float top_p) {
return this;
}

public Builder withMinP(float min_p) {
configToBuild.min_p.set(min_p);
return this;
}

public Builder withTemp(float temp) {
configToBuild.temp.set(temp);
return this;
@@ -48,6 +48,7 @@ class LLModelPromptContext extends Struct {
public final int32_t n_predict = new int32_t();
public final int32_t top_k = new int32_t();
public final Float top_p = new Float();
public final Float min_p = new Float();
public final Float temp = new Float();
public final int32_t n_batch = new int32_t();
public final Float repeat_penalty = new Float();
6 changes: 6 additions & 0 deletions gpt4all-bindings/python/gpt4all/_pyllmodel.py
@@ -49,6 +49,7 @@ class LLModelPromptContext(ctypes.Structure):
("n_predict", ctypes.c_int32),
("top_k", ctypes.c_int32),
("top_p", ctypes.c_float),
("min_p", ctypes.c_float),
("temp", ctypes.c_float),
("n_batch", ctypes.c_int32),
("repeat_penalty", ctypes.c_float),
@@ -241,6 +242,7 @@ def _set_context(
n_predict: int = 4096,
top_k: int = 40,
top_p: float = 0.9,
min_p: float = 0.0,
temp: float = 0.1,
n_batch: int = 8,
repeat_penalty: float = 1.2,
@@ -257,6 +259,7 @@ def _set_context(
n_predict=n_predict,
top_k=top_k,
top_p=top_p,
min_p=min_p,
temp=temp,
n_batch=n_batch,
repeat_penalty=repeat_penalty,
@@ -272,6 +275,7 @@ def _set_context(
self.context.n_predict = n_predict
self.context.top_k = top_k
self.context.top_p = top_p
self.context.min_p = min_p
self.context.temp = temp
self.context.n_batch = n_batch
self.context.repeat_penalty = repeat_penalty
@@ -297,6 +301,7 @@ def prompt_model(
n_predict: int = 4096,
top_k: int = 40,
top_p: float = 0.9,
min_p: float = 0.0,
temp: float = 0.1,
n_batch: int = 8,
repeat_penalty: float = 1.2,
@@ -334,6 +339,7 @@ def prompt_model(
n_predict=n_predict,
top_k=top_k,
top_p=top_p,
min_p=min_p,
temp=temp,
n_batch=n_batch,
repeat_penalty=repeat_penalty,
3 changes: 3 additions & 0 deletions gpt4all-bindings/python/gpt4all/gpt4all.py
@@ -289,6 +289,7 @@ def generate(
temp: float = 0.7,
top_k: int = 40,
top_p: float = 0.4,
min_p: float = 0.0,
repeat_penalty: float = 1.18,
repeat_last_n: int = 64,
n_batch: int = 8,
@@ -305,6 +306,7 @@ def generate(
temp: The model temperature. Larger values increase creativity but decrease factuality.
top_k: Randomly sample from the top_k most likely tokens at each generation step. Set this to 1 for greedy decoding.
top_p: Randomly sample at each generation step from the top most likely tokens whose probabilities add up to top_p.
min_p: Randomly sample at each generation step from tokens whose probability is at least min_p, scaled by the probability of the most likely token. Set to 0.0 to disable.
repeat_penalty: Penalize the model for repetition. Higher values result in less repetition.
repeat_last_n: How far in the model's generation history to apply the repeat penalty.
n_batch: Number of prompt tokens processed in parallel. Larger values decrease latency but increase resource requirements.
@@ -325,6 +327,7 @@ def generate(
temp=temp,
top_k=top_k,
top_p=top_p,
min_p=min_p,
repeat_penalty=repeat_penalty,
repeat_last_n=repeat_last_n,
n_batch=n_batch,
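A usage sketch of the new parameter through the Python binding; the model filename and sampling settings here are illustrative, not prescribed by this commit:

from gpt4all import GPT4All

model = GPT4All("mistral-7b-instruct-v0.1.Q4_0.gguf")  # any local model works

# min_p=0.0 (the default) leaves sampling unchanged; 0.05 discards tokens
# less than 5% as likely as the top candidate at each step.
output = model.generate(
    "Why is the sky blue?",
    temp=0.7,
    top_k=40,
    top_p=0.4,
    min_p=0.05,
)
print(output)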
3 changes: 3 additions & 0 deletions gpt4all-bindings/typescript/index.cc
@@ -248,6 +248,7 @@ Napi::Value NodeModelWrapper::GetRequiredMemory(const Napi::CallbackInfo& info)
.n_predict = 128,
.top_k = 40,
.top_p = 0.9f,
.min_p = 0.0f,
.temp = 0.72f,
.n_batch = 8,
.repeat_penalty = 1.0f,
@@ -277,6 +278,8 @@ Napi::Value NodeModelWrapper::GetRequiredMemory(const Napi::CallbackInfo& info)
promptContext.top_k = inputObject.Get("top_k").As<Napi::Number>().Int32Value();
if(inputObject.Has("top_p"))
promptContext.top_p = inputObject.Get("top_p").As<Napi::Number>().FloatValue();
if(inputObject.Has("min_p"))
promptContext.min_p = inputObject.Get("min_p").As<Napi::Number>().FloatValue();
if(inputObject.Has("temp"))
promptContext.temp = inputObject.Get("temp").As<Napi::Number>().FloatValue();
if(inputObject.Has("n_batch"))
10 changes: 8 additions & 2 deletions gpt4all-chat/chatllm.cpp
@@ -568,16 +568,17 @@ bool ChatLLM::prompt(const QList<QString> &collectionList, const QString &prompt
const int32_t n_predict = MySettings::globalInstance()->modelMaxLength(m_modelInfo);
const int32_t top_k = MySettings::globalInstance()->modelTopK(m_modelInfo);
const float top_p = MySettings::globalInstance()->modelTopP(m_modelInfo);
const float min_p = MySettings::globalInstance()->modelMinP(m_modelInfo);
const float temp = MySettings::globalInstance()->modelTemperature(m_modelInfo);
const int32_t n_batch = MySettings::globalInstance()->modelPromptBatchSize(m_modelInfo);
const float repeat_penalty = MySettings::globalInstance()->modelRepeatPenalty(m_modelInfo);
const int32_t repeat_penalty_tokens = MySettings::globalInstance()->modelRepeatPenaltyTokens(m_modelInfo);
return promptInternal(collectionList, prompt, promptTemplate, n_predict, top_k, top_p, temp, n_batch,
return promptInternal(collectionList, prompt, promptTemplate, n_predict, top_k, top_p, min_p, temp, n_batch,
repeat_penalty, repeat_penalty_tokens);
}

bool ChatLLM::promptInternal(const QList<QString> &collectionList, const QString &prompt, const QString &promptTemplate,
int32_t n_predict, int32_t top_k, float top_p, float temp, int32_t n_batch, float repeat_penalty,
int32_t n_predict, int32_t top_k, float top_p, float min_p, float temp, int32_t n_batch, float repeat_penalty,
int32_t repeat_penalty_tokens)
{
if (!isModelLoaded())
@@ -608,6 +609,7 @@ bool ChatLLM::promptInternal(const QList<QString> &collectionList, const QString
m_ctx.n_predict = n_predict;
m_ctx.top_k = top_k;
m_ctx.top_p = top_p;
m_ctx.min_p = min_p;
m_ctx.temp = temp;
m_ctx.n_batch = n_batch;
m_ctx.repeat_penalty = repeat_penalty;
@@ -1020,6 +1022,7 @@ void ChatLLM::processSystemPrompt()
const int32_t n_predict = MySettings::globalInstance()->modelMaxLength(m_modelInfo);
const int32_t top_k = MySettings::globalInstance()->modelTopK(m_modelInfo);
const float top_p = MySettings::globalInstance()->modelTopP(m_modelInfo);
const float min_p = MySettings::globalInstance()->modelMinP(m_modelInfo);
const float temp = MySettings::globalInstance()->modelTemperature(m_modelInfo);
const int32_t n_batch = MySettings::globalInstance()->modelPromptBatchSize(m_modelInfo);
const float repeat_penalty = MySettings::globalInstance()->modelRepeatPenalty(m_modelInfo);
@@ -1028,6 +1031,7 @@ void ChatLLM::processSystemPrompt()
m_ctx.n_predict = n_predict;
m_ctx.top_k = top_k;
m_ctx.top_p = top_p;
m_ctx.min_p = min_p;
m_ctx.temp = temp;
m_ctx.n_batch = n_batch;
m_ctx.repeat_penalty = repeat_penalty;
@@ -1067,6 +1071,7 @@ void ChatLLM::processRestoreStateFromText()
const int32_t n_predict = MySettings::globalInstance()->modelMaxLength(m_modelInfo);
const int32_t top_k = MySettings::globalInstance()->modelTopK(m_modelInfo);
const float top_p = MySettings::globalInstance()->modelTopP(m_modelInfo);
const float min_p = MySettings::globalInstance()->modelMinP(m_modelInfo);
const float temp = MySettings::globalInstance()->modelTemperature(m_modelInfo);
const int32_t n_batch = MySettings::globalInstance()->modelPromptBatchSize(m_modelInfo);
const float repeat_penalty = MySettings::globalInstance()->modelRepeatPenalty(m_modelInfo);
@@ -1075,6 +1080,7 @@ void ChatLLM::processRestoreStateFromText()
m_ctx.n_predict = n_predict;
m_ctx.top_k = top_k;
m_ctx.top_p = top_p;
m_ctx.min_p = min_p;
m_ctx.temp = temp;
m_ctx.n_batch = n_batch;
m_ctx.repeat_penalty = repeat_penalty;
2 changes: 1 addition & 1 deletion gpt4all-chat/chatllm.h
@@ -139,7 +139,7 @@ public Q_SLOTS:

protected:
bool promptInternal(const QList<QString> &collectionList, const QString &prompt, const QString &promptTemplate,
int32_t n_predict, int32_t top_k, float top_p, float temp, int32_t n_batch, float repeat_penalty,
int32_t n_predict, int32_t top_k, float top_p, float min_p, float temp, int32_t n_batch, float repeat_penalty,
int32_t repeat_penalty_tokens);
bool handlePrompt(int32_t token);
bool handleResponse(int32_t token, const std::string &response);
(diffs for the remaining changed files not shown)
