-
Notifications
You must be signed in to change notification settings - Fork 108
/
Get-GPT3Completion.ps1
82 lines (67 loc) · 3.1 KB
/
Get-GPT3Completion.ps1
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
function Get-GPT3Completion {
<#
.SYNOPSIS
Get a completion from the OpenAI GPT-3 API
.DESCRIPTION
Given a prompt, posts a JSON request body to the OpenAI completions endpoint (via the module's Invoke-OpenAIAPI helper) and returns the text of the first returned choice, or the raw response object when -Raw is specified
.PARAMETER prompt
The prompt to generate completions for. Mandatory
.PARAMETER model
ID of the model to use. Defaults to 'text-davinci-003'
.PARAMETER temperature
The temperature used to control the model's likelihood to take risky actions, between 0 and 2. Higher values means the model will take more risks. Try 0.9 for more creative applications, and 0 (argmax sampling) for ones with a well-defined answer. Defaults to 0
.PARAMETER max_tokens
The maximum number of tokens to generate, between 1 and 2048. Defaults to 256
.PARAMETER top_p
An alternative to sampling with temperature, called nucleus sampling, where the model considers the results of the tokens with top_p probability mass. So 0.1 means only the tokens comprising the top 10% probability mass are considered. Between 0 and 1. Defaults to 1
.PARAMETER frequency_penalty
A value between -2.0 and 2.0. Positive values penalize new tokens based on their existing frequency in the text so far. Defaults to 0
.PARAMETER presence_penalty
A value between -2.0 and 2.0. Positive values penalize new tokens based on whether they appear in the text so far. Defaults to 0
.PARAMETER stop
Sequence(s) at which the API will stop generating further tokens. Passed through to the API as-is; no default is set by this function
.PARAMETER Raw
Return the full API response object instead of just the text of the first choice
.EXAMPLE
Get-GPT3Completion -prompt "What is 2%2? - please explain"
#>
[CmdletBinding()]
[alias("gpt")]
param(
[Parameter(Mandatory)]
$prompt,
$model = 'text-davinci-003',
[ValidateRange(0, 2)]
[decimal]$temperature = 0.0,
[ValidateRange(1, 2048)]
[int]$max_tokens = 256,
[ValidateRange(0, 1)]
[decimal]$top_p = 1.0,
[ValidateRange(-2, 2)]
[decimal]$frequency_penalty = 0,
[ValidateRange(-2, 2)]
[decimal]$presence_penalty = 0,
$stop,
[Switch]$Raw
)
# NOTE(review): API-key validation is currently disabled; the request will fail
# downstream in Invoke-OpenAIAPI if $env:OpenAIKey is not set
# if (!(Test-OpenAIKey)) {
# throw 'You must set the $env:OpenAIKey environment variable to your OpenAI API key. https://beta.openai.com/account/api-keys'
# }
# Build the request payload; [ordered] keeps a stable key order in the JSON
$body = [ordered]@{
model = $model
prompt = $prompt
temperature = $temperature
max_tokens = $max_tokens
top_p = $top_p
frequency_penalty = $frequency_penalty
presence_penalty = $presence_penalty
stop = $stop
}
# Serialize, then encode explicitly as UTF-8 bytes so non-ASCII prompt text
# is transmitted correctly regardless of the default web-request encoding
$body = $body | ConvertTo-Json -Depth 5
$body = [System.Text.Encoding]::UTF8.GetBytes($body)
# Invoke-OpenAIAPI and Get-OpenAICompletionsURI are defined elsewhere in this module
$result = Invoke-OpenAIAPI -Uri (Get-OpenAICompletionsURI) -Method 'Post' -Body $body
if ($Raw) {
# Emit the entire deserialized API response
$result
}
elseif ($result.choices) {
# Default: emit only the text of the first completion choice
$result.choices[0].text
}
}