// This file was auto-generated by Fern from our API Definition.
package api
import (
json "encoding/json"
fmt "fmt"
core "github.com/cohere-ai/cohere-go/v2/core"
time "time"
)
type ChatRequest struct {
// Text input for the model to respond to.
Message string `json:"message" url:"message"`
// Defaults to `command`.
//
// The name of a compatible [Cohere model](https://docs.cohere.com/docs/models) or the ID of a [fine-tuned](https://docs.cohere.com/docs/chat-fine-tuning) model.
Model *string `json:"model,omitempty" url:"model,omitempty"`
// When specified, the default Cohere preamble will be replaced with the provided one. Preambles are a part of the prompt used to adjust the model's overall behavior and conversation style, and use the `SYSTEM` role.
//
// The `SYSTEM` role is also used for the contents of the optional `chat_history=` parameter. When used with the `chat_history=` parameter it adds content throughout a conversation. Conversely, when used with the `preamble=` parameter it adds content at the start of the conversation only.
Preamble *string `json:"preamble,omitempty" url:"preamble,omitempty"`
// A list of previous messages between the user and the model, giving the model conversational context for responding to the user's `message`.
//
// Each item represents a single message in the chat history, excluding the current user turn. It has two properties: `role` and `message`. The `role` identifies the sender (`CHATBOT`, `SYSTEM`, or `USER`), while the `message` contains the text content.
//
// The chat_history parameter should not be used for `SYSTEM` messages in most cases. Instead, to add a `SYSTEM` role message at the beginning of a conversation, the `preamble` parameter should be used.
ChatHistory []*ChatMessage `json:"chat_history,omitempty" url:"chat_history,omitempty"`
// An alternative to `chat_history`.
//
// Providing a `conversation_id` creates or resumes a persisted conversation with the specified ID. The ID can be any non-empty string.
ConversationId *string `json:"conversation_id,omitempty" url:"conversation_id,omitempty"`
// Defaults to `AUTO` when `connectors` are specified and `OFF` in all other cases.
//
// Dictates how the prompt will be constructed.
//
// With `prompt_truncation` set to "AUTO", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. During this process the order of the documents and chat history will be changed and ranked by relevance.
//
// With `prompt_truncation` set to "AUTO_PRESERVE_ORDER", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. During this process the order of the documents and chat history will be preserved as they are inputted into the API.
//
// With `prompt_truncation` set to "OFF", no elements will be dropped. If the sum of the inputs exceeds the model's context length limit, a `TooManyTokens` error will be returned.
PromptTruncation *ChatRequestPromptTruncation `json:"prompt_truncation,omitempty" url:"prompt_truncation,omitempty"`
// Accepts `{"id": "web-search"}`, and/or the `"id"` for a custom [connector](https://docs.cohere.com/docs/connectors), if you've [created](https://docs.cohere.com/docs/creating-and-deploying-a-connector) one.
//
// When specified, the model's reply will be enriched with information found by querying each of the connectors (RAG).
Connectors []*ChatConnector `json:"connectors,omitempty" url:"connectors,omitempty"`
CitationQuality *ChatRequestCitationQuality `json:"citation_quality,omitempty" url:"citation_quality,omitempty"`
// Defaults to `false`.
//
// When `true`, the response will only contain a list of generated search queries, but no search will take place, and no reply from the model to the user's `message` will be generated.
SearchQueriesOnly *bool `json:"search_queries_only,omitempty" url:"search_queries_only,omitempty"`
// A list of relevant documents that the model can cite to generate a more accurate reply. Each document is a string-string dictionary.
//
// Example:
// `[
//
// { "title": "Tall penguins", "text": "Emperor penguins are the tallest." },
// { "title": "Penguin habitats", "text": "Emperor penguins only live in Antarctica." },
//
// ]`
//
// Keys and values from each document will be serialized to a string and passed to the model. The resulting generation will include citations that reference some of these documents.
//
// Some suggested keys are "text", "author", and "date". For better generation quality, it is recommended to keep the total word count of the strings in the dictionary to under 300 words.
//
// An `id` field (string) can be optionally supplied to identify the document in the citations. This field will not be passed to the model.
//
// An `_excludes` field (array of strings) can be optionally supplied to omit some key-value pairs from being shown to the model. The omitted fields will still show up in the citation object. The "_excludes" field will not be passed to the model.
//
// See ['Document Mode'](https://docs.cohere.com/docs/retrieval-augmented-generation-rag#document-mode) in the guide for more information.
Documents []ChatDocument `json:"documents,omitempty" url:"documents,omitempty"`
// Defaults to `0.3`.
//
// A non-negative float that tunes the degree of randomness in generation. Lower temperatures mean less random generations, and higher temperatures mean more random generations.
//
// Randomness can be further maximized by increasing the value of the `p` parameter.
Temperature *float64 `json:"temperature,omitempty" url:"temperature,omitempty"`
// The maximum number of tokens the model will generate as part of the response. Note: Setting a low value may result in incomplete generations.
MaxTokens *int `json:"max_tokens,omitempty" url:"max_tokens,omitempty"`
// Ensures only the top `k` most likely tokens are considered for generation at each step.
// Defaults to `0`, min value of `0`, max value of `500`.
K *int `json:"k,omitempty" url:"k,omitempty"`
// Ensures that only the most likely tokens, with total probability mass of `p`, are considered for generation at each step. If both `k` and `p` are enabled, `p` acts after `k`.
// Defaults to `0.75`, min value of `0.01`, max value of `0.99`.
P *float64 `json:"p,omitempty" url:"p,omitempty"`
// If specified, the backend will make a best effort to sample tokens deterministically, such that repeated requests with the same seed and parameters should return the same result. However, determinism cannot be totally guaranteed.
Seed *float64 `json:"seed,omitempty" url:"seed,omitempty"`
// A list of up to 5 strings that the model will use to stop generation. If the model generates a string that matches any of the strings in the list, it will stop generating tokens and return the generated text up to that point not including the stop sequence.
StopSequences []string `json:"stop_sequences,omitempty" url:"stop_sequences,omitempty"`
// Defaults to `0.0`, min value of `0.0`, max value of `1.0`.
//
// Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation.
FrequencyPenalty *float64 `json:"frequency_penalty,omitempty" url:"frequency_penalty,omitempty"`
// Defaults to `0.0`, min value of `0.0`, max value of `1.0`.
//
// Used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies.
PresencePenalty *float64 `json:"presence_penalty,omitempty" url:"presence_penalty,omitempty"`
// When enabled, the user's prompt will be sent to the model without any pre-processing.
RawPrompting *bool `json:"raw_prompting,omitempty" url:"raw_prompting,omitempty"`
// A list of available tools (functions) that the model may suggest invoking before producing a text response.
//
// When `tools` is passed (without `tool_results`), the `text` field in the response will be `""` and the `tool_calls` field in the response will be populated with a list of tool calls that need to be made. If no calls need to be made, the `tool_calls` array will be empty.
Tools []*Tool `json:"tools,omitempty" url:"tools,omitempty"`
// A list of results from invoking tools recommended by the model in the previous chat turn. Results are used to produce a text response and will be referenced in citations. When using `tool_results`, `tools` must be passed as well.
// Each tool_result contains information about how it was invoked, as well as a list of outputs in the form of dictionaries.
//
// **Note**: `outputs` must be a list of objects. If your tool returns a single object (e.g. `{"status": 200}`), make sure to wrap it in a list.
// ```
// tool_results = [
//
// {
// "call": {
// "name": <tool name>,
// "parameters": {
// <param name>: <param value>
// }
// },
// "outputs": [{
// <key>: <value>
// }]
// },
// ...
//
// ]
// ```
// **Note**: Chat calls with `tool_results` should not be included in the Chat history to avoid duplication of the message text.
ToolResults []*ChatRequestToolResultsItem `json:"tool_results,omitempty" url:"tool_results,omitempty"`
stream bool
}
func (c *ChatRequest) Stream() bool {
return c.stream
}
func (c *ChatRequest) UnmarshalJSON(data []byte) error {
type unmarshaler ChatRequest
var body unmarshaler
if err := json.Unmarshal(data, &body); err != nil {
return err
}
*c = ChatRequest(body)
c.stream = false
return nil
}
func (c *ChatRequest) MarshalJSON() ([]byte, error) {
type embed ChatRequest
var marshaler = struct {
embed
Stream bool `json:"stream"`
}{
embed: embed(*c),
Stream: false,
}
return json.Marshal(marshaler)
}
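// exampleChatRequest is an illustrative sketch, not part of the generated API:
// it shows how a non-streaming ChatRequest might be assembled and serialized.
// The message, model name, and document contents below are placeholder values.
func exampleChatRequest() ([]byte, error) {
	model := "command"
	temperature := 0.3
	req := &ChatRequest{
		Message:     "Where do the tallest penguins live?",
		Model:       &model,
		Temperature: &temperature,
		ChatHistory: []*ChatMessage{
			{Role: ChatMessageRoleUser, Message: "Hi, I have a question about penguins."},
			{Role: ChatMessageRoleChatbot, Message: "Sure, what would you like to know?"},
		},
		Documents: []ChatDocument{
			{"title": "Tall penguins", "text": "Emperor penguins are the tallest."},
		},
	}
	// MarshalJSON always emits `"stream": false`; the streaming variant
	// (ChatStreamRequest) emits `"stream": true` instead.
	return json.Marshal(req)
}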
type ChatStreamRequest struct {
// Text input for the model to respond to.
Message string `json:"message" url:"message"`
// Defaults to `command`.
//
// The name of a compatible [Cohere model](https://docs.cohere.com/docs/models) or the ID of a [fine-tuned](https://docs.cohere.com/docs/chat-fine-tuning) model.
Model *string `json:"model,omitempty" url:"model,omitempty"`
// When specified, the default Cohere preamble will be replaced with the provided one. Preambles are a part of the prompt used to adjust the model's overall behavior and conversation style, and use the `SYSTEM` role.
//
// The `SYSTEM` role is also used for the contents of the optional `chat_history=` parameter. When used with the `chat_history=` parameter it adds content throughout a conversation. Conversely, when used with the `preamble=` parameter it adds content at the start of the conversation only.
Preamble *string `json:"preamble,omitempty" url:"preamble,omitempty"`
// A list of previous messages between the user and the model, giving the model conversational context for responding to the user's `message`.
//
// Each item represents a single message in the chat history, excluding the current user turn. It has two properties: `role` and `message`. The `role` identifies the sender (`CHATBOT`, `SYSTEM`, or `USER`), while the `message` contains the text content.
//
// The chat_history parameter should not be used for `SYSTEM` messages in most cases. Instead, to add a `SYSTEM` role message at the beginning of a conversation, the `preamble` parameter should be used.
ChatHistory []*ChatMessage `json:"chat_history,omitempty" url:"chat_history,omitempty"`
// An alternative to `chat_history`.
//
// Providing a `conversation_id` creates or resumes a persisted conversation with the specified ID. The ID can be any non-empty string.
ConversationId *string `json:"conversation_id,omitempty" url:"conversation_id,omitempty"`
// Defaults to `AUTO` when `connectors` are specified and `OFF` in all other cases.
//
// Dictates how the prompt will be constructed.
//
// With `prompt_truncation` set to "AUTO", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. During this process the order of the documents and chat history will be changed and ranked by relevance.
//
// With `prompt_truncation` set to "AUTO_PRESERVE_ORDER", some elements from `chat_history` and `documents` will be dropped in an attempt to construct a prompt that fits within the model's context length limit. During this process the order of the documents and chat history will be preserved as they are inputted into the API.
//
// With `prompt_truncation` set to "OFF", no elements will be dropped. If the sum of the inputs exceeds the model's context length limit, a `TooManyTokens` error will be returned.
PromptTruncation *ChatStreamRequestPromptTruncation `json:"prompt_truncation,omitempty" url:"prompt_truncation,omitempty"`
// Accepts `{"id": "web-search"}`, and/or the `"id"` for a custom [connector](https://docs.cohere.com/docs/connectors), if you've [created](https://docs.cohere.com/docs/creating-and-deploying-a-connector) one.
//
// When specified, the model's reply will be enriched with information found by querying each of the connectors (RAG).
Connectors []*ChatConnector `json:"connectors,omitempty" url:"connectors,omitempty"`
CitationQuality *ChatRequestCitationQuality `json:"citation_quality,omitempty" url:"citation_quality,omitempty"`
// Defaults to `false`.
//
// When `true`, the response will only contain a list of generated search queries, but no search will take place, and no reply from the model to the user's `message` will be generated.
SearchQueriesOnly *bool `json:"search_queries_only,omitempty" url:"search_queries_only,omitempty"`
// A list of relevant documents that the model can cite to generate a more accurate reply. Each document is a string-string dictionary.
//
// Example:
// `[
//
// { "title": "Tall penguins", "text": "Emperor penguins are the tallest." },
// { "title": "Penguin habitats", "text": "Emperor penguins only live in Antarctica." },
//
// ]`
//
// Keys and values from each document will be serialized to a string and passed to the model. The resulting generation will include citations that reference some of these documents.
//
// Some suggested keys are "text", "author", and "date". For better generation quality, it is recommended to keep the total word count of the strings in the dictionary to under 300 words.
//
// An `id` field (string) can be optionally supplied to identify the document in the citations. This field will not be passed to the model.
//
// An `_excludes` field (array of strings) can be optionally supplied to omit some key-value pairs from being shown to the model. The omitted fields will still show up in the citation object. The "_excludes" field will not be passed to the model.
//
// See ['Document Mode'](https://docs.cohere.com/docs/retrieval-augmented-generation-rag#document-mode) in the guide for more information.
Documents []ChatDocument `json:"documents,omitempty" url:"documents,omitempty"`
// Defaults to `0.3`.
//
// A non-negative float that tunes the degree of randomness in generation. Lower temperatures mean less random generations, and higher temperatures mean more random generations.
//
// Randomness can be further maximized by increasing the value of the `p` parameter.
Temperature *float64 `json:"temperature,omitempty" url:"temperature,omitempty"`
// The maximum number of tokens the model will generate as part of the response. Note: Setting a low value may result in incomplete generations.
MaxTokens *int `json:"max_tokens,omitempty" url:"max_tokens,omitempty"`
// Ensures only the top `k` most likely tokens are considered for generation at each step.
// Defaults to `0`, min value of `0`, max value of `500`.
K *int `json:"k,omitempty" url:"k,omitempty"`
// Ensures that only the most likely tokens, with total probability mass of `p`, are considered for generation at each step. If both `k` and `p` are enabled, `p` acts after `k`.
// Defaults to `0.75`, min value of `0.01`, max value of `0.99`.
P *float64 `json:"p,omitempty" url:"p,omitempty"`
// If specified, the backend will make a best effort to sample tokens deterministically, such that repeated requests with the same seed and parameters should return the same result. However, determinism cannot be totally guaranteed.
Seed *float64 `json:"seed,omitempty" url:"seed,omitempty"`
// A list of up to 5 strings that the model will use to stop generation. If the model generates a string that matches any of the strings in the list, it will stop generating tokens and return the generated text up to that point not including the stop sequence.
StopSequences []string `json:"stop_sequences,omitempty" url:"stop_sequences,omitempty"`
// Defaults to `0.0`, min value of `0.0`, max value of `1.0`.
//
// Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation.
FrequencyPenalty *float64 `json:"frequency_penalty,omitempty" url:"frequency_penalty,omitempty"`
// Defaults to `0.0`, min value of `0.0`, max value of `1.0`.
//
// Used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies.
PresencePenalty *float64 `json:"presence_penalty,omitempty" url:"presence_penalty,omitempty"`
// When enabled, the user's prompt will be sent to the model without any pre-processing.
RawPrompting *bool `json:"raw_prompting,omitempty" url:"raw_prompting,omitempty"`
// A list of available tools (functions) that the model may suggest invoking before producing a text response.
//
// When `tools` is passed (without `tool_results`), the `text` field in the response will be `""` and the `tool_calls` field in the response will be populated with a list of tool calls that need to be made. If no calls need to be made, the `tool_calls` array will be empty.
Tools []*Tool `json:"tools,omitempty" url:"tools,omitempty"`
// A list of results from invoking tools recommended by the model in the previous chat turn. Results are used to produce a text response and will be referenced in citations. When using `tool_results`, `tools` must be passed as well.
// Each tool_result contains information about how it was invoked, as well as a list of outputs in the form of dictionaries.
//
// **Note**: `outputs` must be a list of objects. If your tool returns a single object (e.g. `{"status": 200}`), make sure to wrap it in a list.
// ```
// tool_results = [
//
// {
// "call": {
// "name": <tool name>,
// "parameters": {
// <param name>: <param value>
// }
// },
// "outputs": [{
// <key>: <value>
// }]
// },
// ...
//
// ]
// ```
// **Note**: Chat calls with `tool_results` should not be included in the Chat history to avoid duplication of the message text.
ToolResults []*ChatStreamRequestToolResultsItem `json:"tool_results,omitempty" url:"tool_results,omitempty"`
stream bool
}
func (c *ChatStreamRequest) Stream() bool {
return c.stream
}
func (c *ChatStreamRequest) UnmarshalJSON(data []byte) error {
type unmarshaler ChatStreamRequest
var body unmarshaler
if err := json.Unmarshal(data, &body); err != nil {
return err
}
*c = ChatStreamRequest(body)
c.stream = true
return nil
}
func (c *ChatStreamRequest) MarshalJSON() ([]byte, error) {
type embed ChatStreamRequest
var marshaler = struct {
embed
Stream bool `json:"stream"`
}{
embed: embed(*c),
Stream: true,
}
return json.Marshal(marshaler)
}
type ClassifyRequest struct {
// A list of up to 96 texts to be classified. Each one must be a non-empty string.
// There is, however, no consistent, universal limit to the length a particular input can be. We perform classification on the first `x` tokens of each input, and `x` varies depending on which underlying model is powering classification. The maximum token length for each model is listed in the "max tokens" column [here](https://docs.cohere.com/docs/models).
// Note: by default the `truncate` parameter is set to `END`, so tokens exceeding the limit will be automatically dropped. This behavior can be disabled by setting `truncate` to `NONE`, which will result in validation errors for longer texts.
Inputs []string `json:"inputs,omitempty" url:"inputs,omitempty"`
// An array of examples to provide context to the model. Each example is a text string and its associated label/class. Each unique label requires at least 2 examples associated with it; the maximum number of examples is 2500, and each example has a maximum length of 512 tokens. The values should be structured as `{text: "...", label: "..."}`.
// Note: [Fine-tuned Models](https://docs.cohere.com/docs/classify-fine-tuning) trained on classification examples don't require the `examples` parameter to be passed in explicitly.
Examples []*ClassifyExample `json:"examples,omitempty" url:"examples,omitempty"`
// The identifier of the model. Currently available models are `embed-multilingual-v2.0`, `embed-english-light-v2.0`, and `embed-english-v2.0` (default). Smaller "light" models are faster, while larger models will perform better. [Fine-tuned models](https://docs.cohere.com/docs/fine-tuning) can also be supplied with their full ID.
Model *string `json:"model,omitempty" url:"model,omitempty"`
// The ID of a custom playground preset. You can create presets in the [playground](https://dashboard.cohere.ai/playground/classify?model=large). If you use a preset, all other parameters become optional, and any included parameters will override the preset's parameters.
Preset *string `json:"preset,omitempty" url:"preset,omitempty"`
// One of `NONE|START|END` to specify how the API will handle inputs longer than the maximum token length.
// Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model.
// If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned.
Truncate *ClassifyRequestTruncate `json:"truncate,omitempty" url:"truncate,omitempty"`
}
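// exampleClassifyRequest is an illustrative sketch, not part of the generated
// API: a minimal ClassifyRequest with placeholder inputs. A real request would
// normally also populate Examples (or reference a fine-tuned model), as
// described above.
func exampleClassifyRequest() ([]byte, error) {
	model := "embed-english-v2.0"
	req := &ClassifyRequest{
		Inputs: []string{"Confirm your account now", "Lunch at noon tomorrow?"},
		Model:  &model,
	}
	return json.Marshal(req)
}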
type DetokenizeRequest struct {
// The list of tokens to be detokenized.
Tokens []int `json:"tokens,omitempty" url:"tokens,omitempty"`
// An optional parameter to provide the model name. This will ensure that the detokenization is done by the tokenizer used by that model.
Model *string `json:"model,omitempty" url:"model,omitempty"`
}
type EmbedRequest struct {
// An array of strings for the model to embed. Maximum number of texts per call is `96`. We recommend reducing the length of each text to be under `512` tokens for optimal quality.
Texts []string `json:"texts,omitempty" url:"texts,omitempty"`
// Defaults to `embed-english-v2.0`.
//
// The identifier of the model. Smaller "light" models are faster, while larger models will perform better. [Custom models](/docs/training-custom-models) can also be supplied with their full ID.
//
// Available models and corresponding embedding dimensions:
//
// * `embed-english-v3.0` 1024
// * `embed-multilingual-v3.0` 1024
// * `embed-english-light-v3.0` 384
// * `embed-multilingual-light-v3.0` 384
//
// * `embed-english-v2.0` 4096
// * `embed-english-light-v2.0` 1024
// * `embed-multilingual-v2.0` 768
Model *string `json:"model,omitempty" url:"model,omitempty"`
InputType *EmbedInputType `json:"input_type,omitempty" url:"input_type,omitempty"`
// Specifies the types of embeddings you want to get back. Not required and default is None, which returns the Embed Floats response type. Can be one or more of the following types.
//
// * `"float"`: Use this when you want to get back the default float embeddings. Valid for all models.
// * `"int8"`: Use this when you want to get back signed int8 embeddings. Valid for only v3 models.
// * `"uint8"`: Use this when you want to get back unsigned int8 embeddings. Valid for only v3 models.
// * `"binary"`: Use this when you want to get back signed binary embeddings. Valid for only v3 models.
// * `"ubinary"`: Use this when you want to get back unsigned binary embeddings. Valid for only v3 models.
EmbeddingTypes []EmbedRequestEmbeddingTypesItem `json:"embedding_types,omitempty" url:"embedding_types,omitempty"`
// One of `NONE|START|END` to specify how the API will handle inputs longer than the maximum token length.
//
// Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model.
//
// If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned.
Truncate *EmbedRequestTruncate `json:"truncate,omitempty" url:"truncate,omitempty"`
}
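// exampleEmbedRequest is an illustrative sketch, not part of the generated API:
// a minimal EmbedRequest with placeholder texts. EmbeddingTypes is left unset,
// so the default float embeddings are returned.
func exampleEmbedRequest() ([]byte, error) {
	model := "embed-english-v2.0"
	req := &EmbedRequest{
		Texts: []string{"hello", "goodbye"},
		Model: &model,
	}
	return json.Marshal(req)
}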
type GenerateRequest struct {
// The input text that serves as the starting point for generating the response.
// Note: The prompt will be pre-processed and modified before reaching the model.
Prompt string `json:"prompt" url:"prompt"`
// The identifier of the model to generate with. Currently available models are `command` (default), `command-nightly` (experimental), `command-light`, and `command-light-nightly` (experimental).
// Smaller, "light" models are faster, while larger models will perform better. [Custom models](/docs/training-custom-models) can also be supplied with their full ID.
Model *string `json:"model,omitempty" url:"model,omitempty"`
// The maximum number of generations that will be returned. Defaults to `1`, min value of `1`, max value of `5`.
NumGenerations *int `json:"num_generations,omitempty" url:"num_generations,omitempty"`
// The maximum number of tokens the model will generate as part of the response. Note: Setting a low value may result in incomplete generations.
//
// This parameter is off by default, and if it's not specified, the model will continue generating until it emits an EOS completion token. See [BPE Tokens](/bpe-tokens-wiki) for more details.
//
// Can only be set to `0` if `return_likelihoods` is set to `ALL` to get the likelihood of the prompt.
MaxTokens *int `json:"max_tokens,omitempty" url:"max_tokens,omitempty"`
// One of `NONE|START|END` to specify how the API will handle inputs longer than the maximum token length.
//
// Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model.
//
// If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned.
Truncate *GenerateRequestTruncate `json:"truncate,omitempty" url:"truncate,omitempty"`
// A non-negative float that tunes the degree of randomness in generation. Lower temperatures mean less random generations. See [Temperature](/temperature-wiki) for more details.
// Defaults to `0.75`, min value of `0.0`, max value of `5.0`.
Temperature *float64 `json:"temperature,omitempty" url:"temperature,omitempty"`
// If specified, the backend will make a best effort to sample tokens deterministically, such that repeated requests with the same seed and parameters should return the same result. However, determinism cannot be totally guaranteed.
Seed *float64 `json:"seed,omitempty" url:"seed,omitempty"`
// Identifier of a custom preset. A preset is a combination of parameters, such as prompt, temperature etc. You can create presets in the [playground](https://dashboard.cohere.ai/playground/generate).
// When a preset is specified, the `prompt` parameter becomes optional, and any included parameters will override the preset's parameters.
Preset *string `json:"preset,omitempty" url:"preset,omitempty"`
// The generated text will be cut at the beginning of the earliest occurrence of an end sequence. The sequence will be excluded from the text.
EndSequences []string `json:"end_sequences,omitempty" url:"end_sequences,omitempty"`
// The generated text will be cut at the end of the earliest occurrence of a stop sequence. The sequence will be included in the text.
StopSequences []string `json:"stop_sequences,omitempty" url:"stop_sequences,omitempty"`
// Ensures only the top `k` most likely tokens are considered for generation at each step.
// Defaults to `0`, min value of `0`, max value of `500`.
K *int `json:"k,omitempty" url:"k,omitempty"`
// Ensures that only the most likely tokens, with total probability mass of `p`, are considered for generation at each step. If both `k` and `p` are enabled, `p` acts after `k`.
// Defaults to `0.75`, min value of `0.01`, max value of `0.99`.
P *float64 `json:"p,omitempty" url:"p,omitempty"`
// Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation.
//
// Using `frequency_penalty` in combination with `presence_penalty` is not supported on newer models.
FrequencyPenalty *float64 `json:"frequency_penalty,omitempty" url:"frequency_penalty,omitempty"`
// Defaults to `0.0`, min value of `0.0`, max value of `1.0`.
//
// Can be used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies.
//
// Using `frequency_penalty` in combination with `presence_penalty` is not supported on newer models.
PresencePenalty *float64 `json:"presence_penalty,omitempty" url:"presence_penalty,omitempty"`
// One of `GENERATION|ALL|NONE` to specify how and if the token likelihoods are returned with the response. Defaults to `NONE`.
//
// If `GENERATION` is selected, the token likelihoods will only be provided for generated text.
//
// If `ALL` is selected, the token likelihoods will be provided both for the prompt and the generated text.
ReturnLikelihoods *GenerateRequestReturnLikelihoods `json:"return_likelihoods,omitempty" url:"return_likelihoods,omitempty"`
// When enabled, the user's prompt will be sent to the model without any pre-processing.
RawPrompting *bool `json:"raw_prompting,omitempty" url:"raw_prompting,omitempty"`
stream bool
}
func (g *GenerateRequest) Stream() bool {
return g.stream
}
func (g *GenerateRequest) UnmarshalJSON(data []byte) error {
type unmarshaler GenerateRequest
var body unmarshaler
if err := json.Unmarshal(data, &body); err != nil {
return err
}
*g = GenerateRequest(body)
g.stream = false
return nil
}
func (g *GenerateRequest) MarshalJSON() ([]byte, error) {
type embed GenerateRequest
var marshaler = struct {
embed
Stream bool `json:"stream"`
}{
embed: embed(*g),
Stream: false,
}
return json.Marshal(marshaler)
}
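// exampleGenerateRequest is an illustrative sketch, not part of the generated
// API, with placeholder values. As with ChatRequest, MarshalJSON emits
// `"stream": false` here, while GenerateStreamRequest emits `"stream": true`.
func exampleGenerateRequest() ([]byte, error) {
	maxTokens := 100
	temperature := 0.75
	req := &GenerateRequest{
		Prompt:      "Write a haiku about autumn.",
		MaxTokens:   &maxTokens,
		Temperature: &temperature,
	}
	return json.Marshal(req)
}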
type GenerateStreamRequest struct {
// The input text that serves as the starting point for generating the response.
// Note: The prompt will be pre-processed and modified before reaching the model.
Prompt string `json:"prompt" url:"prompt"`
// The identifier of the model to generate with. Currently available models are `command` (default), `command-nightly` (experimental), `command-light`, and `command-light-nightly` (experimental).
// Smaller, "light" models are faster, while larger models will perform better. [Custom models](/docs/training-custom-models) can also be supplied with their full ID.
Model *string `json:"model,omitempty" url:"model,omitempty"`
// The maximum number of generations that will be returned. Defaults to `1`, min value of `1`, max value of `5`.
NumGenerations *int `json:"num_generations,omitempty" url:"num_generations,omitempty"`
// The maximum number of tokens the model will generate as part of the response. Note: Setting a low value may result in incomplete generations.
//
// This parameter is off by default, and if it's not specified, the model will continue generating until it emits an EOS completion token. See [BPE Tokens](/bpe-tokens-wiki) for more details.
//
// Can only be set to `0` if `return_likelihoods` is set to `ALL` to get the likelihood of the prompt.
MaxTokens *int `json:"max_tokens,omitempty" url:"max_tokens,omitempty"`
// One of `NONE|START|END` to specify how the API will handle inputs longer than the maximum token length.
//
// Passing `START` will discard the start of the input. `END` will discard the end of the input. In both cases, input is discarded until the remaining input is exactly the maximum input token length for the model.
//
// If `NONE` is selected, when the input exceeds the maximum input token length an error will be returned.
Truncate *GenerateStreamRequestTruncate `json:"truncate,omitempty" url:"truncate,omitempty"`
// A non-negative float that tunes the degree of randomness in generation. Lower temperatures mean less random generations. See [Temperature](/temperature-wiki) for more details.
// Defaults to `0.75`, min value of `0.0`, max value of `5.0`.
Temperature *float64 `json:"temperature,omitempty" url:"temperature,omitempty"`
// If specified, the backend will make a best effort to sample tokens deterministically, such that repeated requests with the same seed and parameters should return the same result. However, determinism cannot be totally guaranteed.
Seed *float64 `json:"seed,omitempty" url:"seed,omitempty"`
// Identifier of a custom preset. A preset is a combination of parameters, such as prompt, temperature etc. You can create presets in the [playground](https://dashboard.cohere.ai/playground/generate).
// When a preset is specified, the `prompt` parameter becomes optional, and any included parameters will override the preset's parameters.
Preset *string `json:"preset,omitempty" url:"preset,omitempty"`
// The generated text will be cut at the beginning of the earliest occurrence of an end sequence. The sequence will be excluded from the text.
EndSequences []string `json:"end_sequences,omitempty" url:"end_sequences,omitempty"`
// The generated text will be cut at the end of the earliest occurrence of a stop sequence. The sequence will be included in the text.
StopSequences []string `json:"stop_sequences,omitempty" url:"stop_sequences,omitempty"`
// Ensures only the top `k` most likely tokens are considered for generation at each step.
// Defaults to `0`, min value of `0`, max value of `500`.
K *int `json:"k,omitempty" url:"k,omitempty"`
// Ensures that only the most likely tokens, with total probability mass of `p`, are considered for generation at each step. If both `k` and `p` are enabled, `p` acts after `k`.
// Defaults to `0.75`, min value of `0.01`, max value of `0.99`.
P *float64 `json:"p,omitempty" url:"p,omitempty"`
// Used to reduce repetitiveness of generated tokens. The higher the value, the stronger a penalty is applied to previously present tokens, proportional to how many times they have already appeared in the prompt or prior generation.
//
// Using `frequency_penalty` in combination with `presence_penalty` is not supported on newer models.
FrequencyPenalty *float64 `json:"frequency_penalty,omitempty" url:"frequency_penalty,omitempty"`
// Defaults to `0.0`, min value of `0.0`, max value of `1.0`.
//
// Can be used to reduce repetitiveness of generated tokens. Similar to `frequency_penalty`, except that this penalty is applied equally to all tokens that have already appeared, regardless of their exact frequencies.
//
// Using `frequency_penalty` in combination with `presence_penalty` is not supported on newer models.
PresencePenalty *float64 `json:"presence_penalty,omitempty" url:"presence_penalty,omitempty"`
// One of `GENERATION|ALL|NONE` to specify how and if the token likelihoods are returned with the response. Defaults to `NONE`.
//
// If `GENERATION` is selected, the token likelihoods will only be provided for generated text.
//
// If `ALL` is selected, the token likelihoods will be provided both for the prompt and the generated text.
ReturnLikelihoods *GenerateStreamRequestReturnLikelihoods `json:"return_likelihoods,omitempty" url:"return_likelihoods,omitempty"`
// When enabled, the user's prompt will be sent to the model without any pre-processing.
RawPrompting *bool `json:"raw_prompting,omitempty" url:"raw_prompting,omitempty"`
stream bool
}
func (g *GenerateStreamRequest) Stream() bool {
return g.stream
}
func (g *GenerateStreamRequest) UnmarshalJSON(data []byte) error {
type unmarshaler GenerateStreamRequest
var body unmarshaler
if err := json.Unmarshal(data, &body); err != nil {
return err
}
*g = GenerateStreamRequest(body)
g.stream = true
return nil
}
func (g *GenerateStreamRequest) MarshalJSON() ([]byte, error) {
type embed GenerateStreamRequest
var marshaler = struct {
embed
Stream bool `json:"stream"`
}{
embed: embed(*g),
Stream: true,
}
return json.Marshal(marshaler)
}
type RerankRequest struct {
// The identifier of the model to use, one of: `rerank-english-v2.0`, `rerank-multilingual-v2.0`
Model *string `json:"model,omitempty" url:"model,omitempty"`
// The search query
Query string `json:"query" url:"query"`
// A list of document objects or strings to rerank.
// If a document object is provided, the `text` field is required and all other fields will be preserved in the response.
//
// The total max chunks (length of documents * max_chunks_per_doc) must be less than 10000.
//
// We recommend a maximum of 1,000 documents for optimal endpoint performance.
Documents []*RerankRequestDocumentsItem `json:"documents,omitempty" url:"documents,omitempty"`
// The number of most relevant documents or indices to return, defaults to the length of the documents
TopN *int `json:"top_n,omitempty" url:"top_n,omitempty"`
// - If false, returns results without the doc text - the API will return a list of {index, relevance score} where index is inferred from the list passed into the request.
// - If true, returns results with the doc text passed in - the API will return an ordered list of {index, text, relevance score} where index + text refers to the list passed into the request.
ReturnDocuments *bool `json:"return_documents,omitempty" url:"return_documents,omitempty"`
// The maximum number of chunks to produce internally from a document
MaxChunksPerDoc *int `json:"max_chunks_per_doc,omitempty" url:"max_chunks_per_doc,omitempty"`
}
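// exampleRerankRequest is an illustrative sketch, not part of the generated
// API, with placeholder values. Documents are omitted here; a real request
// must supply them via the Documents field.
func exampleRerankRequest() ([]byte, error) {
	model := "rerank-english-v2.0"
	topN := 3
	returnDocuments := false
	req := &RerankRequest{
		Model:           &model,
		Query:           "What is the capital of the United States?",
		TopN:            &topN,
		ReturnDocuments: &returnDocuments,
	}
	return json.Marshal(req)
}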
type SummarizeRequest struct {
// The text to generate a summary for. Can be up to 100,000 characters long. Currently the only supported language is English.
Text string `json:"text" url:"text"`
// One of `short`, `medium`, `long`, or `auto`, defaults to `auto`. Indicates the approximate length of the summary. If `auto` is selected, the best option will be picked based on the input text.
Length *SummarizeRequestLength `json:"length,omitempty" url:"length,omitempty"`
// One of `paragraph`, `bullets`, or `auto`, defaults to `auto`. Indicates the style in which the summary will be delivered - in a free form paragraph or in bullet points. If `auto` is selected, the best option will be picked based on the input text.
Format *SummarizeRequestFormat `json:"format,omitempty" url:"format,omitempty"`
// The identifier of the model to generate the summary with. Currently available models are `command` (default), `command-nightly` (experimental), `command-light`, and `command-light-nightly` (experimental). Smaller, "light" models are faster, while larger models will perform better.
Model *string `json:"model,omitempty" url:"model,omitempty"`
// One of `low`, `medium`, `high`, or `auto`, defaults to `auto`. Controls how close to the original text the summary is. `high` extractiveness summaries will lean towards reusing sentences verbatim, while `low` extractiveness summaries will tend to paraphrase more. If `auto` is selected, the best option will be picked based on the input text.
Extractiveness *SummarizeRequestExtractiveness `json:"extractiveness,omitempty" url:"extractiveness,omitempty"`
// Ranges from 0 to 5. Controls the randomness of the output. Lower values tend to generate more “predictable” output, while higher values tend to generate more “creative” output. The sweet spot is typically between 0 and 1.
Temperature *float64 `json:"temperature,omitempty" url:"temperature,omitempty"`
// A free-form instruction for modifying how the summaries get generated. Should complete the sentence "Generate a summary _". Eg. "focusing on the next steps" or "written by Yoda"
AdditionalCommand *string `json:"additional_command,omitempty" url:"additional_command,omitempty"`
}
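// exampleSummarizeRequest is an illustrative sketch, not part of the generated
// API, showing a minimal SummarizeRequest with a free-form additional command.
// The text and parameter values are placeholders.
func exampleSummarizeRequest() ([]byte, error) {
	temperature := 0.5
	additionalCommand := "focusing on the next steps"
	req := &SummarizeRequest{
		Text:              "The quarterly review covered hiring, the budget, and the product roadmap.",
		Temperature:       &temperature,
		AdditionalCommand: &additionalCommand,
	}
	return json.Marshal(req)
}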
type TokenizeRequest struct {
// The string to be tokenized. The minimum text length is 1 character, and the maximum text length is 65536 characters.
Text string `json:"text" url:"text"`
// An optional parameter to provide the model name. This will ensure that the tokenization uses the tokenizer used by that model.
Model *string `json:"model,omitempty" url:"model,omitempty"`
}
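// exampleTokenizeRoundTrip is an illustrative sketch, not part of the generated
// API, pairing TokenizeRequest with DetokenizeRequest. The token values are
// placeholders; real tokens come from the Tokenize endpoint's response.
func exampleTokenizeRoundTrip() ([][]byte, error) {
	model := "command"
	tokenizeBody, err := json.Marshal(&TokenizeRequest{Text: "tokenize me!", Model: &model})
	if err != nil {
		return nil, err
	}
	detokenizeBody, err := json.Marshal(&DetokenizeRequest{Tokens: []int{10002, 2261, 2012}, Model: &model})
	if err != nil {
		return nil, err
	}
	return [][]byte{tokenizeBody, detokenizeBody}, nil
}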
type ApiMeta struct {
ApiVersion *ApiMetaApiVersion `json:"api_version,omitempty" url:"api_version,omitempty"`
BilledUnits *ApiMetaBilledUnits `json:"billed_units,omitempty" url:"billed_units,omitempty"`
Warnings []string `json:"warnings,omitempty" url:"warnings,omitempty"`
_rawJSON json.RawMessage
}
func (a *ApiMeta) UnmarshalJSON(data []byte) error {
type unmarshaler ApiMeta
var value unmarshaler
if err := json.Unmarshal(data, &value); err != nil {
return err
}
*a = ApiMeta(value)
a._rawJSON = json.RawMessage(data)
return nil
}
func (a *ApiMeta) String() string {
if len(a._rawJSON) > 0 {
if value, err := core.StringifyJSON(a._rawJSON); err == nil {
return value
}
}
if value, err := core.StringifyJSON(a); err == nil {
return value
}
return fmt.Sprintf("%#v", a)
}
type ApiMetaApiVersion struct {
Version string `json:"version" url:"version"`
IsDeprecated *bool `json:"is_deprecated,omitempty" url:"is_deprecated,omitempty"`
IsExperimental *bool `json:"is_experimental,omitempty" url:"is_experimental,omitempty"`
_rawJSON json.RawMessage
}
func (a *ApiMetaApiVersion) UnmarshalJSON(data []byte) error {
type unmarshaler ApiMetaApiVersion
var value unmarshaler
if err := json.Unmarshal(data, &value); err != nil {
return err
}
*a = ApiMetaApiVersion(value)
a._rawJSON = json.RawMessage(data)
return nil
}
func (a *ApiMetaApiVersion) String() string {
if len(a._rawJSON) > 0 {
if value, err := core.StringifyJSON(a._rawJSON); err == nil {
return value
}
}
if value, err := core.StringifyJSON(a); err == nil {
return value
}
return fmt.Sprintf("%#v", a)
}
type ApiMetaBilledUnits struct {
// The number of billed input tokens.
InputTokens *float64 `json:"input_tokens,omitempty" url:"input_tokens,omitempty"`
// The number of billed output tokens.
OutputTokens *float64 `json:"output_tokens,omitempty" url:"output_tokens,omitempty"`
// The number of billed search units.
SearchUnits *float64 `json:"search_units,omitempty" url:"search_units,omitempty"`
// The number of billed classifications units.
Classifications *float64 `json:"classifications,omitempty" url:"classifications,omitempty"`
_rawJSON json.RawMessage
}
func (a *ApiMetaBilledUnits) UnmarshalJSON(data []byte) error {
type unmarshaler ApiMetaBilledUnits
var value unmarshaler
if err := json.Unmarshal(data, &value); err != nil {
return err
}
*a = ApiMetaBilledUnits(value)
a._rawJSON = json.RawMessage(data)
return nil
}
func (a *ApiMetaBilledUnits) String() string {
if len(a._rawJSON) > 0 {
if value, err := core.StringifyJSON(a._rawJSON); err == nil {
return value
}
}
if value, err := core.StringifyJSON(a); err == nil {
return value
}
return fmt.Sprintf("%#v", a)
}
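// exampleApiMetaRoundTrip is an illustrative sketch, not part of the generated
// API, of the unmarshal/String pattern shared by the response types in this
// file: UnmarshalJSON keeps a copy of the raw payload so String can echo the
// original JSON, including any fields the struct does not model.
func exampleApiMetaRoundTrip() (string, error) {
	payload := []byte(`{"api_version":{"version":"1"},"billed_units":{"input_tokens":10,"output_tokens":42}}`)
	var meta ApiMeta
	if err := json.Unmarshal(payload, &meta); err != nil {
		return "", err
	}
	// String prefers the preserved raw JSON over re-serializing the struct.
	return meta.String(), nil
}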
// The token_type specifies the way the token is passed in the Authorization header. Valid values are "bearer", "basic", and "noscheme".
type AuthTokenType string
const (
AuthTokenTypeBearer AuthTokenType = "bearer"
AuthTokenTypeBasic AuthTokenType = "basic"
AuthTokenTypeNoscheme AuthTokenType = "noscheme"
)
func NewAuthTokenTypeFromString(s string) (AuthTokenType, error) {
switch s {
case "bearer":
return AuthTokenTypeBearer, nil
case "basic":
return AuthTokenTypeBasic, nil
case "noscheme":
return AuthTokenTypeNoscheme, nil
}
var t AuthTokenType
return "", fmt.Errorf("%s is not a valid %T", s, t)
}
func (a AuthTokenType) Ptr() *AuthTokenType {
return &a
}
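// exampleAuthTokenTypeParsing is an illustrative sketch, not part of the
// generated API, showing how the string-backed enums in this file are parsed:
// unknown values yield an error rather than a silent zero value.
func exampleAuthTokenTypeParsing() {
	if tokenType, err := NewAuthTokenTypeFromString("bearer"); err == nil {
		fmt.Println(tokenType) // prints "bearer"
	}
	if _, err := NewAuthTokenTypeFromString("digest"); err != nil {
		fmt.Println(err) // "digest is not a valid api.AuthTokenType"
	}
}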
// A section of the generated reply which cites external knowledge.
type ChatCitation struct {
// The index of text that the citation starts at, counting from zero. For example, a generation of `Hello, world!` with a citation on `world` would have a start value of `7`. This is because the citation starts at `w`, which is the seventh character.
Start int `json:"start" url:"start"`
// The index of text that the citation ends after, counting from zero. For example, a generation of `Hello, world!` with a citation on `world` would have an end value of `11`. This is because the citation ends after `d`, which is the eleventh character.
End int `json:"end" url:"end"`
// The text of the citation. For example, a generation of `Hello, world!` with a citation of `world` would have a text value of `world`.
Text string `json:"text" url:"text"`
// Identifiers of documents cited by this section of the generated reply.
DocumentIds []string `json:"document_ids,omitempty" url:"document_ids,omitempty"`
_rawJSON json.RawMessage
}
func (c *ChatCitation) UnmarshalJSON(data []byte) error {
type unmarshaler ChatCitation
var value unmarshaler
if err := json.Unmarshal(data, &value); err != nil {
return err
}
*c = ChatCitation(value)
c._rawJSON = json.RawMessage(data)
return nil
}
func (c *ChatCitation) String() string {
if len(c._rawJSON) > 0 {
if value, err := core.StringifyJSON(c._rawJSON); err == nil {
return value
}
}
if value, err := core.StringifyJSON(c); err == nil {
return value
}
return fmt.Sprintf("%#v", c)
}
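// exampleChatCitationSpan is an illustrative sketch, not part of the generated
// API, applying the index convention described above: for a reply of
// "Hello, world!" with a citation on "world", Start is 7 and End is 11, so the
// cited span is reply[Start : End+1].
func exampleChatCitationSpan() string {
	reply := "Hello, world!"
	citation := ChatCitation{Start: 7, End: 11, Text: "world"}
	return reply[citation.Start : citation.End+1] // "world"
}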
type ChatCitationGenerationEvent struct {
// Citations for the generated reply.
Citations []*ChatCitation `json:"citations,omitempty" url:"citations,omitempty"`
_rawJSON json.RawMessage
}
func (c *ChatCitationGenerationEvent) UnmarshalJSON(data []byte) error {
type unmarshaler ChatCitationGenerationEvent
var value unmarshaler
if err := json.Unmarshal(data, &value); err != nil {
return err
}
*c = ChatCitationGenerationEvent(value)
c._rawJSON = json.RawMessage(data)
return nil
}
func (c *ChatCitationGenerationEvent) String() string {
if len(c._rawJSON) > 0 {
if value, err := core.StringifyJSON(c._rawJSON); err == nil {
return value
}
}
if value, err := core.StringifyJSON(c); err == nil {
return value
}
return fmt.Sprintf("%#v", c)
}
// The connector used for fetching documents.
type ChatConnector struct {
// The identifier of the connector.
Id string `json:"id" url:"id"`
// When specified, this user access token will be passed to the connector in the Authorization header instead of the Cohere generated one.
UserAccessToken *string `json:"user_access_token,omitempty" url:"user_access_token,omitempty"`
// Defaults to `false`.
//
// When `true`, the request will continue if this connector returned an error.
ContinueOnFailure *bool `json:"continue_on_failure,omitempty" url:"continue_on_failure,omitempty"`
// Provides the connector with different settings at request time. The key/value pairs of this object are specific to each connector.
//
// For example, the connector `web-search` supports the `site` option, which limits search results to the specified domain.
Options map[string]interface{} `json:"options,omitempty" url:"options,omitempty"`
_rawJSON json.RawMessage
}
func (c *ChatConnector) UnmarshalJSON(data []byte) error {
type unmarshaler ChatConnector
var value unmarshaler
if err := json.Unmarshal(data, &value); err != nil {
return err
}
*c = ChatConnector(value)
c._rawJSON = json.RawMessage(data)
return nil
}
func (c *ChatConnector) String() string {
if len(c._rawJSON) > 0 {
if value, err := core.StringifyJSON(c._rawJSON); err == nil {
return value
}
}
if value, err := core.StringifyJSON(c); err == nil {
return value
}
return fmt.Sprintf("%#v", c)
}
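// exampleChatConnectors is an illustrative sketch, not part of the generated
// API, showing the connector shape described above: the built-in web-search
// connector restricted to a single (placeholder) domain via its `site` option.
func exampleChatConnectors() []*ChatConnector {
	return []*ChatConnector{
		{
			Id:      "web-search",
			Options: map[string]interface{}{"site": "example.com"},
		},
	}
}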
type ChatDataMetrics struct {
// The sum of all turns of valid train examples.
NumTrainTurns *string `json:"numTrainTurns,omitempty" url:"numTrainTurns,omitempty"`
// The sum of all turns of valid eval examples.
NumEvalTurns *string `json:"numEvalTurns,omitempty" url:"numEvalTurns,omitempty"`
// The preamble of this dataset.
Preamble *string `json:"preamble,omitempty" url:"preamble,omitempty"`
_rawJSON json.RawMessage
}
func (c *ChatDataMetrics) UnmarshalJSON(data []byte) error {
type unmarshaler ChatDataMetrics
var value unmarshaler
if err := json.Unmarshal(data, &value); err != nil {
return err
}
*c = ChatDataMetrics(value)
c._rawJSON = json.RawMessage(data)
return nil
}
func (c *ChatDataMetrics) String() string {
if len(c._rawJSON) > 0 {
if value, err := core.StringifyJSON(c._rawJSON); err == nil {
return value
}
}
if value, err := core.StringifyJSON(c); err == nil {
return value
}
return fmt.Sprintf("%#v", c)
}
// Relevant information that could be used by the model to generate a more accurate reply.
// The contents of each document are generally short (under 300 words), and are passed in the form of a
// dictionary of strings. Some suggested keys are "text", "author", "date". Both the key name and the value will be
// passed to the model.
type ChatDocument = map[string]string
// Represents a single message in the chat history, excluding the current user turn. It has two properties: `role` and `message`. The `role` identifies the sender (`CHATBOT`, `SYSTEM`, or `USER`), while the `message` contains the text content.
//
// The chat_history parameter should not be used for `SYSTEM` messages in most cases. Instead, to add a `SYSTEM` role message at the beginning of a conversation, the `preamble` parameter should be used.
type ChatMessage struct {
// One of `CHATBOT`, `SYSTEM`, or `USER` to identify who the message is coming from.
Role ChatMessageRole `json:"role,omitempty" url:"role,omitempty"`
// Contents of the chat message.
Message string `json:"message" url:"message"`
_rawJSON json.RawMessage
}
func (c *ChatMessage) UnmarshalJSON(data []byte) error {
type unmarshaler ChatMessage
var value unmarshaler
if err := json.Unmarshal(data, &value); err != nil {
return err
}
*c = ChatMessage(value)
c._rawJSON = json.RawMessage(data)
return nil
}
func (c *ChatMessage) String() string {
if len(c._rawJSON) > 0 {
if value, err := core.StringifyJSON(c._rawJSON); err == nil {
return value
}
}
if value, err := core.StringifyJSON(c); err == nil {
return value
}
return fmt.Sprintf("%#v", c)
}
// One of `CHATBOT`, `SYSTEM`, or `USER` to identify who the message is coming from.
type ChatMessageRole string
const (
ChatMessageRoleChatbot ChatMessageRole = "CHATBOT"
ChatMessageRoleSystem ChatMessageRole = "SYSTEM"
ChatMessageRoleUser ChatMessageRole = "USER"
)
func NewChatMessageRoleFromString(s string) (ChatMessageRole, error) {
switch s {
case "CHATBOT":
return ChatMessageRoleChatbot, nil
case "SYSTEM":
return ChatMessageRoleSystem, nil
case "USER":
return ChatMessageRoleUser, nil
}
var t ChatMessageRole
return "", fmt.Errorf("%s is not a valid %T", s, t)
}
func (c ChatMessageRole) Ptr() *ChatMessageRole {
return &c
}
// Defaults to `"accurate"`.
//
// Dictates the approach taken to generating citations as part of the RAG flow by allowing the user to specify whether they want `"accurate"` results or `"fast"` results.
type ChatRequestCitationQuality string
const (
ChatRequestCitationQualityFast ChatRequestCitationQuality = "fast"
ChatRequestCitationQualityAccurate ChatRequestCitationQuality = "accurate"
)
func NewChatRequestCitationQualityFromString(s string) (ChatRequestCitationQuality, error) {
switch s {
case "fast":
return ChatRequestCitationQualityFast, nil
case "accurate":
return ChatRequestCitationQualityAccurate, nil
}
var t ChatRequestCitationQuality
return "", fmt.Errorf("%s is not a valid %T", s, t)
}
func (c ChatRequestCitationQuality) Ptr() *ChatRequestCitationQuality {
return &c
}
// (internal) Sets inference and model options for RAG search query and tool use generations. Defaults are used when options are not specified here, meaning that other parameters outside of connectors_search_options are ignored (such as model= or temperature=).
type ChatRequestConnectorsSearchOptions struct {
Model interface{} `json:"model,omitempty" url:"model,omitempty"`
Temperature interface{} `json:"temperature,omitempty" url:"temperature,omitempty"`
MaxTokens interface{} `json:"max_tokens,omitempty" url:"max_tokens,omitempty"`
Preamble interface{} `json:"preamble,omitempty" url:"preamble,omitempty"`
// If specified, the backend will make a best effort to sample tokens deterministically, such that repeated requests with the same seed and parameters should return the same result. However, determinism cannot be totally guaranteed.
Seed *float64 `json:"seed,omitempty" url:"seed,omitempty"`
_rawJSON json.RawMessage
}
func (c *ChatRequestConnectorsSearchOptions) UnmarshalJSON(data []byte) error {
type unmarshaler ChatRequestConnectorsSearchOptions
var value unmarshaler
if err := json.Unmarshal(data, &value); err != nil {
return err
}
*c = ChatRequestConnectorsSearchOptions(value)
c._rawJSON = json.RawMessage(data)
return nil
}
func (c *ChatRequestConnectorsSearchOptions) String() string {
if len(c._rawJSON) > 0 {
if value, err := core.StringifyJSON(c._rawJSON); err == nil {
return value
}
}
if value, err := core.StringifyJSON(c); err == nil {
return value
}
return fmt.Sprintf("%#v", c)
}
// (internal) Overrides specified parts of the default Chat or RAG preamble. It is recommended that these options only be used in specific scenarios where the defaults are not adequate.
type ChatRequestPromptOverride struct {
Preamble interface{} `json:"preamble,omitempty" url:"preamble,omitempty"`
TaskDescription interface{} `json:"task_description,omitempty" url:"task_description,omitempty"`
StyleGuide interface{} `json:"style_guide,omitempty" url:"style_guide,omitempty"`
_rawJSON json.RawMessage
}
func (c *ChatRequestPromptOverride) UnmarshalJSON(data []byte) error {
type unmarshaler ChatRequestPromptOverride
var value unmarshaler
if err := json.Unmarshal(data, &value); err != nil {
return err
}
*c = ChatRequestPromptOverride(value)
c._rawJSON = json.RawMessage(data)
return nil
}
func (c *ChatRequestPromptOverride) String() string {
if len(c._rawJSON) > 0 {