From f15f970032a6b968d2abcd754e7cbf22a61cd5c8 Mon Sep 17 00:00:00 2001
From: Tyler Thomas
Date: Thu, 22 Feb 2024 23:27:23 -0700
Subject: [PATCH] initial docs for `generate_content`

---
 src/GoogleGenAI.jl | 25 ++++++++++++++++---------
 1 file changed, 16 insertions(+), 9 deletions(-)

diff --git a/src/GoogleGenAI.jl b/src/GoogleGenAI.jl
index 6767578..280a5e6 100644
--- a/src/GoogleGenAI.jl
+++ b/src/GoogleGenAI.jl
@@ -64,16 +64,23 @@ function _parse_response(response::HTTP.Messages.Response)
     )
 end
 
-#TODO: Add Documentation and tests (this is from the python api)
-# temperature: The temperature for randomness in generation. Defaults to None.
-# candidate_count: The number of candidates to consider. Defaults to None. (Only one can be specified right now)
-# max_output_tokens: The maximum number of output tokens. Defaults to None.
-# safety_settings: Safety settings for generated text. Defaults to None.
-# stop_sequences: Stop sequences to halt text generation. Can be a string
-# or iterable of strings. Defaults to None.
-
 """
-
+    generate_content(provider::GoogleProvider, model_name::String, input::String; kwargs...)
+    generate_content(api_key::String, model_name::String, input::String; kwargs...)
+
+Generate text using the specified model.
+
+# Arguments
+- `provider::GoogleProvider`: The provider to use for the request.
+- `model_name::String`: The model to use for the request.
+- `input::String`: The input prompt to use for the request.
+
+# Keyword Arguments
+- `temperature::Float64`: The temperature for randomness in generation.
+- `candidate_count::Int`: The number of candidates to consider. (Only one can be specified right now)
+- `max_output_tokens::Int`: The maximum number of output tokens.
+- `stop_sequences::Vector{String}`: Stop sequences to halt text generation.
+- `safety_settings::Dict`: Safety settings for generated text.
 """
 function generate_content(
     provider::GoogleProvider, model_name::String, input::String; kwargs...