###################### OpenHands Configuration Example ######################
#
# All settings have default values, so you only need to uncomment and
# modify what you want to change.
# The fields within each section are sorted in alphabetical order.
#
##############################################################################
#################################### Core ####################################
# General core configurations
##############################################################################
[core]
# API key for E2B
#e2b_api_key = ""
# Base path for the workspace
workspace_base = "./workspace"
# Cache directory path
#cache_dir = "/tmp/cache"
# Debugging enabled
#debug = false
# Disable color in terminal output
#disable_color = false
# Enable saving and restoring the session when run from CLI
#enable_cli_session = false
# File store path
#file_store_path = "/tmp/file_store"
# File store type
#file_store = "memory"
# List of allowed file extensions for uploads
#file_uploads_allowed_extensions = [".*"]
# Maximum file size for uploads, in megabytes
#file_uploads_max_file_size_mb = 0
# Maximum budget per task, 0.0 means no limit
#max_budget_per_task = 0.0
# Maximum number of iterations
#max_iterations = 100
# Path to mount the workspace in the sandbox
#workspace_mount_path_in_sandbox = "/workspace"
# Path to mount the workspace
#workspace_mount_path = ""
# Path to rewrite the workspace mount path to
#workspace_mount_rewrite = ""
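# For example (illustrative host path), mounting a specific project directory
# instead of the default relative workspace might look like:
#workspace_mount_path = "/home/user/projects/my-repo"
#workspace_mount_path_in_sandbox = "/workspace"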
# Run as the 'openhands' user inside the sandbox (instead of root)
#run_as_openhands = true
# Runtime environment
#runtime = "eventstream"
# Name of the default agent
#default_agent = "CodeActAgent"
# JWT secret for authentication
#jwt_secret = ""
# Restrict file types for file uploads
#file_uploads_restrict_file_types = false
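# For example, one possible (purely illustrative) combination that only
# accepts a few text-based file types for uploads could look like:
#file_uploads_restrict_file_types = true
#file_uploads_allowed_extensions = [".md", ".py", ".txt"]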
#################################### LLM #####################################
# Configuration for LLM models (group name starts with 'llm')
# Use 'llm' for the default LLM config
##############################################################################
[llm]
# AWS access key ID
#aws_access_key_id = ""
# AWS region name
#aws_region_name = ""
# AWS secret access key
#aws_secret_access_key = ""
# API key to use
api_key = "your-api-key"
# API base URL
#base_url = ""
# API version
#api_version = ""
# Cost per input token
#input_cost_per_token = 0.0
# Cost per output token
#output_cost_per_token = 0.0
# Custom LLM provider
#custom_llm_provider = ""
# Embedding API base URL
#embedding_base_url = ""
# Embedding deployment name
#embedding_deployment_name = ""
# Embedding model to use
embedding_model = "local"
# Maximum number of characters in an observation's content
#max_message_chars = 10000
# Maximum number of input tokens
#max_input_tokens = 0
# Maximum number of output tokens
#max_output_tokens = 0
# Model to use
model = "gpt-4o"
# Number of retries to attempt when an operation fails with the LLM.
# Increase this value to allow more attempts before giving up
#num_retries = 8
# Maximum wait time (in seconds) between retry attempts
# This caps the exponential backoff to prevent excessively long wait times
#retry_max_wait = 120
# Minimum wait time (in seconds) between retry attempts
# This sets the initial delay before the first retry
#retry_min_wait = 15
# Multiplier for exponential backoff calculation
# The wait time increases by this factor after each failed attempt
# A value of 2.0 means each retry waits twice as long as the previous one
#retry_multiplier = 2.0
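# With the defaults above (retry_min_wait = 15, retry_multiplier = 2.0,
# retry_max_wait = 120), the waits would be roughly 15s, 30s, 60s, and then
# 120s for every later retry, since retry_max_wait caps the backoff.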
# Drop any unmapped (unsupported) params without causing an exception
#drop_params = false
# Use prompt caching if the LLM provides and supports it
#caching_prompt = true
# Base URL for the Ollama API
#ollama_base_url = ""
# Temperature for the API
#temperature = 0.0
# Timeout for the API
#timeout = 0
# Top p for the API
#top_p = 1.0
# If the model is vision capable, this option allows disabling image processing (useful for cost reduction).
#disable_vision = true
[llm.gpt4o-mini]
# API key to use
api_key = "your-api-key"
# Model to use
model = "gpt-4o-mini"
#################################### Agent ###################################
# Configuration for agents (group name starts with 'agent')
# Use 'agent' for the default agent config
# otherwise, group name must be `agent.<agent_name>` (case-sensitive), e.g.
# agent.CodeActAgent
##############################################################################
[agent]
# Name of the micro agent to use for this agent
#micro_agent_name = ""
# Memory enabled
#memory_enabled = false
# Memory maximum threads
#memory_max_threads = 3
# LLM config group to use
#llm_config = 'your-llm-config-group'
[agent.RepoExplorerAgent]
# Example: use a cheaper model for RepoExplorerAgent to reduce cost, especially
# useful when an agent doesn't demand high quality but uses a lot of tokens
# (this references the [llm.gpt4o-mini] group defined above)
llm_config = 'gpt4o-mini'
#################################### Sandbox ###################################
# Configuration for the sandbox
##############################################################################
[sandbox]
# Sandbox timeout in seconds
#timeout = 120
# Sandbox user ID
#user_id = 1000
# Container image to use for the sandbox
#base_container_image = "nikolaik/python-nodejs:python3.12-nodejs22"
# Use host network
#use_host_network = false
# Enable auto linting after editing
#enable_auto_lint = false
# Whether to initialize plugins
#initialize_plugins = true
# Extra dependencies to install in the runtime image
#runtime_extra_deps = ""
# Environment variables to set at the launch of the runtime
#runtime_startup_env_vars = {}
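# For example (hypothetical variable name), environment variables are passed
# as a TOML inline table:
#runtime_startup_env_vars = { LOG_LEVEL = "debug" }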
# BrowserGym environment to use for evaluation
#browsergym_eval_env = ""
#################################### Security ###################################
# Configuration for security features
##############################################################################
[security]
# Enable confirmation mode
#confirmation_mode = false
# The security analyzer to use
#security_analyzer = ""
#################################### Eval ####################################
# Configuration for the evaluation; please refer to the specific evaluation
# plugin for the available options.
##############################################################################