From d35e4eaeabd336c5bf9143efbaf0e9c7d0df6292 Mon Sep 17 00:00:00 2001 From: <> Date: Mon, 15 Apr 2024 12:04:58 +0000 Subject: [PATCH] Deployed 9103d06 with MkDocs version: 1.5.3 --- .nojekyll | 0 404.html | 1794 +++++ api/guide/index.html | 4272 +++++++++++ api/index.html | 1859 +++++ api/json_schema/index.html | 2758 +++++++ api/models/index.html | 4841 ++++++++++++ api/parsing/index.html | 3281 ++++++++ api/prompts/index.html | 3296 ++++++++ api/regex/index.html | 2100 ++++++ api/samplers/index.html | 3616 +++++++++ assets/_mkdocstrings.css | 114 + assets/images/dottxt.png | Bin 0 -> 42419 bytes assets/images/favicon.png | Bin 0 -> 1870 bytes assets/images/logo.png | Bin 0 -> 372647 bytes assets/images/normal_computing.jpg | Bin 0 -> 64918 bytes assets/images/social/api/guide.png | Bin 0 -> 26940 bytes assets/images/social/api/index.png | Bin 0 -> 26940 bytes assets/images/social/api/json_schema.png | Bin 0 -> 26940 bytes assets/images/social/api/models.png | Bin 0 -> 26940 bytes assets/images/social/api/parsing.png | Bin 0 -> 26940 bytes assets/images/social/api/prompts.png | Bin 0 -> 26940 bytes assets/images/social/api/regex.png | Bin 0 -> 26940 bytes assets/images/social/api/samplers.png | Bin 0 -> 26940 bytes assets/images/social/blog/archive/2024.png | Bin 0 -> 26940 bytes .../images/social/blog/category/roadmap.png | Bin 0 -> 26940 bytes assets/images/social/blog/index.png | Bin 0 -> 26940 bytes .../images/social/blog/posts/roadmap-2024.png | Bin 0 -> 26940 bytes assets/images/social/community/contribute.png | Bin 0 -> 26940 bytes assets/images/social/community/feedback.png | Bin 0 -> 26940 bytes assets/images/social/community/index.png | Bin 0 -> 26940 bytes .../social/cookbook/chain_of_density.png | Bin 0 -> 26940 bytes .../images/social/cookbook/classification.png | Bin 0 -> 26940 bytes .../social/cookbook/dating_profiles.png | Bin 0 -> 26940 bytes assets/images/social/cookbook/extraction.png | Bin 0 -> 26940 bytes 
assets/images/social/cookbook/index.png | Bin 0 -> 26940 bytes .../social/cookbook/models_playing_chess.png | Bin 0 -> 26940 bytes assets/images/social/index.png | Bin 0 -> 26940 bytes assets/images/social/installation.png | Bin 0 -> 26940 bytes assets/images/social/licence.png | Bin 0 -> 26940 bytes assets/images/social/quickstart.png | Bin 0 -> 26940 bytes assets/images/social/reference/cfg.png | Bin 0 -> 26940 bytes assets/images/social/reference/choices.png | Bin 0 -> 26940 bytes .../social/reference/custom_fsm_ops.png | Bin 0 -> 26940 bytes assets/images/social/reference/functions.png | Bin 0 -> 26940 bytes assets/images/social/reference/index.png | Bin 0 -> 26940 bytes assets/images/social/reference/json.png | Bin 0 -> 26940 bytes assets/images/social/reference/json_mode.png | Bin 0 -> 26940 bytes .../social/reference/models/exllamav2.png | Bin 0 -> 26940 bytes .../social/reference/models/llamacpp.png | Bin 0 -> 26940 bytes .../images/social/reference/models/mamba.png | Bin 0 -> 26940 bytes .../images/social/reference/models/openai.png | Bin 0 -> 26940 bytes .../social/reference/models/transformers.png | Bin 0 -> 26940 bytes .../images/social/reference/models/vllm.png | Bin 0 -> 26940 bytes assets/images/social/reference/prompting.png | Bin 0 -> 26940 bytes assets/images/social/reference/regex.png | Bin 0 -> 26940 bytes assets/images/social/reference/samplers.png | Bin 0 -> 26940 bytes assets/images/social/reference/serve/vllm.png | Bin 0 -> 26940 bytes assets/images/social/reference/text.png | Bin 0 -> 26940 bytes assets/images/social/reference/types.png | Bin 0 -> 26940 bytes assets/images/social/welcome.png | Bin 0 -> 26940 bytes assets/javascripts/bundle.1e8ae164.min.js | 29 + assets/javascripts/bundle.1e8ae164.min.js.map | 7 + assets/javascripts/lunr/min/lunr.ar.min.js | 1 + assets/javascripts/lunr/min/lunr.da.min.js | 18 + assets/javascripts/lunr/min/lunr.de.min.js | 18 + assets/javascripts/lunr/min/lunr.du.min.js | 18 + 
assets/javascripts/lunr/min/lunr.el.min.js | 1 + assets/javascripts/lunr/min/lunr.es.min.js | 18 + assets/javascripts/lunr/min/lunr.fi.min.js | 18 + assets/javascripts/lunr/min/lunr.fr.min.js | 18 + assets/javascripts/lunr/min/lunr.he.min.js | 1 + assets/javascripts/lunr/min/lunr.hi.min.js | 1 + assets/javascripts/lunr/min/lunr.hu.min.js | 18 + assets/javascripts/lunr/min/lunr.hy.min.js | 1 + assets/javascripts/lunr/min/lunr.it.min.js | 18 + assets/javascripts/lunr/min/lunr.ja.min.js | 1 + assets/javascripts/lunr/min/lunr.jp.min.js | 1 + assets/javascripts/lunr/min/lunr.kn.min.js | 1 + assets/javascripts/lunr/min/lunr.ko.min.js | 1 + assets/javascripts/lunr/min/lunr.multi.min.js | 1 + assets/javascripts/lunr/min/lunr.nl.min.js | 18 + assets/javascripts/lunr/min/lunr.no.min.js | 18 + assets/javascripts/lunr/min/lunr.pt.min.js | 18 + assets/javascripts/lunr/min/lunr.ro.min.js | 18 + assets/javascripts/lunr/min/lunr.ru.min.js | 18 + assets/javascripts/lunr/min/lunr.sa.min.js | 1 + .../lunr/min/lunr.stemmer.support.min.js | 1 + assets/javascripts/lunr/min/lunr.sv.min.js | 18 + assets/javascripts/lunr/min/lunr.ta.min.js | 1 + assets/javascripts/lunr/min/lunr.te.min.js | 1 + assets/javascripts/lunr/min/lunr.th.min.js | 1 + assets/javascripts/lunr/min/lunr.tr.min.js | 18 + assets/javascripts/lunr/min/lunr.vi.min.js | 1 + assets/javascripts/lunr/min/lunr.zh.min.js | 1 + assets/javascripts/lunr/tinyseg.js | 206 + assets/javascripts/lunr/wordcut.js | 6708 +++++++++++++++++ .../workers/search.b8dbb3d2.min.js | 42 + .../workers/search.b8dbb3d2.min.js.map | 7 + assets/stylesheets/main.bcfcd587.min.css | 1 + assets/stylesheets/main.bcfcd587.min.css.map | 1 + assets/stylesheets/palette.06af60db.min.css | 1 + .../stylesheets/palette.06af60db.min.css.map | 1 + blog/2024/01/10/roadmap-for-2024/index.html | 2111 ++++++ blog/archive/2024/index.html | 1942 +++++ blog/assets/4000_stars.png | Bin 0 -> 57307 bytes blog/category/roadmap/index.html | 1942 +++++ blog/index.html | 1958 +++++ 
community/belonging.png | Bin 0 -> 142520 bytes community/contribute/index.html | 2077 +++++ community/feedback/index.html | 1930 +++++ community/index.html | 1880 +++++ cookbook/chain_of_density/index.html | 2044 +++++ cookbook/classification/index.html | 2017 +++++ cookbook/dating_profiles/index.html | 2190 ++++++ cookbook/extraction/index.html | 1940 +++++ cookbook/images/chain_of_density.png | Bin 0 -> 515603 bytes cookbook/index.html | 1866 +++++ cookbook/models_playing_chess/index.html | 2057 +++++ index.html | 1885 +++++ installation/index.html | 1961 +++++ licence/index.html | 1892 +++++ logos/amazon.png | Bin 0 -> 19489 bytes logos/apple.png | Bin 0 -> 21938 bytes logos/best_buy.png | Bin 0 -> 22328 bytes logos/canoe.png | Bin 0 -> 3719 bytes logos/cisco.png | Bin 0 -> 5784 bytes logos/dassault_systems.png | Bin 0 -> 21865 bytes logos/databricks.png | Bin 0 -> 59004 bytes logos/datadog.png | Bin 0 -> 32779 bytes logos/dbt_labs.png | Bin 0 -> 13418 bytes logos/gladia.jpg | Bin 0 -> 11198 bytes logos/harvard.png | Bin 0 -> 31910 bytes logos/hf.png | Bin 0 -> 91529 bytes logos/johns_hopkins.png | Bin 0 -> 76335 bytes logos/meta.png | Bin 0 -> 20763 bytes logos/mit.png | Bin 0 -> 1483 bytes logos/mount_sinai.png | Bin 0 -> 23749 bytes logos/nvidia.png | Bin 0 -> 15146 bytes logos/nyu.png | Bin 0 -> 30682 bytes logos/safran.png | Bin 0 -> 24435 bytes logos/salesforce.png | Bin 0 -> 20712 bytes logos/shopify.png | Bin 0 -> 8003 bytes logos/smithsonian.png | Bin 0 -> 35574 bytes logos/tinder.png | Bin 0 -> 28182 bytes logos/upenn.png | Bin 0 -> 97063 bytes objects.inv | Bin 0 -> 838 bytes overrides/index.html | 11 + overrides/main.html | 22 + quickstart/index.html | 2474 ++++++ reference/cfg/index.html | 2134 ++++++ reference/choices/index.html | 1892 +++++ reference/custom_fsm_ops/index.html | 1995 +++++ reference/functions/index.html | 1881 +++++ reference/index.html | 1897 +++++ reference/json/index.html | 2055 +++++ reference/json_mode/index.html | 1894 +++++ 
reference/models/exllamav2/index.html | 1882 +++++ reference/models/llamacpp/index.html | 2455 ++++++ reference/models/mamba/index.html | 1882 +++++ reference/models/openai/index.html | 2014 +++++ reference/models/transformers/index.html | 1900 +++++ reference/models/vllm/index.html | 2424 ++++++ reference/prompting/index.html | 2466 ++++++ reference/regex/index.html | 1903 +++++ reference/samplers/index.html | 2103 ++++++ reference/serve/vllm/index.html | 1993 +++++ reference/text/index.html | 2082 +++++ reference/types/index.html | 1899 +++++ search/search_index.json | 1 + sitemap.xml | 3 + sitemap.xml.gz | Bin 0 -> 127 bytes stylesheets/extra.css | 119 + welcome/index.html | 2134 ++++++ 173 files changed, 110428 insertions(+) create mode 100644 .nojekyll create mode 100644 404.html create mode 100644 api/guide/index.html create mode 100644 api/index.html create mode 100644 api/json_schema/index.html create mode 100644 api/models/index.html create mode 100644 api/parsing/index.html create mode 100644 api/prompts/index.html create mode 100644 api/regex/index.html create mode 100644 api/samplers/index.html create mode 100644 assets/_mkdocstrings.css create mode 100644 assets/images/dottxt.png create mode 100644 assets/images/favicon.png create mode 100644 assets/images/logo.png create mode 100644 assets/images/normal_computing.jpg create mode 100644 assets/images/social/api/guide.png create mode 100644 assets/images/social/api/index.png create mode 100644 assets/images/social/api/json_schema.png create mode 100644 assets/images/social/api/models.png create mode 100644 assets/images/social/api/parsing.png create mode 100644 assets/images/social/api/prompts.png create mode 100644 assets/images/social/api/regex.png create mode 100644 assets/images/social/api/samplers.png create mode 100644 assets/images/social/blog/archive/2024.png create mode 100644 assets/images/social/blog/category/roadmap.png create mode 100644 assets/images/social/blog/index.png create mode 
100644 assets/images/social/blog/posts/roadmap-2024.png create mode 100644 assets/images/social/community/contribute.png create mode 100644 assets/images/social/community/feedback.png create mode 100644 assets/images/social/community/index.png create mode 100644 assets/images/social/cookbook/chain_of_density.png create mode 100644 assets/images/social/cookbook/classification.png create mode 100644 assets/images/social/cookbook/dating_profiles.png create mode 100644 assets/images/social/cookbook/extraction.png create mode 100644 assets/images/social/cookbook/index.png create mode 100644 assets/images/social/cookbook/models_playing_chess.png create mode 100644 assets/images/social/index.png create mode 100644 assets/images/social/installation.png create mode 100644 assets/images/social/licence.png create mode 100644 assets/images/social/quickstart.png create mode 100644 assets/images/social/reference/cfg.png create mode 100644 assets/images/social/reference/choices.png create mode 100644 assets/images/social/reference/custom_fsm_ops.png create mode 100644 assets/images/social/reference/functions.png create mode 100644 assets/images/social/reference/index.png create mode 100644 assets/images/social/reference/json.png create mode 100644 assets/images/social/reference/json_mode.png create mode 100644 assets/images/social/reference/models/exllamav2.png create mode 100644 assets/images/social/reference/models/llamacpp.png create mode 100644 assets/images/social/reference/models/mamba.png create mode 100644 assets/images/social/reference/models/openai.png create mode 100644 assets/images/social/reference/models/transformers.png create mode 100644 assets/images/social/reference/models/vllm.png create mode 100644 assets/images/social/reference/prompting.png create mode 100644 assets/images/social/reference/regex.png create mode 100644 assets/images/social/reference/samplers.png create mode 100644 assets/images/social/reference/serve/vllm.png create mode 100644 
assets/images/social/reference/text.png create mode 100644 assets/images/social/reference/types.png create mode 100644 assets/images/social/welcome.png create mode 100644 assets/javascripts/bundle.1e8ae164.min.js create mode 100644 assets/javascripts/bundle.1e8ae164.min.js.map create mode 100644 assets/javascripts/lunr/min/lunr.ar.min.js create mode 100644 assets/javascripts/lunr/min/lunr.da.min.js create mode 100644 assets/javascripts/lunr/min/lunr.de.min.js create mode 100644 assets/javascripts/lunr/min/lunr.du.min.js create mode 100644 assets/javascripts/lunr/min/lunr.el.min.js create mode 100644 assets/javascripts/lunr/min/lunr.es.min.js create mode 100644 assets/javascripts/lunr/min/lunr.fi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.fr.min.js create mode 100644 assets/javascripts/lunr/min/lunr.he.min.js create mode 100644 assets/javascripts/lunr/min/lunr.hi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.hu.min.js create mode 100644 assets/javascripts/lunr/min/lunr.hy.min.js create mode 100644 assets/javascripts/lunr/min/lunr.it.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ja.min.js create mode 100644 assets/javascripts/lunr/min/lunr.jp.min.js create mode 100644 assets/javascripts/lunr/min/lunr.kn.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ko.min.js create mode 100644 assets/javascripts/lunr/min/lunr.multi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.nl.min.js create mode 100644 assets/javascripts/lunr/min/lunr.no.min.js create mode 100644 assets/javascripts/lunr/min/lunr.pt.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ro.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ru.min.js create mode 100644 assets/javascripts/lunr/min/lunr.sa.min.js create mode 100644 assets/javascripts/lunr/min/lunr.stemmer.support.min.js create mode 100644 assets/javascripts/lunr/min/lunr.sv.min.js create mode 100644 assets/javascripts/lunr/min/lunr.ta.min.js create mode 100644 
assets/javascripts/lunr/min/lunr.te.min.js create mode 100644 assets/javascripts/lunr/min/lunr.th.min.js create mode 100644 assets/javascripts/lunr/min/lunr.tr.min.js create mode 100644 assets/javascripts/lunr/min/lunr.vi.min.js create mode 100644 assets/javascripts/lunr/min/lunr.zh.min.js create mode 100644 assets/javascripts/lunr/tinyseg.js create mode 100644 assets/javascripts/lunr/wordcut.js create mode 100644 assets/javascripts/workers/search.b8dbb3d2.min.js create mode 100644 assets/javascripts/workers/search.b8dbb3d2.min.js.map create mode 100644 assets/stylesheets/main.bcfcd587.min.css create mode 100644 assets/stylesheets/main.bcfcd587.min.css.map create mode 100644 assets/stylesheets/palette.06af60db.min.css create mode 100644 assets/stylesheets/palette.06af60db.min.css.map create mode 100644 blog/2024/01/10/roadmap-for-2024/index.html create mode 100644 blog/archive/2024/index.html create mode 100644 blog/assets/4000_stars.png create mode 100644 blog/category/roadmap/index.html create mode 100644 blog/index.html create mode 100644 community/belonging.png create mode 100644 community/contribute/index.html create mode 100644 community/feedback/index.html create mode 100644 community/index.html create mode 100644 cookbook/chain_of_density/index.html create mode 100644 cookbook/classification/index.html create mode 100644 cookbook/dating_profiles/index.html create mode 100644 cookbook/extraction/index.html create mode 100644 cookbook/images/chain_of_density.png create mode 100644 cookbook/index.html create mode 100644 cookbook/models_playing_chess/index.html create mode 100644 index.html create mode 100644 installation/index.html create mode 100644 licence/index.html create mode 100644 logos/amazon.png create mode 100644 logos/apple.png create mode 100644 logos/best_buy.png create mode 100644 logos/canoe.png create mode 100644 logos/cisco.png create mode 100644 logos/dassault_systems.png create mode 100644 logos/databricks.png create mode 100644 
logos/datadog.png create mode 100644 logos/dbt_labs.png create mode 100644 logos/gladia.jpg create mode 100644 logos/harvard.png create mode 100644 logos/hf.png create mode 100644 logos/johns_hopkins.png create mode 100644 logos/meta.png create mode 100644 logos/mit.png create mode 100644 logos/mount_sinai.png create mode 100644 logos/nvidia.png create mode 100644 logos/nyu.png create mode 100644 logos/safran.png create mode 100644 logos/salesforce.png create mode 100644 logos/shopify.png create mode 100644 logos/smithsonian.png create mode 100644 logos/tinder.png create mode 100644 logos/upenn.png create mode 100644 objects.inv create mode 100644 overrides/index.html create mode 100644 overrides/main.html create mode 100644 quickstart/index.html create mode 100644 reference/cfg/index.html create mode 100644 reference/choices/index.html create mode 100644 reference/custom_fsm_ops/index.html create mode 100644 reference/functions/index.html create mode 100644 reference/index.html create mode 100644 reference/json/index.html create mode 100644 reference/json_mode/index.html create mode 100644 reference/models/exllamav2/index.html create mode 100644 reference/models/llamacpp/index.html create mode 100644 reference/models/mamba/index.html create mode 100644 reference/models/openai/index.html create mode 100644 reference/models/transformers/index.html create mode 100644 reference/models/vllm/index.html create mode 100644 reference/prompting/index.html create mode 100644 reference/regex/index.html create mode 100644 reference/samplers/index.html create mode 100644 reference/serve/vllm/index.html create mode 100644 reference/text/index.html create mode 100644 reference/types/index.html create mode 100644 search/search_index.json create mode 100644 sitemap.xml create mode 100644 sitemap.xml.gz create mode 100644 stylesheets/extra.css create mode 100644 welcome/index.html diff --git a/.nojekyll b/.nojekyll new file mode 100644 index 000000000..e69de29bb diff --git 
a/404.html b/404.html new file mode 100644 index 000000000..4f5db59d0 --- /dev/null +++ b/404.html @@ -0,0 +1,1794 @@ + + + +
+ + + + + + + + + + + + + + + + + + +CFGGuide
+
+
+
+ Bases: Guide
Guide to generate text that is in the language of a context-free grammar.
+ +outlines/fsm/guide.py
263 +264 +265 +266 +267 +268 +269 +270 +271 +272 +273 +274 +275 +276 +277 +278 +279 +280 +281 +282 +283 +284 +285 +286 +287 +288 +289 +290 +291 +292 +293 +294 +295 +296 +297 +298 +299 +300 +301 +302 +303 +304 +305 +306 +307 +308 +309 +310 +311 +312 +313 +314 +315 +316 +317 +318 +319 +320 +321 +322 +323 +324 +325 +326 +327 +328 +329 +330 +331 +332 +333 +334 +335 +336 +337 +338 +339 +340 +341 +342 +343 +344 +345 +346 +347 +348 +349 +350 +351 +352 +353 +354 +355 +356 +357 +358 +359 +360 +361 +362 +363 +364 +365 +366 +367 +368 +369 +370 +371 +372 +373 +374 +375 +376 +377 +378 +379 +380 +381 +382 +383 +384 +385 +386 +387 +388 +389 +390 +391 +392 +393 +394 +395 +396 +397 +398 +399 +400 +401 +402 +403 +404 +405 +406 +407 +408 +409 +410 +411 +412 +413 +414 +415 +416 +417 +418 +419 +420 +421 +422 +423 +424 +425 +426 +427 +428 +429 +430 +431 |
|
copy()
+
+get_next_instruction(state)
+
+Generate an instruction for the next step.
+Upon initialization, the CFG incremental parser is used to determine the +first regex and construct the first FSM to generate the first terminal.
+This FSM is used for proposals until either:
+The CFG incremental parser is allowed to propose the EOS token from any accepting state, +and once it is generated, the FSM will continue to always generate the EOS token.
+state + The current state of the FSM.
+A list that contains the tokens to mask.
+ +outlines/fsm/guide.py
297 +298 +299 +300 +301 +302 +303 +304 +305 +306 +307 +308 +309 +310 +311 +312 +313 +314 +315 +316 +317 +318 +319 +320 +321 +322 +323 +324 +325 +326 +327 +328 +329 +330 +331 +332 +333 +334 +335 +336 +337 +338 +339 +340 +341 +342 +343 +344 +345 +346 +347 +348 +349 +350 +351 +352 +353 +354 +355 +356 +357 +358 +359 +360 +361 +362 +363 +364 +365 +366 +367 +368 +369 +370 +371 +372 +373 +374 +375 +376 +377 +378 +379 +380 +381 +382 +383 +384 +385 +386 +387 |
|
get_next_state(state, token_id)
+
+Update the state of the guide.
+Transitions the underlying regex FSM to its next state. +If at max tokens or EOS token, transition permanently to the final state. +Update stored partial generations for subsequent incremental parsing.
+state + The current state of the FSM. +token_id + The id of the token that was just generated.
+The new state of the FSM.
+ +outlines/fsm/guide.py
Generate
+
+
+
+ dataclass
+
+
+Generate instruction
+tokens + The tokens that lead to a valid completion if generated.
+ +outlines/fsm/guide.py
Guide
+
+
+
+ Bases: Protocol
Base definition of a generation guide.
+A generation guide defines the behavior of a finite-state machine that guides
+a text generation procedure. Unlike the DFAs built from regular expressions
+guides can also emit a Write
instructions which tells the model that it can
+append a sequence of tokens (or token word) instead of generating it.
outlines/fsm/guide.py
RegexGuide
+
+
+
+ Bases: Guide
Guide to generate text in the language of a regular expression.
+ +outlines/fsm/guide.py
107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +121 +122 +123 +124 +125 +126 +127 +128 +129 +130 +131 +132 +133 +134 +135 +136 +137 +138 +139 +140 +141 +142 +143 +144 +145 +146 +147 +148 +149 +150 +151 +152 +153 +154 +155 +156 +157 +158 +159 +160 +161 +162 +163 +164 +165 +166 +167 +168 +169 +170 +171 +172 +173 +174 +175 +176 +177 +178 +179 +180 +181 +182 +183 +184 +185 +186 +187 +188 +189 +190 +191 +192 +193 +194 +195 +196 +197 +198 +199 +200 +201 +202 +203 +204 +205 +206 +207 +208 +209 +210 +211 +212 +213 +214 +215 +216 +217 +218 +219 +220 +221 +222 +223 +224 +225 +226 +227 +228 +229 +230 +231 +232 +233 +234 +235 +236 +237 +238 +239 +240 +241 +242 +243 +244 +245 +246 +247 +248 +249 +250 +251 +252 +253 +254 +255 +256 +257 +258 +259 +260 |
|
__init__(regex_string, tokenizer)
+
+outlines/fsm/guide.py
get_next_instruction(state)
+
+Return the next instruction for guided generation.
+The initialization of the guide builds an index which maps FSM states to a +map from authorized tokens to the state in which the guide needs to move +if said token is generated. Therefore the authorized tokens at the +current state are the keys of the map returned by the value of the index +for current state.
+If the current state is not contained in the end this means that we are +in a final state of the guide. We only authorize EOS tokens in the final +state.
+state + The current state of the guide.
+A Generate
instance that contains the model and the allowed token ids.
outlines/fsm/guide.py
get_next_state(state, token_id)
+
+Update the state of the guide.
+We use the index to determine to which state the guide should transition +given the token that was just generated.
+state + The current state of the guide. +token_id + The id of the token that was just generated.
+The new state of the guide.
+ +outlines/fsm/guide.py
is_final_state(state)
+
+StopAtEOSGuide
+
+
+
+ Bases: Guide
Guide to generate tokens until the EOS token has been generated.
+ +outlines/fsm/guide.py
__init__(tokenizer)
+
+Initialize the generation guide.
+model + The logit generator used to generate the next token.
+ +outlines/fsm/guide.py
Write
+
+
+
+ dataclass
+
+
+Write instruction.
+tokens + The sequence of tokens to be added to the current sequence by the + generation process.
+ +outlines/fsm/guide.py
build_regex_from_schema(schema, whitespace_pattern=None)
+
+Turn a JSON schema into a regex that matches any JSON object that follows + this schema.
+JSON Schema is a declarative language that allows to annotate JSON documents + with types and descriptions. These schemas can be generated from any Python + datastructure that has type annotation: namedtuples, dataclasses, Pydantic + models. And by ensuring that the generation respects the schema we ensure + that the output can be parsed into these objects. + This function parses the provided schema and builds a generation schedule which + mixes deterministic generation (fixed strings), and sampling with constraints.
+Parameters
+schema
+ A string that represents a JSON Schema.
+ whitespace_pattern
+ Pattern to use for JSON syntactic whitespace (doesn't impact string literals)
+ Example: allow only a single space or newline with whitespace_pattern=r"[
+]?"
Returns
+A generation schedule. A list of strings that represent the JSON + schema's structure and regular expression that define the structure of + the fields.
+References
+.. [0] JSON Schema. https://json-schema.org/
+ +outlines/fsm/json_schema.py
get_schema_from_signature(fn)
+
+Turn a function signature into a JSON schema.
+Every JSON object valid to the output JSON Schema can be passed
+to fn
using the ** unpacking syntax.
outlines/fsm/json_schema.py
to_regex(resolver, instance, whitespace_pattern=None)
+
+Translate a JSON Schema instance into a regex that validates the schema.
+Note
+Many features of JSON schema are missing:
+ - Handle additionalProperties
keyword
+ - Handle types defined as a list
+ - Handle constraints on numbers
+ - Handle special patterns: date
, uri
, etc.
This does not support recursive definitions.
+Parameters
+resolver
+ An object that resolves references to other instances within a schema
+ instance
+ The instance to translate
+ whitespace_pattern
+ Pattern to use for JSON syntactic whitespace (doesn't impact string literals)
+ Example: allow only a single space or newline with whitespace_pattern=r"[
+]?"
outlines/fsm/json_schema.py
98 + 99 +100 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +121 +122 +123 +124 +125 +126 +127 +128 +129 +130 +131 +132 +133 +134 +135 +136 +137 +138 +139 +140 +141 +142 +143 +144 +145 +146 +147 +148 +149 +150 +151 +152 +153 +154 +155 +156 +157 +158 +159 +160 +161 +162 +163 +164 +165 +166 +167 +168 +169 +170 +171 +172 +173 +174 +175 +176 +177 +178 +179 +180 +181 +182 +183 +184 +185 +186 +187 +188 +189 +190 +191 +192 +193 +194 +195 +196 +197 +198 +199 +200 +201 +202 +203 +204 +205 +206 +207 +208 +209 +210 +211 +212 +213 +214 +215 +216 +217 +218 +219 +220 +221 +222 +223 +224 +225 +226 +227 +228 +229 +230 +231 +232 +233 +234 +235 +236 +237 +238 +239 +240 +241 +242 +243 +244 +245 +246 +247 +248 +249 +250 +251 +252 +253 +254 +255 +256 +257 +258 +259 +260 +261 +262 +263 +264 +265 +266 +267 +268 +269 +270 +271 +272 +273 +274 +275 +276 +277 +278 +279 +280 +281 +282 +283 +284 +285 +286 +287 +288 +289 +290 +291 +292 +293 +294 +295 +296 +297 +298 +299 +300 +301 +302 +303 +304 +305 +306 +307 +308 +309 +310 +311 +312 +313 +314 +315 +316 +317 +318 +319 +320 +321 +322 +323 +324 +325 +326 +327 +328 +329 +330 +331 +332 +333 +334 +335 +336 +337 +338 +339 +340 +341 +342 +343 +344 +345 +346 +347 +348 +349 +350 +351 +352 +353 +354 +355 +356 +357 +358 +359 +360 |
|
TransformerTokenizer
+
+
+
+ Bases: Tokenizer
Represents a tokenizer for models in the transformers
library.
outlines/models/transformers.py
Transformers
+
+
+Represents a transformers
model.
outlines/models/transformers.py
117 +118 +119 +120 +121 +122 +123 +124 +125 +126 +127 +128 +129 +130 +131 +132 +133 +134 +135 +136 +137 +138 +139 +140 +141 +142 +143 +144 +145 +146 +147 +148 +149 +150 +151 +152 +153 +154 +155 +156 +157 +158 +159 +160 +161 +162 +163 +164 +165 +166 +167 +168 +169 +170 +171 +172 +173 +174 +175 +176 +177 +178 +179 +180 +181 +182 +183 +184 |
|
forward(input_ids, attention_mask, past_key_values=None)
+
+Compute a forward pass through the transformer model.
+input_ids + The input token ids. Must be one or two dimensional. +attention_mask + The attention mask. Must be one or two dimensional. +past_key_values + A tuple of tuples containing the cached key and value tensors for each + attention head.
+The computed logits and the new cached key and value tensors.
+ +outlines/models/transformers.py
get_llama_tokenizer_types()
+
+Get all the Llama tokenizer types/classes that need work-arounds.
+When they can't be imported, a dummy class is created.
+ +outlines/models/transformers.py
transformers(model_name, device=None, model_kwargs={}, tokenizer_kwargs={})
+
+Instantiate a model from the transformers
library and its tokenizer.
model_name
+ The name of the model as listed on Hugging Face's model page.
+device
+ The device(s) on which the model should be loaded. This overrides
+ the device_map
entry in model_kwargs
when provided.
+model_kwargs
+ A dictionary that contains the keyword arguments to pass to the
+ from_pretrained
method when loading the model.
+tokenizer_kwargs
+ A dictionary that contains the keyword arguments to pass to the
+ from_pretrained
method when loading the tokenizer.
A TransformersModel
model instance.
outlines/models/transformers.py
Integration with OpenAI's API.
+ + + +OpenAI
+
+
+An object that represents the OpenAI API.
+ +outlines/models/openai.py
69 + 70 + 71 + 72 + 73 + 74 + 75 + 76 + 77 + 78 + 79 + 80 + 81 + 82 + 83 + 84 + 85 + 86 + 87 + 88 + 89 + 90 + 91 + 92 + 93 + 94 + 95 + 96 + 97 + 98 + 99 +100 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +121 +122 +123 +124 +125 +126 +127 +128 +129 +130 +131 +132 +133 +134 +135 +136 +137 +138 +139 +140 +141 +142 +143 +144 +145 +146 +147 +148 +149 +150 +151 +152 +153 +154 +155 +156 +157 +158 +159 +160 +161 +162 +163 +164 +165 +166 +167 +168 +169 +170 +171 +172 +173 +174 +175 +176 +177 +178 +179 +180 +181 +182 +183 +184 +185 +186 +187 +188 +189 +190 +191 +192 +193 +194 +195 +196 +197 +198 +199 +200 +201 +202 +203 +204 +205 +206 +207 +208 +209 +210 +211 +212 +213 +214 +215 +216 +217 +218 +219 +220 +221 +222 +223 +224 +225 +226 +227 +228 +229 +230 +231 +232 +233 +234 +235 +236 +237 +238 +239 +240 +241 +242 +243 +244 +245 +246 +247 +248 +249 +250 +251 +252 +253 +254 +255 +256 +257 +258 +259 +260 |
|
__call__(prompt, max_tokens=None, stop_at=None, *, system_prompt=None, temperature=None, samples=None)
+
+Call the OpenAI API to generate text.
+prompt + A string or list of strings that will be used to prompt the model +max_tokens + The maximum number of tokens to generate +stop_at + A string or array of strings which, such that the generation stops + when they are generated. +system_prompt + The content of the system message that precedes the user's prompt. +temperature + The value of the temperature used to sample tokens +samples + The number of completions to generate for each prompt +stop_at + Up to 4 words where the API will stop the completion.
+ +outlines/models/openai.py
__init__(client, config, tokenizer=None, system_prompt=None)
+
+Create an OpenAI
instance.
This class supports the standard OpenAI API, the Azure OpeanAI API as +well as compatible APIs that rely on the OpenAI client.
+client
+ An instance of the API's async client.
+config
+ An instance of OpenAIConfig
. Can be useful to specify some
+ parameters that cannot be set by calling this class' methods.
+tokenizer
+ The tokenizer associated with the model the client connects to.
outlines/models/openai.py
generate_choice(prompt, choices, max_tokens=None, system_prompt=None)
+
+Call the OpenAI API to generate one of several choices.
+prompt + A string or list of strings that will be used to prompt the model +choices + The list of strings between which we ask the model to choose +max_tokens + The maximum number of tokens to generate +system_prompt + The content of the system message that precedes the user's prompt.
+ +outlines/models/openai.py
161 +162 +163 +164 +165 +166 +167 +168 +169 +170 +171 +172 +173 +174 +175 +176 +177 +178 +179 +180 +181 +182 +183 +184 +185 +186 +187 +188 +189 +190 +191 +192 +193 +194 +195 +196 +197 +198 +199 +200 +201 +202 +203 +204 +205 +206 +207 +208 +209 +210 +211 +212 +213 +214 +215 +216 +217 +218 +219 +220 +221 +222 +223 +224 +225 +226 +227 +228 +229 +230 +231 +232 +233 +234 +235 +236 +237 +238 +239 +240 +241 +242 +243 +244 +245 +246 +247 +248 +249 +250 |
|
OpenAIConfig
+
+
+
+ dataclass
+
+
+Represents the parameters of the OpenAI API.
+The information was last fetched on 2023/11/20. We document below the +properties that are specific to the OpenAI API. Not all these properties are +supported by Outlines.
+model
+ The name of the model. Available models can be found on OpenAI's website.
+frequency_penalty
+ Number between -2.0 and 2.0. Positive values penalize new tokens based on
+ their existing frequency in the text.
+logit_bias
+ Modifies the likelihood of specified tokens to appear in the completion.
+ Number between -100 (forbid) and +100 (only allows).
+n
+ The number of completions to return for each prompt.
+presence_penalty
+ Similar to frequency penalty.
+response_format
+ Specifies the format the model must output. {"type": "json_object"}
+ enables JSON mode.
+seed
+ Two completions with the same seed
value should return the same
+ completion. This is however not guaranteed.
+stop
+ Up to 4 words where the API will stop the completion.
+temperature
+ Number between 0 and 2. Higher values make the output more random, while
+ lower values make it more deterministic.
+top_p
+ Number between 0 and 1. Parameter for nucleus sampling.
+user
+ A unique identifier for the end-user.
outlines/models/openai.py
build_optimistic_mask(transposed, max_mask_size=300)
+
+We build the largest mask possible.
+Tokens are added from left to right, so if the encoded choices are e.g.
+[[1,2], [3,4]]
, 1
and 3
will be added before 2
and 4
.
transposed + A list of lists that contain the nth token of each choice.
+ +outlines/models/openai.py
error_handler(api_call_fn)
+
+Handle OpenAI API errors and missing API key.
+ +outlines/models/openai.py
find_longest_intersection(response, choice)
+
+Find the longest intersection between the response and the choice.
+ +outlines/models/openai.py
find_response_choices_intersection(response, choices)
+
+Find the longest intersection between the response and the different +choices.
+Say the response is of the form [1, 2, 3, 4, 5]
and we have the choices
+[[1, 2], [1, 2, 3], [6, 7, 8]]
then the function will return [1, 2, 3]
as the
+intersection, and [[]]
as the list of choices left.
response + The model's response +choices + The remaining possible choices
+A tuple that contains the longest intersection between the response and the +different choices, and the choices which start with this intersection, with the +intersection removed.
+ +outlines/models/openai.py
generate_chat(prompt, system_prompt, client, config)
+
+
+ async
+
+
+Call OpenAI's Chat Completion API.
+prompt
+ The prompt we use to start the generation. Passed to the model
+ with the "user" role.
+system_prompt
+ The system prompt, passed to the model with the "system" role
+ before the prompt.
+client
+ The API client
+config
+ An OpenAIConfig
instance.
A tuple that contains the model's response(s) and usage statistics.
+ +outlines/models/openai.py
PartialIndenter
+
+
+
+ Bases: Indenter
An Indenter
that doesn't reset its state every time process
is called.
outlines/fsm/parsing.py
PartialParserState
+
+
+
+ Bases: ParserState
outlines/fsm/parsing.py
345 +346 +347 +348 +349 +350 +351 +352 +353 +354 +355 +356 +357 +358 +359 +360 +361 +362 +363 +364 +365 +366 +367 +368 +369 +370 +371 +372 +373 +374 +375 +376 +377 +378 +379 +380 +381 +382 +383 +384 +385 +386 +387 +388 +389 +390 +391 +392 +393 +394 +395 +396 +397 +398 +399 +400 +401 +402 +403 +404 +405 +406 +407 +408 +409 +410 +411 +412 +413 +414 +415 +416 +417 +418 +419 +420 +421 +422 +423 +424 +425 +426 +427 +428 +429 +430 +431 +432 +433 +434 +435 +436 +437 +438 +439 +440 +441 +442 +443 +444 +445 +446 +447 +448 +449 +450 +451 +452 +453 +454 +455 +456 +457 +458 +459 |
|
feed_token_no_stack(token, is_end=False)
+
+This is a copy of ParserState.feed_token
with all the value stack
+steps removed. Since we're not exactly parsing in order to obtain a
+CST or anything similar, we can avoid the growing expense of tracking
+the parse tree.
outlines/fsm/parsing.py
PartialParsingFrontend
+
+
+
+ Bases: ParsingFrontend
outlines/fsm/parsing.py
161 +162 +163 +164 +165 +166 +167 +168 +169 +170 +171 +172 +173 +174 +175 +176 +177 +178 +179 +180 +181 +182 +183 +184 +185 +186 +187 +188 +189 +190 +191 +192 +193 +194 +195 +196 +197 +198 +199 +200 +201 +202 +203 +204 +205 +206 +207 +208 +209 +210 +211 +212 +213 +214 +215 +216 +217 +218 +219 +220 +221 +222 +223 +224 +225 +226 +227 +228 +229 +230 +231 +232 +233 +234 +235 +236 +237 +238 +239 +240 +241 +242 +243 +244 +245 +246 +247 +248 +249 +250 +251 +252 +253 +254 +255 +256 +257 +258 +259 +260 +261 +262 +263 +264 +265 +266 +267 +268 +269 +270 +271 +272 +273 +274 +275 |
|
PartialScanner
+
+
+
+ Bases: Scanner
outlines/fsm/parsing.py
511 +512 +513 +514 +515 +516 +517 +518 +519 +520 +521 +522 +523 +524 +525 +526 +527 +528 +529 +530 +531 +532 +533 +534 +535 +536 +537 +538 +539 +540 +541 +542 +543 +544 +545 +546 +547 +548 +549 +550 +551 +552 +553 +554 +555 +556 +557 +558 +559 +560 +561 +562 +563 +564 +565 +566 +567 +568 +569 +570 +571 +572 +573 +574 +575 +576 +577 +578 +579 +580 +581 +582 +583 +584 +585 +586 +587 |
|
get_terminals_info(fsm_state_seq)
+
+Get the possible terminal symbols for an FSM state sequence.
+ +outlines/fsm/parsing.py
match(text, pos, last_fsm_state_seq=None)
+
+Determine an FSM match over text
starting at pos
and continuing from last_fsm_state_seq
.
outlines/fsm/parsing.py
terminals_to_fsms(lp)
+
+Construct a dict
mapping terminal symbol names to their finite state machines.
outlines/fsm/parsing.py
Prompt
+
+
+
+ dataclass
+
+
+Represents a prompt function.
+We return a Prompt
class instead of a simple function so the
+template defined in prompt functions can be accessed.
outlines/prompts.py
__call__(*args, **kwargs)
+
+Render and return the template.
+The rendered template as a Python str
.
outlines/prompts.py
get_fn_description(fn)
+
+Returns the first line of a callable's docstring.
+ +outlines/prompts.py
get_fn_name(fn)
+
+Returns the name of a callable.
+ +outlines/prompts.py
get_fn_signature(fn)
+
+Return the signature of a callable.
+ +outlines/prompts.py
get_fn_source(fn)
+
+Return the source code of a callable.
+ +outlines/prompts.py
get_schema_dict(model)
+
+get_schema_pydantic(model)
+
+Return the schema of a Pydantic model.
+ +outlines/prompts.py
parse_pydantic_schema(raw_schema, definitions)
+
+Parse the output of BaseModel.[schema|model_json_schema]()
.
This recursively follows the references to other schemas in case +of nested models. Other schemas are stored under the "definitions" +key in the schema of the top-level model.
+ +outlines/prompts.py
prompt(fn)
+
+Decorate a function that contains a prompt template.
+This allows one to define prompts in the docstring of a function and simplifies their
+manipulation by providing some degree of encapsulation. It uses the render
+function internally to render templates.
++++++import outlines
+@outlines.prompt +def build_prompt(question): +... "I have a ${question}" +... +prompt = build_prompt("How are you?")
+
This API can also be helpful in an "agent" context where parts of the prompt +are set when the agent is initialized and never modified later. In this situation +we can partially apply the prompt function at initialization.
+++++++import outlines +import functools as ft +... +@outlines.prompt +... def solve_task(name: str, objective: str, task: str): +... '''Your name is {{name}}. +.. Your overall objective is to {{objective}}. +... Please solve the following task: {{task}} +... ''' +... +hal = ft.partial(solve_task, "HAL", "Travel to Jupiter")
+
A Prompt
callable class which will render the template when called.
outlines/prompts.py
render(template, **values)
+
+Parse a Jinja2 template and translate it into an Outlines graph.
+This function removes extra whitespaces and linebreaks from templates to +allow users to enter prompts more naturally than if they used Python's +constructs directly. See the examples for a detailed explanation.
+Outlines follows Jinja2's syntax
+++++++import outlines +outline = outlines.render("I like {{food}} and {{sport}}", food="tomatoes", sport="tennis") +I like tomatoes and tennis
+
If the first line of the template is empty, render
removes it
++++++from outlines import render
+tpl = ''' +... A new string''' +tpl +... '\nA new string' +render(tpl) +... 'A new string'
+
Similarly, render
ignores linebreaks introduced by placing the closing quotes
+underneath the text:
++++++tpl = ''' +... A new string +... ''' +tpl +... '\nA new string\n' +render(tpl) +... 'A new string'
+
If you want to insert a linebreak at the end of the rendered template, you will +need to leave an empty line at the end of the template:
+++++++tpl = ''' +... A new string +... +... ''' +tpl +... '\nA new string\n\n' +render(tpl) +... 'A new string\n'
+
render
removes the indentation in docstrings. This is particularly important
+when using prompt functions
++++++tpl = ''' +... a string +... and another string''' +tpl +... '\n a string\n and another string' +render(tpl) +... 'a string\nand another string'
+
The indentation of the first line is assumed to be the same as the second line's
+++++++tpl = '''a string +... and another''' +tpl +... 'a string\n and another' +render(tpl) +... 'a string\nand another'
+
To get a different indentation for the first and the second line, we can start the +prompt on the string's second line:
+++++++tpl = ''' +... First line +... Second line''' +render(tpl) +... 'First line\n Second line'
+
template + A string that contains a template written with the Jinja2 syntax. +**values + Map from the variables in the template to their value.
+A string that contains the rendered template.
+ +outlines/prompts.py
94 + 95 + 96 + 97 + 98 + 99 +100 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +121 +122 +123 +124 +125 +126 +127 +128 +129 +130 +131 +132 +133 +134 +135 +136 +137 +138 +139 +140 +141 +142 +143 +144 +145 +146 +147 +148 +149 +150 +151 +152 +153 +154 +155 +156 +157 +158 +159 +160 +161 +162 +163 +164 +165 +166 +167 +168 +169 +170 +171 +172 +173 +174 +175 +176 +177 +178 +179 +180 +181 +182 +183 +184 +185 +186 +187 +188 +189 +190 +191 +192 +193 +194 +195 +196 +197 +198 +199 +200 +201 +202 +203 +204 +205 +206 +207 +208 +209 +210 +211 +212 +213 |
|
regex(model, regex_str, sampler=multinomial())
+
+Generate structured text in the language of a regular expression.
+model:
+ An instance of Transformer
that represents a model from the
+ transformers
library.
+regex_str:
+ The regular expression that the output must follow.
+sampler:
+ The sampling algorithm to use to generate token ids from the logits
+ distribution.
A SequenceGenerator
instance that generates text constrained by the
+regular expression.
outlines/generate/regex.py
BeamSearchSampler
+
+
+Beam Search sampling algorithm.
+samples + The number of samples taken for each input sequence.
+ +outlines/samplers.py
247 +248 +249 +250 +251 +252 +253 +254 +255 +256 +257 +258 +259 +260 +261 +262 +263 +264 +265 +266 +267 +268 +269 +270 +271 +272 +273 +274 +275 +276 +277 +278 +279 +280 +281 +282 +283 +284 +285 +286 +287 +288 +289 +290 +291 +292 +293 +294 +295 +296 +297 +298 +299 +300 +301 +302 +303 +304 +305 +306 +307 +308 +309 +310 +311 +312 +313 +314 +315 +316 +317 +318 +319 +320 +321 |
|
__call__(next_token_logits, sequence_weights, _)
+
+Call the beam search sampler.
+next_token_logits
+ A tensor of shape (n_seqs, vocab_size,)
that represents the
+ probability distribution of the next token over the vocabulary.
+sequence_weights
+ A tensor of shape (n_seqs,)
that represents the cumulative
+ weight of each sequence.
+rng
+ A random number generator.
A tuple with an array that contains the ids of the sampled tokens of
+shape (n_seqs, 1)
, an array that contains the ancestors of each
+sampled id of shape (n_seqs,)
and an array that contains the updated
+cumulative weights of each sequence of shape (n_seqs,)
.
outlines/samplers.py
260 +261 +262 +263 +264 +265 +266 +267 +268 +269 +270 +271 +272 +273 +274 +275 +276 +277 +278 +279 +280 +281 +282 +283 +284 +285 +286 +287 +288 +289 +290 +291 +292 +293 +294 +295 +296 +297 +298 +299 +300 +301 +302 +303 +304 +305 +306 +307 +308 +309 +310 +311 +312 +313 +314 +315 +316 +317 +318 +319 +320 +321 |
|
GreedySampler
+
+
+Greedy Sampling algorithm.
+Greedy sampling consists in choosing the token with the largest +likelihood at every step.
+We don't allow more than one sample. We could attribute this a meaning, for +instance the k-th sample represents the k-th most likely token. In which +case it would be equivalent to beam search without the sequence weights.
+samples + The number of samples taken for each input sequence.
+ +outlines/samplers.py
__call__(next_token_logits, sequence_weights, _)
+
+Call the greedy sampler.
+next_token_logits
+ A tensor of shape (n_seqs, vocab_size,)
that represents the
+ probability distribution of the next token over the vocabulary.
+sequence_weights
+ A tensor of shape (n_seqs,)
that represents the cumulative
+ weight of each sequence.
+rng
+ A random number generator.
A tuple with an array that contains the ids of the sampled tokens of
+shape (n_seqs, 1)
, an array that contains the ancestors of each
+sampled id of shape (n_seqs,)
and an array that contains the updated
+cumulative weights of each sequence of shape (n_seqs,)
.
outlines/samplers.py
MultinomialSampler
+
+
+Multinomial sampling algorithm.
+Multinomial sampling consists in randomly sampling the next token assuming +its distribution is a Categorical distribution parametrized by the +next-token logits.
+samples + The number of samples taken for each input sequence.
+ +outlines/samplers.py
83 + 84 + 85 + 86 + 87 + 88 + 89 + 90 + 91 + 92 + 93 + 94 + 95 + 96 + 97 + 98 + 99 +100 +101 +102 +103 +104 +105 +106 +107 +108 +109 +110 +111 +112 +113 +114 +115 +116 +117 +118 +119 +120 +121 +122 +123 +124 +125 +126 +127 +128 +129 +130 +131 +132 +133 +134 +135 +136 +137 +138 +139 +140 +141 +142 +143 +144 +145 +146 +147 +148 +149 +150 +151 +152 +153 +154 +155 +156 +157 +158 +159 +160 +161 +162 |
|
__call__(next_token_logits, sequence_weights, rng)
+
+Call the multinomial sampler.
+next_token_logits
+ A tensor of shape (n_seqs, vocab_size,)
that represents the
+ probability distribution of the next token over the vocabulary.
+sequence_weights
+ A tensor of shape (n_seqs,)
that represents the cumulative
+ weight of each sequence.
+rng
+ A random number generator.
A tuple with an array that contains the ids of the sampled tokens of
+shape (n_seqs, 1)
, an array that contains the ancestors of each
+sampled id of shape (n_seqs,)
and an array that contains the updated
+cumulative weights of each sequence of shape (n_seqs,)
.
outlines/samplers.py
keep_top_k_logits(k)
+
+Build a function that masks logits values smaller than the top k
ones.
k
+ The ranking below which logit values are replaced by -math.inf
.
outlines/samplers.py
keep_top_p_logits(p)
+
+Build a function that masks the lowest probability tokens whose +cumulative probability is below a certain threshold.
+p
+ The value of the threshold. We keep the highest probability tokens whose
+ cumulative distribution is greater than or equal to p
and mask the
+ others. Its value must be between 0 (excluded) and 1 (included).
outlines/samplers.py
rescale_logits(temperature)
+
+Build a function that rescales the token probabilities exponentially.
+temperature + The value by which we rescale the logits.
+ +outlines/samplers.py
SCw?g1AZdtb{ZFl
z%_QRDDvV9d$zFv<9N43+R}q<=aJ<0cQRvq6Y>g57Nj0o)^pEM>={?FLD{Wf!R|_)5
ztd0|kul@rbYGAM>3Z1u}!9swXgv5kEC5*^a#?}+RI799q)hq(IB`%O7Pv^R?zQA$G
zpD&k0hs++Df%%M2pEM^E(-Ys)*>i-2VB?^tHt>@K5lWt+_fChi$l$&+TXpJ1hA#=%
zxagg4NtpL!(+*z77R?40Qehrg$dSzC`T!nNP!lKcZUltkGt(IYw8M{1l~0}pk05B9
zg{JfC!<=NMi9^JUhq(%mpz2x&uRA_&O6Q~skMR2k{*c3;em43u9kO8riQ9*?NY+IY
zDiI4fO%Ckqz2TDiCsltfo8{fd%$mVE>;pGCawuW3xPv!Gck?KeV6B|&oKj3eYOHXe
z2<-Qy8Co7!0JxFzRKx4D_G;-7qt<)V<4qn5i9cXUS2zh%T$;~i0g>;wM*-l+{l=EF
zY1oZ_#NLkhBW>>CkO#%(>$02{_D&%kFtb34cV` n)eKl*BiAN1Tp5@I|=#iLr)t
zVpgJ_)=S(njLO`vyp1<6s 4c}N
zLQAK-`yUved#jHWR)}D~#{;4XS&`2DM$uP*!ej$fswv=3Ia+=PTUt=A0A2(p9On^s
zs#)9nCLPWef5rY53ahHe4MzC2MgthyPP@2;a>Hf_l3%aKOHP)4V-yQBdztQtU@3PS
zzr_has0r>gd}L<=g`oGozOD}PBlGjvk{fKr43P$-%Hhf5_8|H{UQ+4<;b5B2TBf~y
zvps4xNVB?8z>=EouM63?&N(L=N&&=zvf-y OnzzZww{o}
zFI@4?tHP1p$!SE^ymdPo)+nH9{BPq|9$mm!Q@RESU0MnNlAUZ!Dd6JQ{E}B+r!&3i2G;h4XEwW(+(JQuozDy9hjhGDyUPQMjT`5b-pLcpgNG#vO>`CdtBW6%*j>|
zc`B(+n+7EA#_|vd2J3aGQP}7=!FCRbP%ClteCKf#dgPZ2C)CEDI7|Y|tL8}gcU5|X
z$`PtzqLazkQ2x+6Npy~7<`mra_!d8OMaLnYDgN;!$iyNpu@EMKTKKDET69Dn_a-up
z<1Po^p`sim8~p5nde3s8Guh6-8Hw@0yI
zXAdiYuS&-l(miUeOUWFQ@||?^Sr}&w>gZfH7fYt>MH^|TJz9n~L0SuU2#(~uXY%Q^
zN13Avz0I1=HU=)Gf58->BBX5L&?~Y&4~o!l-tP(-s00}+|H^a}E;5X^uqcwoxn=$V
zIqpAMAeR06rt?h}QLUHkqK>(DWquz0ZXKc0+TX<#oIVJ$k|sa9FOu6HU2$ZIjov$q
zuViIVUBz-eo*f4_lF<
zKVQ%1<3$v1^*N`kb%DA$^uf=U-d
pjUMp!tuksP(4GXn)NtQF
zerg8nN;R#ysD$#=Op+7;a9>;Zlfkkv
UikSZtwU^&|CL&SWtF*3GT+>Ei={
zhR|bLNm(|DU7j
Dv)P3RVz!
zg_Eb9v8;Z|frRrhylo5}h%f0PXSDe${)C8H-U6oDl20afEO!kmmy@Pe;G?f}SY9)#
znsDU?Ynk*6w7b;q76*&&