diff --git a/docs/api-inference/tasks/chat-completion.md b/docs/api-inference/tasks/chat-completion.md
index 1452756d2..15893bc48 100644
--- a/docs/api-inference/tasks/chat-completion.md
+++ b/docs/api-inference/tasks/chat-completion.md
@@ -79,7 +79,7 @@ curl 'https://api-inference.huggingface.co/models/google/gemma-2-2b-it/v1/chat/c
-With huggingface_hub client:
+Using `huggingface_hub`:
```py
from huggingface_hub import InferenceClient
@@ -103,7 +103,19 @@ for chunk in stream:
print(chunk.choices[0].delta.content, end="")
```
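+
+For a single, non-streamed response, `InferenceClient.chat_completion` returns the full output at once. A minimal sketch, assuming the same `client` and `messages` as in the streamed example above (the `max_tokens` value is illustrative):
+
+```py
+# Without stream=True, a ChatCompletionOutput is returned instead of chunks
+completion = client.chat_completion(
+    messages=messages,
+    model="google/gemma-2-2b-it",
+    max_tokens=500,
+)
+print(completion.choices[0].message.content)
+```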
-With openai client:
+Using `openai`:
```py
from openai import OpenAI
@@ -134,11 +134,11 @@ To use the Python client, see `huggingface_hub`'s [package reference](https://hu
-With huggingface_hub client:
+Using `huggingface.js`:
```js
-import { HfInference } from "@huggingface/inference"
+import { HfInference } from "@huggingface/inference";
-const client = new HfInference("hf_***")
+const client = new HfInference("hf_***");
let out = "";
@@ -162,14 +162,14 @@ for await (const chunk of stream) {
}
```
-With openai client:
+Using `openai`:
```js
-import { OpenAI } from "openai"
+import { OpenAI } from "openai";
const client = new OpenAI({
baseURL: "https://api-inference.huggingface.co/v1/",
apiKey: "hf_***"
-})
+});
let out = "";
@@ -237,7 +237,7 @@ curl 'https://api-inference.huggingface.co/models/meta-llama/Llama-3.2-11B-Visio
-With huggingface_hub client:
+Using `huggingface_hub`:
```py
from huggingface_hub import InferenceClient
@@ -272,7 +272,7 @@ for chunk in stream:
print(chunk.choices[0].delta.content, end="")
```
-With openai client:
+Using `openai`:
```py
from openai import OpenAI
@@ -314,11 +314,11 @@ To use the Python client, see `huggingface_hub`'s [package reference](https://hu
-With huggingface_hub client:
+Using `huggingface.js`:
```js
-import { HfInference } from "@huggingface/inference"
+import { HfInference } from "@huggingface/inference";
-const client = new HfInference("hf_***")
+const client = new HfInference("hf_***");
let out = "";
@@ -353,14 +353,14 @@ for await (const chunk of stream) {
}
```
-With openai client:
+Using `openai`:
```js
-import { OpenAI } from "openai"
+import { OpenAI } from "openai";
const client = new OpenAI({
baseURL: "https://api-inference.huggingface.co/v1/",
apiKey: "hf_***"
-})
+});
let out = "";
diff --git a/docs/api-inference/tasks/image-text-to-text.md b/docs/api-inference/tasks/image-text-to-text.md
index e1e44c1d6..ee60dd020 100644
--- a/docs/api-inference/tasks/image-text-to-text.md
+++ b/docs/api-inference/tasks/image-text-to-text.md
@@ -45,13 +45,8 @@ curl https://api-inference.huggingface.co/models/meta-llama/Llama-3.2-11B-Vision
-With huggingface_hub client:
+Using `huggingface_hub`:
```py
-import requests
-
-API_URL = "https://api-inference.huggingface.co/models/meta-llama/Llama-3.2-11B-Vision-Instruct"
-headers = {"Authorization": "Bearer hf_***"}
-
from huggingface_hub import InferenceClient
client = InferenceClient(api_key="hf_***")
@@ -69,13 +64,8 @@ for chunk in stream:
print(chunk.choices[0].delta.content, end="")
```
-With openai client:
+Using `openai`:
```py
-import requests
-
-API_URL = "https://api-inference.huggingface.co/models/meta-llama/Llama-3.2-11B-Vision-Instruct"
-headers = {"Authorization": "Bearer hf_***"}
-
from openai import OpenAI
client = OpenAI(
diff --git a/docs/api-inference/tasks/text-to-image.md b/docs/api-inference/tasks/text-to-image.md
index df2bb4d2c..6e01b9b43 100644
--- a/docs/api-inference/tasks/text-to-image.md
+++ b/docs/api-inference/tasks/text-to-image.md
@@ -45,6 +45,24 @@ curl https://api-inference.huggingface.co/models/black-forest-labs/FLUX.1-dev \
+Using `huggingface_hub`:
+```py
+from huggingface_hub import InferenceClient
+client = InferenceClient("black-forest-labs/FLUX.1-dev", token="hf_***")
+
+# output is a PIL.Image object
+image = client.text_to_image("Astronaut riding a horse")
+```
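+
+The returned `PIL.Image` can be saved or inspected directly; a minimal sketch (the filename is arbitrary):
+
+```py
+# Persist the generated image and check its dimensions
+image.save("astronaut.png")
+print(image.size)  # (width, height) in pixels
+```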
+
+Using `requests`:
```py
import requests
@@ -57,6 +67,7 @@ def query(payload):
image_bytes = query({
"inputs": "Astronaut riding a horse",
})
+
# You can access the image with PIL.Image for example
import io
from PIL import Image
diff --git a/scripts/api-inference/package.json b/scripts/api-inference/package.json
index 26eede48e..6d13899d7 100644
--- a/scripts/api-inference/package.json
+++ b/scripts/api-inference/package.json
@@ -14,7 +14,7 @@
"author": "",
"license": "ISC",
"dependencies": {
- "@huggingface/tasks": "^0.12.15",
+ "@huggingface/tasks": "^0.13.3",
"@types/node": "^22.5.0",
"handlebars": "^4.7.8",
"node": "^20.17.0",
diff --git a/scripts/api-inference/pnpm-lock.yaml b/scripts/api-inference/pnpm-lock.yaml
index a271cf344..4e7428def 100644
--- a/scripts/api-inference/pnpm-lock.yaml
+++ b/scripts/api-inference/pnpm-lock.yaml
@@ -9,8 +9,8 @@ importers:
.:
dependencies:
'@huggingface/tasks':
- specifier: ^0.12.15
- version: 0.12.30
+ specifier: ^0.13.3
+ version: 0.13.3
'@types/node':
specifier: ^22.5.0
version: 22.5.0
@@ -186,8 +186,8 @@ packages:
cpu: [x64]
os: [win32]
- '@huggingface/tasks@0.12.30':
- resolution: {integrity: sha512-A1ITdxbEzx9L8wKR8pF7swyrTLxWNDFIGDLUWInxvks2ruQ8PLRBZe8r0EcjC3CDdtlj9jV1V4cgV35K/iy3GQ==}
+ '@huggingface/tasks@0.13.3':
+ resolution: {integrity: sha512-nIQSodwZPyARxcOrIvGO/l9Pk4/dTTZzvDUE3XKkbA+atfYZO3OrqtyT/srPaMhUG7mgdn/1nE2yS7TBKq+sMg==}
'@jridgewell/resolve-uri@3.1.2':
resolution: {integrity: sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==}
@@ -404,7 +404,7 @@ snapshots:
'@esbuild/win32-x64@0.23.1':
optional: true
- '@huggingface/tasks@0.12.30': {}
+ '@huggingface/tasks@0.13.3': {}
'@jridgewell/resolve-uri@3.1.2': {}