Skip to content

Commit

Permalink
Update API inference documentation (automated)
Browse files Browse the repository at this point in the history
  • Loading branch information
Wauplin authored and github-actions[bot] committed Nov 15, 2024
1 parent 7b57ea3 commit e33e9b0
Show file tree
Hide file tree
Showing 19 changed files with 326 additions and 1,056 deletions.
39 changes: 3 additions & 36 deletions docs/api-inference/tasks/audio-classification.md
Original file line number Diff line number Diff line change
Expand Up @@ -40,54 +40,21 @@ Explore all available models and find the one that suits you best [here](https:/

<curl>
```bash
# POST the raw FLAC audio file to the audio-classification endpoint.
curl https://api-inference.huggingface.co/models/ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition \
    -X POST \
    --data-binary '@sample1.flac' \
    -H "Authorization: Bearer hf_***"
```
</curl>

<python>
```py
import requests

API_URL = "https://api-inference.huggingface.co/models/ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition"
headers = {"Authorization": "Bearer hf_***"}

def query(filename):
    """Send a local audio file to the Inference API and return the parsed JSON response."""
    with open(filename, "rb") as f:
        data = f.read()  # raw audio bytes; the API detects the format server-side
    response = requests.post(API_URL, headers=headers, data=data)
    return response.json()

output = query("sample1.flac")
```

To use the Python client, see `huggingface_hub`'s [package reference](https://huggingface.co/docs/huggingface_hub/package_reference/inference_client#huggingface_hub.InferenceClient.audio_classification).
</python>

<js>
```js
import fs from "node:fs"; // required by readFileSync (missing in the original snippet)

// POST a local audio file (raw bytes) to the audio-classification endpoint
// and return the parsed JSON response.
async function query(filename) {
  const data = fs.readFileSync(filename);
  const response = await fetch(
    "https://api-inference.huggingface.co/models/ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition",
    {
      headers: {
        // Original snippet was missing the comma here (syntax error) and sent
        // "Content-Type: application/json" with a binary body; omit it so the
        // API can detect the audio format from the payload.
        Authorization: "Bearer hf_***",
      },
      method: "POST",
      body: data,
    }
  );
  const result = await response.json();
  return result;
}

query("sample1.flac").then((response) => {
  console.log(JSON.stringify(response));
});
```

To use the JavaScript client, see `huggingface.js`'s [package reference](https://huggingface.co/docs/huggingface.js/inference/classes/HfInference#audioclassification).
Expand Down
39 changes: 3 additions & 36 deletions docs/api-inference/tasks/automatic-speech-recognition.md
Original file line number Diff line number Diff line change
Expand Up @@ -41,54 +41,21 @@ Explore all available models and find the one that suits you best [here](https:/

<curl>
```bash
# POST the raw FLAC audio file to the automatic-speech-recognition endpoint.
curl https://api-inference.huggingface.co/models/openai/whisper-large-v3 \
    -X POST \
    --data-binary '@sample1.flac' \
    -H "Authorization: Bearer hf_***"
```
</curl>

<python>
```py
import requests

API_URL = "https://api-inference.huggingface.co/models/openai/whisper-large-v3"
headers = {"Authorization": "Bearer hf_***"}

def query(filename):
    """Send a local audio file to the Inference API and return the parsed JSON response."""
    with open(filename, "rb") as f:
        data = f.read()  # raw audio bytes; the API detects the format server-side
    response = requests.post(API_URL, headers=headers, data=data)
    return response.json()

output = query("sample1.flac")
```

To use the Python client, see `huggingface_hub`'s [package reference](https://huggingface.co/docs/huggingface_hub/package_reference/inference_client#huggingface_hub.InferenceClient.automatic_speech_recognition).
</python>

<js>
```js
import fs from "node:fs"; // required by readFileSync (missing in the original snippet)

// POST a local audio file (raw bytes) to the automatic-speech-recognition
// endpoint and return the parsed JSON response.
async function query(filename) {
  const data = fs.readFileSync(filename);
  const response = await fetch(
    "https://api-inference.huggingface.co/models/openai/whisper-large-v3",
    {
      headers: {
        // Original snippet was missing the comma here (syntax error) and sent
        // "Content-Type: application/json" with a binary body; omit it so the
        // API can detect the audio format from the payload.
        Authorization: "Bearer hf_***",
      },
      method: "POST",
      body: data,
    }
  );
  const result = await response.json();
  return result;
}

query("sample1.flac").then((response) => {
  console.log(JSON.stringify(response));
});
```

To use the JavaScript client, see `huggingface.js`'s [package reference](https://huggingface.co/docs/huggingface.js/inference/classes/HfInference#automaticspeechrecognition).
Expand Down
97 changes: 6 additions & 91 deletions docs/api-inference/tasks/chat-completion.md
Original file line number Diff line number Diff line change
Expand Up @@ -61,50 +61,21 @@ The API supports:

<curl>
```bash
# Non-streaming chat completion request (OpenAI-compatible payload).
curl 'https://api-inference.huggingface.co/models/google/gemma-2-2b-it/v1/chat/completions' \
    -H "Authorization: Bearer hf_***" \
    -H 'Content-Type: application/json' \
    -d '{
    "model": "google/gemma-2-2b-it",
    "messages": [{"role": "user", "content": "What is the capital of France?"}],
    "max_tokens": 500,
    "stream": false
}'
```
</curl>

<python>
```py
from huggingface_hub import InferenceClient

client = InferenceClient(api_key="hf_***")

# Stream the assistant's reply: stream=True yields incremental delta chunks.
for message in client.chat_completion(
    model="google/gemma-2-2b-it",
    messages=[{"role": "user", "content": "What is the capital of France?"}],
    max_tokens=500,
    stream=True,
):
    print(message.choices[0].delta.content, end="")
```

To use the Python client, see `huggingface_hub`'s [package reference](https://huggingface.co/docs/huggingface_hub/package_reference/inference_client#huggingface_hub.InferenceClient.chat_completion).
</python>

<js>
```js
import { HfInference } from "@huggingface/inference";

const inference = new HfInference("hf_***");

// Stream the chat completion and print each token as it arrives.
for await (const chunk of inference.chatCompletionStream({
  model: "google/gemma-2-2b-it",
  messages: [{ role: "user", content: "What is the capital of France?" }],
  max_tokens: 500,
})) {
  process.stdout.write(chunk.choices[0]?.delta?.content || "");
}
```

To use the JavaScript client, see `huggingface.js`'s [package reference](https://huggingface.co/docs/huggingface.js/inference/classes/HfInference#chatcompletion).
Expand All @@ -121,77 +92,21 @@ To use the JavaScript client, see `huggingface.js`'s [package reference](https:/

<curl>
```bash
# Multimodal chat completion: the user message carries an image URL and a text prompt.
curl 'https://api-inference.huggingface.co/models/meta-llama/Llama-3.2-11B-Vision-Instruct/v1/chat/completions' \
    -H "Authorization: Bearer hf_***" \
    -H 'Content-Type: application/json' \
    -d '{
    "model": "meta-llama/Llama-3.2-11B-Vision-Instruct",
    "messages": [
        {
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"}},
                {"type": "text", "text": "Describe this image in one sentence."}
            ]
        }
    ],
    "max_tokens": 500,
    "stream": false
}'
```
</curl>

<python>
```py
from huggingface_hub import InferenceClient

client = InferenceClient(api_key="hf_***")

image_url = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"

# Multimodal chat: the user message carries both an image URL and a text prompt.
# stream=True yields incremental delta chunks.
for message in client.chat_completion(
    model="meta-llama/Llama-3.2-11B-Vision-Instruct",
    messages=[
        {
            "role": "user",
            "content": [
                {"type": "image_url", "image_url": {"url": image_url}},
                {"type": "text", "text": "Describe this image in one sentence."},
            ],
        }
    ],
    max_tokens=500,
    stream=True,
):
    print(message.choices[0].delta.content, end="")
```

To use the Python client, see `huggingface_hub`'s [package reference](https://huggingface.co/docs/huggingface_hub/package_reference/inference_client#huggingface_hub.InferenceClient.chat_completion).
</python>

<js>
```js
import { HfInference } from "@huggingface/inference";

const inference = new HfInference("hf_***");
const imageUrl = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg";

// Multimodal chat: the user message carries both an image URL and a text prompt.
// Stream the completion and print each token as it arrives.
for await (const chunk of inference.chatCompletionStream({
  model: "meta-llama/Llama-3.2-11B-Vision-Instruct",
  messages: [
    {
      role: "user",
      content: [
        { type: "image_url", image_url: { url: imageUrl } },
        { type: "text", text: "Describe this image in one sentence." },
      ],
    },
  ],
  max_tokens: 500,
})) {
  process.stdout.write(chunk.choices[0]?.delta?.content || "");
}
```

To use the JavaScript client, see `huggingface.js`'s [package reference](https://huggingface.co/docs/huggingface.js/inference/classes/HfInference#chatcompletion).
Expand Down
39 changes: 3 additions & 36 deletions docs/api-inference/tasks/feature-extraction.md
Original file line number Diff line number Diff line change
Expand Up @@ -40,54 +40,21 @@ Explore all available models and find the one that suits you best [here](https:/

<curl>
```bash
# POST a JSON payload to the feature-extraction endpoint.
curl https://api-inference.huggingface.co/models/thenlper/gte-large \
    -X POST \
    -d '{"inputs": "Today is a sunny day and I will get some ice cream."}' \
    -H 'Content-Type: application/json' \
    -H "Authorization: Bearer hf_***"
```
</curl>

<python>
```py
import requests

API_URL = "https://api-inference.huggingface.co/models/thenlper/gte-large"
headers = {"Authorization": "Bearer hf_***"}

def query(payload):
    """POST a JSON payload to the Inference API and return the parsed JSON response."""
    response = requests.post(API_URL, headers=headers, json=payload)
    return response.json()

output = query({
    "inputs": "Today is a sunny day and I will get some ice cream.",
})
```

To use the Python client, see `huggingface_hub`'s [package reference](https://huggingface.co/docs/huggingface_hub/package_reference/inference_client#huggingface_hub.InferenceClient.feature_extraction).
</python>

<js>
```js
// POST a JSON payload to the feature-extraction endpoint and return the
// parsed JSON response.
async function query(data) {
  const response = await fetch(
    "https://api-inference.huggingface.co/models/thenlper/gte-large",
    {
      headers: {
        // Original snippet was missing the comma here (syntax error).
        Authorization: "Bearer hf_***",
        "Content-Type": "application/json",
      },
      method: "POST",
      body: JSON.stringify(data),
    }
  );
  const result = await response.json();
  return result;
}

query({ inputs: "Today is a sunny day and I will get some ice cream." }).then((response) => {
  console.log(JSON.stringify(response));
});
```

To use the JavaScript client, see `huggingface.js`'s [package reference](https://huggingface.co/docs/huggingface.js/inference/classes/HfInference#featureextraction).
Expand Down
39 changes: 3 additions & 36 deletions docs/api-inference/tasks/fill-mask.md
Original file line number Diff line number Diff line change
Expand Up @@ -36,54 +36,21 @@ Explore all available models and find the one that suits you best [here](https:/

<curl>
```bash
# POST a JSON payload (containing a [MASK] token) to the fill-mask endpoint.
curl https://api-inference.huggingface.co/models/google-bert/bert-base-uncased \
    -X POST \
    -d '{"inputs": "The answer to the universe is [MASK]."}' \
    -H 'Content-Type: application/json' \
    -H "Authorization: Bearer hf_***"
```
</curl>

<python>
```py
import requests

API_URL = "https://api-inference.huggingface.co/models/google-bert/bert-base-uncased"
headers = {"Authorization": "Bearer hf_***"}

def query(payload):
    """POST a JSON payload to the Inference API and return the parsed JSON response."""
    response = requests.post(API_URL, headers=headers, json=payload)
    return response.json()

output = query({
    "inputs": "The answer to the universe is [MASK].",
})
```

To use the Python client, see `huggingface_hub`'s [package reference](https://huggingface.co/docs/huggingface_hub/package_reference/inference_client#huggingface_hub.InferenceClient.fill_mask).
</python>

<js>
```js
// POST a JSON payload (containing a [MASK] token) to the fill-mask endpoint
// and return the parsed JSON response.
async function query(data) {
  const response = await fetch(
    "https://api-inference.huggingface.co/models/google-bert/bert-base-uncased",
    {
      headers: {
        // Original snippet was missing the comma here (syntax error).
        Authorization: "Bearer hf_***",
        "Content-Type": "application/json",
      },
      method: "POST",
      body: JSON.stringify(data),
    }
  );
  const result = await response.json();
  return result;
}

query({ inputs: "The answer to the universe is [MASK]." }).then((response) => {
  console.log(JSON.stringify(response));
});
```

To use the JavaScript client, see `huggingface.js`'s [package reference](https://huggingface.co/docs/huggingface.js/inference/classes/HfInference#fillmask).
Expand Down
Loading

0 comments on commit e33e9b0

Please sign in to comment.