From b94abfa04062e803908d433ec445ba3210a01dee Mon Sep 17 00:00:00 2001
From: Celina Hanouti
Date: Wed, 16 Oct 2024 16:20:48 +0200
Subject: [PATCH] regenerate inference api docs

---
 docs/api-inference/tasks/audio-classification.md | 9 +++++----
 .../tasks/automatic-speech-recognition.md | 3 +--
 docs/api-inference/tasks/chat-completion.md | 9 +++++----
 docs/api-inference/tasks/feature-extraction.md | 2 +-
 docs/api-inference/tasks/fill-mask.md | 2 +-
 docs/api-inference/tasks/image-classification.md | 3 ++-
 docs/api-inference/tasks/image-segmentation.md | 2 +-
 docs/api-inference/tasks/image-text-to-text.md | 11 ++++++-----
 docs/api-inference/tasks/image-to-image.md | 2 +-
 docs/api-inference/tasks/object-detection.md | 2 +-
 docs/api-inference/tasks/question-answering.md | 3 +--
 docs/api-inference/tasks/summarization.md | 2 +-
 docs/api-inference/tasks/table-question-answering.md | 9 ++++-----
 docs/api-inference/tasks/text-classification.md | 3 ++-
 docs/api-inference/tasks/text-generation.md | 2 +-
 docs/api-inference/tasks/text-to-image.md | 3 +--
 docs/api-inference/tasks/token-classification.md | 2 +-
 docs/api-inference/tasks/translation.md | 2 +-
 docs/api-inference/tasks/zero-shot-classification.md | 3 +--
 scripts/api-inference/scripts/generate.ts | 2 +-
 20 files changed, 38 insertions(+), 38 deletions(-)

diff --git a/docs/api-inference/tasks/audio-classification.md b/docs/api-inference/tasks/audio-classification.md
index 538fda748..f91a18dc3 100644
--- a/docs/api-inference/tasks/audio-classification.md
+++ b/docs/api-inference/tasks/audio-classification.md
@@ -29,8 +29,9 @@ For more details about the `audio-classification` task, check out its [dedicated
 ### Recommended models
+- [ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition](https://huggingface.co/ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition): An emotion recognition model.
-This is only a subset of the supported models. Find the model that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag=audio-classification&sort=trending).
+Explore all available models and find the one that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag=audio-classification&sort=trending).
 ### Using the API
@@ -39,7 +40,7 @@ This is only a subset of the supported models. Find the model that suits you bes
 ```bash
-curl https://api-inference.huggingface.co/models/ \
+curl https://api-inference.huggingface.co/models/ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition \
 -X POST \
 --data-binary '@sample1.flac' \
 -H "Authorization: Bearer hf_***"
@@ -50,7 +51,7 @@ curl https://api-inference.huggingface.co/models/ \
 ```py
 import requests
-API_URL = "https://api-inference.huggingface.co/models/"
+API_URL = "https://api-inference.huggingface.co/models/ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition"
 headers = {"Authorization": "Bearer hf_***"}
 def query(filename):
@@ -70,7 +71,7 @@ To use the Python client, see `huggingface_hub`'s [package reference](https://hu
 async function query(filename) {
 const data = fs.readFileSync(filename);
 const response = await fetch(
- "https://api-inference.huggingface.co/models/",
+ "https://api-inference.huggingface.co/models/ehcalabres/wav2vec2-lg-xlsr-en-speech-emotion-recognition",
 {
 headers: {
 Authorization: "Bearer hf_***"
diff --git a/docs/api-inference/tasks/automatic-speech-recognition.md b/docs/api-inference/tasks/automatic-speech-recognition.md
index 74023b28c..819bbf862 100644
--- a/docs/api-inference/tasks/automatic-speech-recognition.md
+++ b/docs/api-inference/tasks/automatic-speech-recognition.md
@@ -30,10 +30,9 @@ For more details about the `automatic-speech-recognition` task, check out its [d
 ### Recommended models
 - [openai/whisper-large-v3](https://huggingface.co/openai/whisper-large-v3): A powerful ASR model by OpenAI.
-- [facebook/seamless-m4t-v2-large](https://huggingface.co/facebook/seamless-m4t-v2-large): An end-to-end model that performs ASR and Speech Translation by MetaAI.
 - [pyannote/speaker-diarization-3.1](https://huggingface.co/pyannote/speaker-diarization-3.1): Powerful speaker diarization model.
-This is only a subset of the supported models. Find the model that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag=automatic-speech-recognition&sort=trending).
+Explore all available models and find the one that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag=automatic-speech-recognition&sort=trending).
 ### Using the API
diff --git a/docs/api-inference/tasks/chat-completion.md b/docs/api-inference/tasks/chat-completion.md
index 7246beae5..7acba716b 100644
--- a/docs/api-inference/tasks/chat-completion.md
+++ b/docs/api-inference/tasks/chat-completion.md
@@ -29,6 +29,7 @@ This is a subtask of [`text-generation`](https://huggingface.co/docs/api-inferen
 #### Conversational Vision-Language Models (VLMs)
+- [meta-llama/Llama-3.2-11B-Vision-Instruct](https://huggingface.co/meta-llama/Llama-3.2-11B-Vision-Instruct): Powerful vision language model with great visual understanding and reasoning capabilities.
 - [microsoft/Phi-3.5-vision-instruct](https://huggingface.co/microsoft/Phi-3.5-vision-instruct): Strong image-text-to-text model.
 ### Using the API
@@ -106,11 +107,11 @@ To use the JavaScript client, see `huggingface.js`'s [package reference](https:/
 ```bash
-curl 'https://api-inference.huggingface.co/models/microsoft/Phi-3.5-vision-instruct/v1/chat/completions' \
+curl 'https://api-inference.huggingface.co/models/meta-llama/Llama-3.2-11B-Vision-Instruct/v1/chat/completions' \
 -H "Authorization: Bearer hf_***" \
 -H 'Content-Type: application/json' \
 -d '{
- "model": "microsoft/Phi-3.5-vision-instruct",
+ "model": "meta-llama/Llama-3.2-11B-Vision-Instruct",
 "messages": [
 {
 "role": "user",
@@ -136,7 +137,7 @@ client = InferenceClient(api_key="hf_***")
 image_url = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
 for message in client.chat_completion(
- model="microsoft/Phi-3.5-vision-instruct",
+ model="meta-llama/Llama-3.2-11B-Vision-Instruct",
 messages=[
 {
 "role": "user",
@@ -163,7 +164,7 @@ const inference = new HfInference("hf_***");
 const imageUrl = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg";
 for await (const chunk of inference.chatCompletionStream({
- model: "microsoft/Phi-3.5-vision-instruct",
+ model: "meta-llama/Llama-3.2-11B-Vision-Instruct",
 messages: [
 {
 "role": "user",
diff --git a/docs/api-inference/tasks/feature-extraction.md b/docs/api-inference/tasks/feature-extraction.md
index 75002dc7e..b9d0fb312 100644
--- a/docs/api-inference/tasks/feature-extraction.md
+++ b/docs/api-inference/tasks/feature-extraction.md
@@ -31,7 +31,7 @@ For more details about the `feature-extraction` task, check out its [dedicated p
 - [thenlper/gte-large](https://huggingface.co/thenlper/gte-large): A powerful feature extraction model for natural language processing tasks.
-This is only a subset of the supported models. Find the model that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag=feature-extraction&sort=trending).
+Explore all available models and find the one that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag=feature-extraction&sort=trending).
 ### Using the API
diff --git a/docs/api-inference/tasks/fill-mask.md b/docs/api-inference/tasks/fill-mask.md
index f23fab37f..b4c07c07a 100644
--- a/docs/api-inference/tasks/fill-mask.md
+++ b/docs/api-inference/tasks/fill-mask.md
@@ -27,7 +27,7 @@ For more details about the `fill-mask` task, check out its [dedicated page](http
 - [google-bert/bert-base-uncased](https://huggingface.co/google-bert/bert-base-uncased): The famous BERT model.
 - [FacebookAI/xlm-roberta-base](https://huggingface.co/FacebookAI/xlm-roberta-base): A multilingual model trained on 100 languages.
-This is only a subset of the supported models. Find the model that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag=fill-mask&sort=trending).
+Explore all available models and find the one that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag=fill-mask&sort=trending).
 ### Using the API
diff --git a/docs/api-inference/tasks/image-classification.md b/docs/api-inference/tasks/image-classification.md
index a7686ee68..ce5ad7192 100644
--- a/docs/api-inference/tasks/image-classification.md
+++ b/docs/api-inference/tasks/image-classification.md
@@ -25,8 +25,9 @@ For more details about the `image-classification` task, check out its [dedicated
 ### Recommended models
 - [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224): A strong image classification model.
+- [facebook/deit-base-distilled-patch16-224](https://huggingface.co/facebook/deit-base-distilled-patch16-224): A robust image classification model.
-This is only a subset of the supported models. Find the model that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag=image-classification&sort=trending).
+Explore all available models and find the one that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag=image-classification&sort=trending).
 ### Using the API
diff --git a/docs/api-inference/tasks/image-segmentation.md b/docs/api-inference/tasks/image-segmentation.md
index 274176b46..90017e6f6 100644
--- a/docs/api-inference/tasks/image-segmentation.md
+++ b/docs/api-inference/tasks/image-segmentation.md
@@ -26,7 +26,7 @@ For more details about the `image-segmentation` task, check out its [dedicated p
 - [nvidia/segformer-b0-finetuned-ade-512-512](https://huggingface.co/nvidia/segformer-b0-finetuned-ade-512-512): Semantic segmentation model trained on ADE20k benchmark dataset with 512x512 resolution.
-This is only a subset of the supported models. Find the model that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag=image-segmentation&sort=trending).
+Explore all available models and find the one that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag=image-segmentation&sort=trending).
 ### Using the API
diff --git a/docs/api-inference/tasks/image-text-to-text.md b/docs/api-inference/tasks/image-text-to-text.md
index 19819ce4a..bacc08dac 100644
--- a/docs/api-inference/tasks/image-text-to-text.md
+++ b/docs/api-inference/tasks/image-text-to-text.md
@@ -24,10 +24,11 @@ For more details about the `image-text-to-text` task, check out its [dedicated p
 ### Recommended models
+- [meta-llama/Llama-3.2-11B-Vision-Instruct](https://huggingface.co/meta-llama/Llama-3.2-11B-Vision-Instruct): Powerful vision language model with great visual understanding and reasoning capabilities.
 - [HuggingFaceM4/idefics2-8b-chatty](https://huggingface.co/HuggingFaceM4/idefics2-8b-chatty): Cutting-edge conversational vision language model that can take multiple image inputs.
 - [microsoft/Phi-3.5-vision-instruct](https://huggingface.co/microsoft/Phi-3.5-vision-instruct): Strong image-text-to-text model.
-This is only a subset of the supported models. Find the model that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag=image-text-to-text&sort=trending).
+Explore all available models and find the one that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag=image-text-to-text&sort=trending).
 ### Using the API
@@ -36,7 +37,7 @@ This is only a subset of the supported models. Find the model that suits you bes
 ```bash
-curl https://api-inference.huggingface.co/models/HuggingFaceM4/idefics2-8b-chatty \
+curl https://api-inference.huggingface.co/models/meta-llama/Llama-3.2-11B-Vision-Instruct \
 -X POST \
 -d '{"inputs": No input example has been defined for this model task.}' \
 -H 'Content-Type: application/json' \
 -H "Authorization: Bearer hf_***"
 ```py
 import requests
-API_URL = "https://api-inference.huggingface.co/models/HuggingFaceM4/idefics2-8b-chatty"
+API_URL = "https://api-inference.huggingface.co/models/meta-llama/Llama-3.2-11B-Vision-Instruct"
 headers = {"Authorization": "Bearer hf_***"}
 from huggingface_hub import InferenceClient
 client = InferenceClient(api_key="hf_***")
 image_url = "https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg"
 for message in client.chat_completion(
- model="HuggingFaceM4/idefics2-8b-chatty",
+ model="meta-llama/Llama-3.2-11B-Vision-Instruct",
 messages=[
 {
 "role": "user",
@@ -81,7 +82,7 @@ To use the Python client, see `huggingface_hub`'s [package reference](https://hu
 ```js
 async function query(data) {
 const response = await fetch(
- "https://api-inference.huggingface.co/models/HuggingFaceM4/idefics2-8b-chatty",
+ "https://api-inference.huggingface.co/models/meta-llama/Llama-3.2-11B-Vision-Instruct",
 {
 headers: {
 Authorization: "Bearer hf_***"
diff --git a/docs/api-inference/tasks/image-to-image.md b/docs/api-inference/tasks/image-to-image.md
index 7b5cfaad4..1c2f277b3 100644
--- a/docs/api-inference/tasks/image-to-image.md
+++ b/docs/api-inference/tasks/image-to-image.md
@@ -31,7 +31,7 @@ For more details about the `image-to-image` task, check out its [dedicated page]
 - [timbrooks/instruct-pix2pix](https://huggingface.co/timbrooks/instruct-pix2pix): A model that takes an image and an instruction to edit the image.
-This is only a subset of the supported models. Find the model that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag=image-to-image&sort=trending).
+Explore all available models and find the one that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag=image-to-image&sort=trending).
 ### Using the API
diff --git a/docs/api-inference/tasks/object-detection.md b/docs/api-inference/tasks/object-detection.md
index ce268b99b..b01776f25 100644
--- a/docs/api-inference/tasks/object-detection.md
+++ b/docs/api-inference/tasks/object-detection.md
@@ -27,7 +27,7 @@ For more details about the `object-detection` task, check out its [dedicated pag
 - [facebook/detr-resnet-50](https://huggingface.co/facebook/detr-resnet-50): Solid object detection model trained on the benchmark dataset COCO 2017.
 - [microsoft/beit-base-patch16-224-pt22k-ft22k](https://huggingface.co/microsoft/beit-base-patch16-224-pt22k-ft22k): Strong object detection model trained on ImageNet-21k dataset.
-This is only a subset of the supported models. Find the model that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag=object-detection&sort=trending).
+Explore all available models and find the one that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag=object-detection&sort=trending).
 ### Using the API
diff --git a/docs/api-inference/tasks/question-answering.md b/docs/api-inference/tasks/question-answering.md
index 0b59c7498..1a2beb24f 100644
--- a/docs/api-inference/tasks/question-answering.md
+++ b/docs/api-inference/tasks/question-answering.md
@@ -26,9 +26,8 @@ For more details about the `question-answering` task, check out its [dedicated p
 - [deepset/roberta-base-squad2](https://huggingface.co/deepset/roberta-base-squad2): A robust baseline model for most question answering domains.
 - [distilbert/distilbert-base-cased-distilled-squad](https://huggingface.co/distilbert/distilbert-base-cased-distilled-squad): Small yet robust model that can answer questions.
-- [google/tapas-base-finetuned-wtq](https://huggingface.co/google/tapas-base-finetuned-wtq): A special model that can answer questions from tables.
-This is only a subset of the supported models. Find the model that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag=question-answering&sort=trending).
+Explore all available models and find the one that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag=question-answering&sort=trending).
 ### Using the API
diff --git a/docs/api-inference/tasks/summarization.md b/docs/api-inference/tasks/summarization.md
index af10c0e17..6a46c6d6a 100644
--- a/docs/api-inference/tasks/summarization.md
+++ b/docs/api-inference/tasks/summarization.md
@@ -26,7 +26,7 @@ For more details about the `summarization` task, check out its [dedicated page](
 - [facebook/bart-large-cnn](https://huggingface.co/facebook/bart-large-cnn): A strong summarization model trained on English news articles. Excels at generating factual summaries.
-This is only a subset of the supported models. Find the model that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag=summarization&sort=trending).
+Explore all available models and find the one that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag=summarization&sort=trending).
 ### Using the API
diff --git a/docs/api-inference/tasks/table-question-answering.md b/docs/api-inference/tasks/table-question-answering.md
index d5bd0a748..75387053c 100644
--- a/docs/api-inference/tasks/table-question-answering.md
+++ b/docs/api-inference/tasks/table-question-answering.md
@@ -24,9 +24,8 @@ For more details about the `table-question-answering` task, check out its [dedic
 ### Recommended models
-- [google/tapas-base-finetuned-wtq](https://huggingface.co/google/tapas-base-finetuned-wtq): A robust table question answering model.
-This is only a subset of the supported models. Find the model that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag=table-question-answering&sort=trending).
+Explore all available models and find the one that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag=table-question-answering&sort=trending).
 ### Using the API
@@ -35,7 +34,7 @@ This is only a subset of the supported models. Find the model that suits you bes
 ```bash
-curl https://api-inference.huggingface.co/models/google/tapas-base-finetuned-wtq \
+curl https://api-inference.huggingface.co/models/ \
 -X POST \
 -d '{"inputs": { "query": "How many stars does the transformers repository have?", "table": { "Repository": ["Transformers", "Datasets", "Tokenizers"], "Stars": ["36542", "4512", "3934"], "Contributors": ["651", "77", "34"], "Programming language": [ "Python", "Python", "Rust, Python and NodeJS" ] } }}' \
 -H 'Content-Type: application/json' \
 -H "Authorization: Bearer hf_***"
 ```py
 import requests
-API_URL = "https://api-inference.huggingface.co/models/google/tapas-base-finetuned-wtq"
+API_URL = "https://api-inference.huggingface.co/models/"
 headers = {"Authorization": "Bearer hf_***"}
 def query(payload):
@@ -78,7 +77,7 @@ To use the Python client, see `huggingface_hub`'s [package reference](https://hu
 ```js
 async function query(data) {
 const response = await fetch(
- "https://api-inference.huggingface.co/models/google/tapas-base-finetuned-wtq",
+ "https://api-inference.huggingface.co/models/",
 {
 headers: {
 Authorization: "Bearer hf_***"
diff --git a/docs/api-inference/tasks/text-classification.md b/docs/api-inference/tasks/text-classification.md
index 01c5d3d0d..96640c5b0 100644
--- a/docs/api-inference/tasks/text-classification.md
+++ b/docs/api-inference/tasks/text-classification.md
@@ -28,8 +28,9 @@ For more details about the `text-classification` task, check out its [dedicated
 - [ProsusAI/finbert](https://huggingface.co/ProsusAI/finbert): A sentiment analysis model specialized in financial sentiment.
 - [cardiffnlp/twitter-roberta-base-sentiment-latest](https://huggingface.co/cardiffnlp/twitter-roberta-base-sentiment-latest): A sentiment analysis model specialized in analyzing tweets.
 - [papluca/xlm-roberta-base-language-detection](https://huggingface.co/papluca/xlm-roberta-base-language-detection): A model that can classify languages.
+- [meta-llama/Prompt-Guard-86M](https://huggingface.co/meta-llama/Prompt-Guard-86M): A model that can classify text generation attacks.
-This is only a subset of the supported models. Find the model that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag=text-classification&sort=trending).
+Explore all available models and find the one that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag=text-classification&sort=trending).
 ### Using the API
diff --git a/docs/api-inference/tasks/text-generation.md b/docs/api-inference/tasks/text-generation.md
index 02d8a98e2..7e315ddc4 100644
--- a/docs/api-inference/tasks/text-generation.md
+++ b/docs/api-inference/tasks/text-generation.md
@@ -33,7 +33,7 @@ For more details about the `text-generation` task, check out its [dedicated page
 - [HuggingFaceH4/starchat2-15b-v0.1](https://huggingface.co/HuggingFaceH4/starchat2-15b-v0.1): Strong coding assistant model.
 - [mistralai/Mistral-Nemo-Instruct-2407](https://huggingface.co/mistralai/Mistral-Nemo-Instruct-2407): Very strong open-source large language model.
-This is only a subset of the supported models. Find the model that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag=text-generation&sort=trending).
+Explore all available models and find the one that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag=text-generation&sort=trending).
 ### Using the API
diff --git a/docs/api-inference/tasks/text-to-image.md b/docs/api-inference/tasks/text-to-image.md
index 48f7a063d..df2bb4d2c 100644
--- a/docs/api-inference/tasks/text-to-image.md
+++ b/docs/api-inference/tasks/text-to-image.md
@@ -25,10 +25,9 @@ For more details about the `text-to-image` task, check out its [dedicated page](
 ### Recommended models
 - [black-forest-labs/FLUX.1-dev](https://huggingface.co/black-forest-labs/FLUX.1-dev): One of the most powerful image generation models that can generate realistic outputs.
-- [latent-consistency/lcm-lora-sdxl](https://huggingface.co/latent-consistency/lcm-lora-sdxl): A powerful yet fast image generation model.
 - [stabilityai/stable-diffusion-3-medium-diffusers](https://huggingface.co/stabilityai/stable-diffusion-3-medium-diffusers): A powerful text-to-image model.
-This is only a subset of the supported models. Find the model that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag=text-to-image&sort=trending).
+Explore all available models and find the one that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag=text-to-image&sort=trending).
 ### Using the API
diff --git a/docs/api-inference/tasks/token-classification.md b/docs/api-inference/tasks/token-classification.md
index 5f01d352e..d1055343a 100644
--- a/docs/api-inference/tasks/token-classification.md
+++ b/docs/api-inference/tasks/token-classification.md
@@ -29,7 +29,7 @@ For more details about the `token-classification` task, check out its [dedicated
 - [blaze999/Medical-NER](https://huggingface.co/blaze999/Medical-NER): A token classification model specialized on medical entity recognition.
 - [flair/ner-english](https://huggingface.co/flair/ner-english): Flair models are typically the state of the art in named entity recognition tasks.
-This is only a subset of the supported models. Find the model that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag=token-classification&sort=trending).
+Explore all available models and find the one that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag=token-classification&sort=trending).
 ### Using the API
diff --git a/docs/api-inference/tasks/translation.md b/docs/api-inference/tasks/translation.md
index 7bfc4982e..18263b71e 100644
--- a/docs/api-inference/tasks/translation.md
+++ b/docs/api-inference/tasks/translation.md
@@ -26,7 +26,7 @@ For more details about the `translation` task, check out its [dedicated page](ht
 - [google-t5/t5-base](https://huggingface.co/google-t5/t5-base): A general-purpose Transformer that can be used to translate from English to German, French, or Romanian.
-This is only a subset of the supported models. Find the model that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag=translation&sort=trending).
+Explore all available models and find the one that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag=translation&sort=trending).
 ### Using the API
diff --git a/docs/api-inference/tasks/zero-shot-classification.md b/docs/api-inference/tasks/zero-shot-classification.md
index f8ab79739..8401bcf93 100644
--- a/docs/api-inference/tasks/zero-shot-classification.md
+++ b/docs/api-inference/tasks/zero-shot-classification.md
@@ -25,9 +25,8 @@ For more details about the `zero-shot-classification` task, check out its [dedic
 ### Recommended models
 - [facebook/bart-large-mnli](https://huggingface.co/facebook/bart-large-mnli): Powerful zero-shot text classification model.
-- [MoritzLaurer/mDeBERTa-v3-base-xnli-multilingual-nli-2mil7](https://huggingface.co/MoritzLaurer/mDeBERTa-v3-base-xnli-multilingual-nli-2mil7): Powerful zero-shot multilingual text classification model that can accomplish multiple tasks.
-This is only a subset of the supported models. Find the model that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag=zero-shot-classification&sort=trending).
+Explore all available models and find the one that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag=zero-shot-classification&sort=trending).
 ### Using the API
diff --git a/scripts/api-inference/scripts/generate.ts b/scripts/api-inference/scripts/generate.ts
index 0aae82e57..51997f008 100644
--- a/scripts/api-inference/scripts/generate.ts
+++ b/scripts/api-inference/scripts/generate.ts
@@ -318,7 +318,7 @@ For more details about the \`{{task}}\` task, check out its [dedicated page](htt
 `);
 const TIP_LIST_MODELS_LINK_TEMPLATE = Handlebars.compile(
- `This is only a subset of the supported models. Find the model that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag={{task}}&sort=trending).`,
+ `Explore all available models and find the one that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag={{task}}&sort=trending).`,
 );
 const SPECS_HEADERS = await readTemplate("specs-headers", "common");
 const PAGE_HEADER = Handlebars.compile(
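
The only hand-written change in this patch is the new wording of `TIP_LIST_MODELS_LINK_TEMPLATE` in `scripts/api-inference/scripts/generate.ts`; every `docs/api-inference/tasks/*.md` page then picks the sentence up on regeneration. Below is a minimal sketch of what that template change does when rendered, assuming only the `handlebars` dependency the script already uses; the standalone setup and the example task id are illustrative, not part of the patch.

```ts
import Handlebars from "handlebars";

// Same template string as the updated TIP_LIST_MODELS_LINK_TEMPLATE above.
const TIP_LIST_MODELS_LINK_TEMPLATE = Handlebars.compile(
  `Explore all available models and find the one that suits you best [here](https://huggingface.co/models?inference=warm&pipeline_tag={{task}}&sort=trending).`,
);

// Rendering with a task id yields the sentence that now appears on each task page.
console.log(TIP_LIST_MODELS_LINK_TEMPLATE({ task: "audio-classification" }));
// -> Explore all available models and find the one that suits you best
//    [here](https://huggingface.co/models?inference=warm&pipeline_tag=audio-classification&sort=trending).
```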