From 3e1d9899e665b5dd93134d08acdc93765ec7b126 Mon Sep 17 00:00:00 2001 From: //va Date: Fri, 8 Nov 2024 20:45:49 -0500 Subject: [PATCH 01/42] local mode guide --- docs/guides/_toc.json | 4 + docs/guides/qiskit-code-assistant-local.mdx | 203 ++++++++++++++++++++ qiskit_bot.yaml | 5 + 3 files changed, 212 insertions(+) create mode 100644 docs/guides/qiskit-code-assistant-local.mdx diff --git a/docs/guides/_toc.json b/docs/guides/_toc.json index f92b36419de..eddab4b280f 100644 --- a/docs/guides/_toc.json +++ b/docs/guides/_toc.json @@ -529,6 +529,10 @@ { "title": "Use Qiskit Code Assistant in VS Code", "url": "/guides/qiskit-code-assistant-vscode" + }, + { + "title": "Use Qiskit Code Assistant in local mode", + "url": "/guides/qiskit-code-assistant-local" } ] } diff --git a/docs/guides/qiskit-code-assistant-local.mdx b/docs/guides/qiskit-code-assistant-local.mdx new file mode 100644 index 00000000000..95f69c02802 --- /dev/null +++ b/docs/guides/qiskit-code-assistant-local.mdx @@ -0,0 +1,203 @@ +--- +title: Use Qiskit Code Assistant in local mode +description: Learn how to deploy and use the Qiskit Code Assistant model locally. +--- + +# Use Qiskit Code Assistant in local mode + +Learn how to install, configure, and use the Qiskit Code Assistant model on your local machine. + + + - This is an experimental feature available only to IBM Quantum Premium Plan users. + - Qiskit Code Assistant is in preview release status and is subject to change. + - If you have feedback or want to contact the developer team, use the [Qiskit Slack Workspace channel](https://qiskit.enterprise.slack.com/archives/C07LYA6PL83) or the related public GitHub repositories. + + +## Download the Qiskit Code Assistant model + +The Qiskit Code Assistant model is available in the GGUF file format and can be downloaded from Hugging Face in one of two ways. + +


Download from the Hugging Face website

Follow these steps to download the Qiskit Code Assistant GGUF model from the Hugging Face website:

1. Navigate to the IBM Granite model page: https://huggingface.co/ibm-granite
1. Select the Granite Qiskit Code Assistant GGUF model
1. Go to the Files and Versions tab and download the GGUF model

+ + +


Download using the Hugging Face CLI

To download the Qiskit Code Assistant GGUF model using the Hugging Face CLI, follow these steps:

1. Install the Hugging Face CLI: https://huggingface.co/docs/huggingface_hub/main/en/guides/cli
1. Log in to your Hugging Face account

    ```
    huggingface-cli login
    ```

1. Download the Qiskit Code Assistant GGUF model

    ```
    huggingface-cli download <HF-model-repo> --local-dir <local-dir>
    ```

+ + +## Get the Qiskit Code Assistant model up and running + +There are multiple ways to deploy and interact with the downloaded Qiskit Code Assistant GGUF model. These instructions outline how to get up and running on your local machine using [Ollama](https://ollama.com). + +- [Using the Ollama application](#using-the-ollama-application) +- [Using the `llama-cpp-python` package](#using-the-llama-cpp-python-package) + +### Using the Ollama application + +The Ollama application provides a simple solution to run the GGUF models locally. It is easy to use, with a CLI that makes the whole set up process, model management, and interaction fairly straightforward. It’s ideal for quick experimentation and/or for users that want fewer technical details to handle. + +#### Install Ollama + +1. Download the Ollama application: https://ollama.com/download +1. Install the downloaded file +1. Launch the installed Ollama application + + Once running, you will see the Ollama icon in the desktop menu bar, indicating that the application is running successfully. You can also verify the service is running by going to http://localhost:11434/. + +1. Try Ollama in your terminal and start running models e.g., + + ``` + ollama run llama + ``` + +#### Set up Ollama with the Qiskit Code Assistant GGUF model + +1. 
Create a `Modelfile` entering the below content and be sure to update `` to the actual path of your downloaded model + + ``` + FROM + TEMPLATE """{{ if .System }}System: + {{ .System }} + + {{ end }} + {{ if .Prompt }}Question: + {{ .Prompt }} + + {{ end }} + Answer: + ```python + {{ .Response }} + """ + + SYSTEM """""" + + PARAMETER stop "" + PARAMETER stop "" + PARAMETER stop "" + PARAMETER stop "" + PARAMETER stop "<|endoftext|>" + + PARAMETER mirostat 0 + PARAMETER mirostat_eta 0.1 + PARAMETER mirostat_tau 5.0 + PARAMETER num_ctx 10000 + PARAMETER repeat_penalty 1.0 + PARAMETER temperature 0.8 + PARAMETER seed 0 + PARAMETER tfs_z 1.0 + PARAMETER num_predict 1024 + PARAMETER top_k 50 + PARAMETER top_p 0.95 + PARAMETER min_p 0.05 + ``` + +1. Run the following command to create a custom model instance based on the `Modelfile` + + ``` + ollama create qiskit-granite-local -f ./path-to-model-file + ``` + + This process may take some time, Ollama will read the model file, initialize the model instance and configure it according to the specifications provided. + + +#### Run the Qiskit Code Assistant model in Ollama + +After the Qiskit Code Assistant GGUF model has been set up in Ollama, run the following command to launch the model and interact with it in the terminal (in chat mode) + +``` +ollama run qiskit-granite-local +``` + +Some useful commands: + +- `ollama list` - List models on your computer +- `ollama rm qiskit-granite-local` - Remove/delete the model +- `ollama show qiskit-granite-local` - Show model information +- `ollama stop qiskit-granite-local` - Stop a model which is currently running +- `ollama ps` - List which models are currently loaded + +### Using the `llama-cpp-python` package + +An alternative to the Ollama application is the `llama-cpp-python` package. It is a Python binding for `llama.cpp`. It gives you more control and flexibility to run the GGUF model locally. 
It’s ideal for users who wish to integrate the local model in their workflows and Python applications. + +1. Install `llama-cpp-python`: https://pypi.org/project/llama-cpp-python/ +1. Interact with the model from within your application using `llama_cpp` e.g., + +```python +from llama_cpp import Llama + +model_path = + +model = Llama( + model_path, + seed=17, + n_ctx=10000, + n_gpu_layers=37, # to offload in gpu, but put 0 if all in cpu + ) + +input = 'Generate a quantum circuit with 2 qubits' +raw_pred = model(input)["choices"][0]["text"] +``` + +You can also add generate parameters to the model to customize the inference: + +```python +generation_kwargs = { + "max_tokens": 512, + "echo": False, # Echo the prompt in the output + "top_k": 1 + } + +raw_pred = model(input, **generation_kwargs)["choices"][0]["text"] +``` + +### Use the Qiskit Code Assistant extensions + +The VS Code extension and JupyterLab extension for the Qiskit Code Assistant can be used to prompt the locally deployed Qiskit Code Assistant GGUF model. Once you have the Ollama application [up and running with the model](#using-the-ollama-application) you can configure the extensions to connect to the local service. + + +#### Connect with the Qiskit Code Assistant VS Code extension + +Using the Qiskit Code Assistant VS Code extension allows you to interact with the model and perform code completion while writing your code. This can work well for users looking for assistance writing Qiskit code for their Python applications. + +1. Install the [Qiskit Code Assistant VS Code extension](/guides/qiskit-code-assistant-vscode) +1. In VS Code, go to the **User Settings** and set the **Qiskit Code Assistant: Url** to the URL of your local Ollama deployment (i.e., http://localhost:11434) +1. Reload VS Code, by going to **View > Command Pallette...** and selecting **Develper: Reload Window** + +The `qiskit-granite-local` configured in Ollama should appear in the status bar and ready to use. 
+ +#### Connect with Qiskit Code Assistant JupyterLab extension + +Using the Qiskit Code Assistant JupyterLab extension allows you to interact with the model and perform code completion directly in your Jupyter Notebook. Users who predominantly work with Jupyter Notebooks can take advantage of this extension to further enhance their experience writing Qiskit code. + +1. Install the [Qiskit Code Assistant JupyterLab extension](/guides/qiskit-code-assistant-jupyterlab) +1. In JupyterLab, go to the **Settings Editor** and set the **Qiskit Code Assistant Service API** to the URL of your local Ollama deployment (i.e., http://localhost:11434) + +The `qiskit-granite-local` configured in Ollama should appear in the status bar and ready to use. diff --git a/qiskit_bot.yaml b/qiskit_bot.yaml index 812742c40cb..78525491be2 100644 --- a/qiskit_bot.yaml +++ b/qiskit_bot.yaml @@ -159,6 +159,11 @@ notifications: - "cbjuan" - "@abbycross" - "@beckykd" + "docs/guides/qiskit-code-assistant-local": + - "@cbjuan" + - "@vabarbosa" + - "@abbycross" + - "@beckykd" "docs/guides/pulse": - "`@nkanazawa1989`" - "@abbycross" From 2af223bc2fb3b43b2cbaf0061a38e08e1fb2c062 Mon Sep 17 00:00:00 2001 From: //va Date: Fri, 8 Nov 2024 21:02:49 -0500 Subject: [PATCH 02/42] Update checkPatternsIndex.ts --- scripts/js/commands/checkPatternsIndex.ts | 1 + 1 file changed, 1 insertion(+) diff --git a/scripts/js/commands/checkPatternsIndex.ts b/scripts/js/commands/checkPatternsIndex.ts index 8fac25215f2..9800ddfd78d 100644 --- a/scripts/js/commands/checkPatternsIndex.ts +++ b/scripts/js/commands/checkPatternsIndex.ts @@ -20,6 +20,7 @@ const ALLOWLIST_MISSING_FROM_INDEX: Set = new Set([ "/guides/qiskit-code-assistant", "/guides/qiskit-code-assistant-jupyterlab", "/guides/qiskit-code-assistant-vscode", + "/guides/qiskit-code-assistant-local", "/guides/addons", ]); From 8340c7f319a4182c72cdd2e2e5597813a5837740 Mon Sep 17 00:00:00 2001 From: //va Date: Fri, 8 Nov 2024 21:04:53 -0500 Subject: [PATCH 03/42] 
Update qiskit-code-assistant-local.mdx --- docs/guides/qiskit-code-assistant-local.mdx | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/docs/guides/qiskit-code-assistant-local.mdx b/docs/guides/qiskit-code-assistant-local.mdx index 95f69c02802..ed2f3eb1a2c 100644 --- a/docs/guides/qiskit-code-assistant-local.mdx +++ b/docs/guides/qiskit-code-assistant-local.mdx @@ -3,6 +3,8 @@ title: Use Qiskit Code Assistant in local mode description: Learn how to deploy and use the Qiskit Code Assistant model locally. --- +{/* cspell:ignore ollama, gguf */} + # Use Qiskit Code Assistant in local mode Learn how to install, configure, and use the Qiskit Code Assistant model on your local machine. @@ -189,7 +191,7 @@ Using the Qiskit Code Assistant VS Code extension allows you to interact with th 1. Install the [Qiskit Code Assistant VS Code extension](/guides/qiskit-code-assistant-vscode) 1. In VS Code, go to the **User Settings** and set the **Qiskit Code Assistant: Url** to the URL of your local Ollama deployment (i.e., http://localhost:11434) -1. Reload VS Code, by going to **View > Command Pallette...** and selecting **Develper: Reload Window** +1. Reload VS Code, by going to **View > Command Pallette...** and selecting **Developer: Reload Window** The `qiskit-granite-local` configured in Ollama should appear in the status bar and ready to use. 
From 5325ef92f47c4b9e0fb755ca53b5a8245d2a1f10 Mon Sep 17 00:00:00 2001 From: //va Date: Sat, 9 Nov 2024 07:54:25 -0500 Subject: [PATCH 04/42] Update qiskit_bot.yaml --- qiskit_bot.yaml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/qiskit_bot.yaml b/qiskit_bot.yaml index 78525491be2..cd12b006c99 100644 --- a/qiskit_bot.yaml +++ b/qiskit_bot.yaml @@ -162,6 +162,8 @@ notifications: "docs/guides/qiskit-code-assistant-local": - "@cbjuan" - "@vabarbosa" + - "@lucaburatti7" + - "@adarsh-tiwari17" - "@abbycross" - "@beckykd" "docs/guides/pulse": From 28c971a08426c89c0937c5c2da550003907480c8 Mon Sep 17 00:00:00 2001 From: //va Date: Sun, 10 Nov 2024 00:25:22 -0500 Subject: [PATCH 05/42] local mode guide Co-Authored-By: lucaburatti7 <32306492+lucaburatti7@users.noreply.github.com> Co-Authored-By: adarsh-tiwari17 <187846318+adarsh-tiwari17@users.noreply.github.com> --- docs/guides/_toc.json | 4 + docs/guides/qiskit-code-assistant-local.mdx | 205 ++++++++++++++++++++ qiskit_bot.yaml | 7 + scripts/js/commands/checkPatternsIndex.ts | 1 + 4 files changed, 217 insertions(+) create mode 100644 docs/guides/qiskit-code-assistant-local.mdx diff --git a/docs/guides/_toc.json b/docs/guides/_toc.json index f92b36419de..eddab4b280f 100644 --- a/docs/guides/_toc.json +++ b/docs/guides/_toc.json @@ -529,6 +529,10 @@ { "title": "Use Qiskit Code Assistant in VS Code", "url": "/guides/qiskit-code-assistant-vscode" + }, + { + "title": "Use Qiskit Code Assistant in local mode", + "url": "/guides/qiskit-code-assistant-local" } ] } diff --git a/docs/guides/qiskit-code-assistant-local.mdx b/docs/guides/qiskit-code-assistant-local.mdx new file mode 100644 index 00000000000..ed2f3eb1a2c --- /dev/null +++ b/docs/guides/qiskit-code-assistant-local.mdx @@ -0,0 +1,205 @@ +--- +title: Use Qiskit Code Assistant in local mode +description: Learn how to deploy and use the Qiskit Code Assistant model locally. 
+--- + +{/* cspell:ignore ollama, gguf */} + +# Use Qiskit Code Assistant in local mode + +Learn how to install, configure, and use the Qiskit Code Assistant model on your local machine. + + + - This is an experimental feature available only to IBM Quantum Premium Plan users. + - Qiskit Code Assistant is in preview release status and is subject to change. + - If you have feedback or want to contact the developer team, use the [Qiskit Slack Workspace channel](https://qiskit.enterprise.slack.com/archives/C07LYA6PL83) or the related public GitHub repositories. + + +## Download the Qiskit Code Assistant model + +The Qiskit Code Assistant model is available in the GGUF file format and can be downloaded from Hugging Face in one of two ways. + +


Download from the Hugging Face website

Follow these steps to download the Qiskit Code Assistant GGUF model from the Hugging Face website:

1. Navigate to the IBM Granite model page: https://huggingface.co/ibm-granite
1. Select the Granite Qiskit Code Assistant GGUF model
1. Go to the Files and Versions tab and download the GGUF model

+ + +


Download using the Hugging Face CLI

To download the Qiskit Code Assistant GGUF model using the Hugging Face CLI, follow these steps:

1. Install the Hugging Face CLI: https://huggingface.co/docs/huggingface_hub/main/en/guides/cli
1. Log in to your Hugging Face account

    ```
    huggingface-cli login
    ```

1. Download the Qiskit Code Assistant GGUF model

    ```
    huggingface-cli download <HF-model-repo> --local-dir <local-dir>
    ```

+ + +## Get the Qiskit Code Assistant model up and running + +There are multiple ways to deploy and interact with the downloaded Qiskit Code Assistant GGUF model. These instructions outline how to get up and running on your local machine using [Ollama](https://ollama.com). + +- [Using the Ollama application](#using-the-ollama-application) +- [Using the `llama-cpp-python` package](#using-the-llama-cpp-python-package) + +### Using the Ollama application + +The Ollama application provides a simple solution to run the GGUF models locally. It is easy to use, with a CLI that makes the whole set up process, model management, and interaction fairly straightforward. It’s ideal for quick experimentation and/or for users that want fewer technical details to handle. + +#### Install Ollama + +1. Download the Ollama application: https://ollama.com/download +1. Install the downloaded file +1. Launch the installed Ollama application + + Once running, you will see the Ollama icon in the desktop menu bar, indicating that the application is running successfully. You can also verify the service is running by going to http://localhost:11434/. + +1. Try Ollama in your terminal and start running models e.g., + + ``` + ollama run llama + ``` + +#### Set up Ollama with the Qiskit Code Assistant GGUF model + +1. 
Create a `Modelfile` entering the below content and be sure to update `` to the actual path of your downloaded model + + ``` + FROM + TEMPLATE """{{ if .System }}System: + {{ .System }} + + {{ end }} + {{ if .Prompt }}Question: + {{ .Prompt }} + + {{ end }} + Answer: + ```python + {{ .Response }} + """ + + SYSTEM """""" + + PARAMETER stop "" + PARAMETER stop "" + PARAMETER stop "" + PARAMETER stop "" + PARAMETER stop "<|endoftext|>" + + PARAMETER mirostat 0 + PARAMETER mirostat_eta 0.1 + PARAMETER mirostat_tau 5.0 + PARAMETER num_ctx 10000 + PARAMETER repeat_penalty 1.0 + PARAMETER temperature 0.8 + PARAMETER seed 0 + PARAMETER tfs_z 1.0 + PARAMETER num_predict 1024 + PARAMETER top_k 50 + PARAMETER top_p 0.95 + PARAMETER min_p 0.05 + ``` + +1. Run the following command to create a custom model instance based on the `Modelfile` + + ``` + ollama create qiskit-granite-local -f ./path-to-model-file + ``` + + This process may take some time, Ollama will read the model file, initialize the model instance and configure it according to the specifications provided. + + +#### Run the Qiskit Code Assistant model in Ollama + +After the Qiskit Code Assistant GGUF model has been set up in Ollama, run the following command to launch the model and interact with it in the terminal (in chat mode) + +``` +ollama run qiskit-granite-local +``` + +Some useful commands: + +- `ollama list` - List models on your computer +- `ollama rm qiskit-granite-local` - Remove/delete the model +- `ollama show qiskit-granite-local` - Show model information +- `ollama stop qiskit-granite-local` - Stop a model which is currently running +- `ollama ps` - List which models are currently loaded + +### Using the `llama-cpp-python` package + +An alternative to the Ollama application is the `llama-cpp-python` package. It is a Python binding for `llama.cpp`. It gives you more control and flexibility to run the GGUF model locally. 
It’s ideal for users who wish to integrate the local model in their workflows and Python applications. + +1. Install `llama-cpp-python`: https://pypi.org/project/llama-cpp-python/ +1. Interact with the model from within your application using `llama_cpp` e.g., + +```python +from llama_cpp import Llama + +model_path = + +model = Llama( + model_path, + seed=17, + n_ctx=10000, + n_gpu_layers=37, # to offload in gpu, but put 0 if all in cpu + ) + +input = 'Generate a quantum circuit with 2 qubits' +raw_pred = model(input)["choices"][0]["text"] +``` + +You can also add generate parameters to the model to customize the inference: + +```python +generation_kwargs = { + "max_tokens": 512, + "echo": False, # Echo the prompt in the output + "top_k": 1 + } + +raw_pred = model(input, **generation_kwargs)["choices"][0]["text"] +``` + +### Use the Qiskit Code Assistant extensions + +The VS Code extension and JupyterLab extension for the Qiskit Code Assistant can be used to prompt the locally deployed Qiskit Code Assistant GGUF model. Once you have the Ollama application [up and running with the model](#using-the-ollama-application) you can configure the extensions to connect to the local service. + + +#### Connect with the Qiskit Code Assistant VS Code extension + +Using the Qiskit Code Assistant VS Code extension allows you to interact with the model and perform code completion while writing your code. This can work well for users looking for assistance writing Qiskit code for their Python applications. + +1. Install the [Qiskit Code Assistant VS Code extension](/guides/qiskit-code-assistant-vscode) +1. In VS Code, go to the **User Settings** and set the **Qiskit Code Assistant: Url** to the URL of your local Ollama deployment (i.e., http://localhost:11434) +1. Reload VS Code, by going to **View > Command Pallette...** and selecting **Developer: Reload Window** + +The `qiskit-granite-local` configured in Ollama should appear in the status bar and ready to use. 
+ +#### Connect with Qiskit Code Assistant JupyterLab extension + +Using the Qiskit Code Assistant JupyterLab extension allows you to interact with the model and perform code completion directly in your Jupyter Notebook. Users who predominantly work with Jupyter Notebooks can take advantage of this extension to further enhance their experience writing Qiskit code. + +1. Install the [Qiskit Code Assistant JupyterLab extension](/guides/qiskit-code-assistant-jupyterlab) +1. In JupyterLab, go to the **Settings Editor** and set the **Qiskit Code Assistant Service API** to the URL of your local Ollama deployment (i.e., http://localhost:11434) + +The `qiskit-granite-local` configured in Ollama should appear in the status bar and ready to use. diff --git a/qiskit_bot.yaml b/qiskit_bot.yaml index 812742c40cb..cd12b006c99 100644 --- a/qiskit_bot.yaml +++ b/qiskit_bot.yaml @@ -159,6 +159,13 @@ notifications: - "cbjuan" - "@abbycross" - "@beckykd" + "docs/guides/qiskit-code-assistant-local": + - "@cbjuan" + - "@vabarbosa" + - "@lucaburatti7" + - "@adarsh-tiwari17" + - "@abbycross" + - "@beckykd" "docs/guides/pulse": - "`@nkanazawa1989`" - "@abbycross" diff --git a/scripts/js/commands/checkPatternsIndex.ts b/scripts/js/commands/checkPatternsIndex.ts index 8fac25215f2..9800ddfd78d 100644 --- a/scripts/js/commands/checkPatternsIndex.ts +++ b/scripts/js/commands/checkPatternsIndex.ts @@ -20,6 +20,7 @@ const ALLOWLIST_MISSING_FROM_INDEX: Set = new Set([ "/guides/qiskit-code-assistant", "/guides/qiskit-code-assistant-jupyterlab", "/guides/qiskit-code-assistant-vscode", + "/guides/qiskit-code-assistant-local", "/guides/addons", ]); From e310dfcd1b926187b7e6ecdd06620d64d3b75bdd Mon Sep 17 00:00:00 2001 From: //va Date: Sun, 10 Nov 2024 00:26:35 -0500 Subject: [PATCH 06/42] Update docs/guides/qiskit-code-assistant-local.mdx Co-authored-by: Juan Cruz-Benito --- docs/guides/qiskit-code-assistant-local.mdx | 1 - 1 file changed, 1 deletion(-) diff --git 
a/docs/guides/qiskit-code-assistant-local.mdx b/docs/guides/qiskit-code-assistant-local.mdx index ed2f3eb1a2c..9b602761fef 100644 --- a/docs/guides/qiskit-code-assistant-local.mdx +++ b/docs/guides/qiskit-code-assistant-local.mdx @@ -10,7 +10,6 @@ description: Learn how to deploy and use the Qiskit Code Assistant model locally Learn how to install, configure, and use the Qiskit Code Assistant model on your local machine. - - This is an experimental feature available only to IBM Quantum Premium Plan users. - Qiskit Code Assistant is in preview release status and is subject to change. - If you have feedback or want to contact the developer team, use the [Qiskit Slack Workspace channel](https://qiskit.enterprise.slack.com/archives/C07LYA6PL83) or the related public GitHub repositories. From 55dead549a6907fbb2b663b6d7a025a8f8c57ca3 Mon Sep 17 00:00:00 2001 From: //va Date: Sun, 10 Nov 2024 00:27:20 -0500 Subject: [PATCH 07/42] Update docs/guides/qiskit-code-assistant-local.mdx Co-authored-by: Juan Cruz-Benito --- docs/guides/qiskit-code-assistant-local.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/guides/qiskit-code-assistant-local.mdx b/docs/guides/qiskit-code-assistant-local.mdx index 9b602761fef..f067ce0fe91 100644 --- a/docs/guides/qiskit-code-assistant-local.mdx +++ b/docs/guides/qiskit-code-assistant-local.mdx @@ -16,7 +16,7 @@ Learn how to install, configure, and use the Qiskit Code Assistant model on your ## Download the Qiskit Code Assistant model -The Qiskit Code Assistant model is available in the GGUF file format and can be downloaded from Hugging Face in one of two ways. +The Qiskit Code Assistant model is available in GGUF file format and can be downloaded from the Hugging Face Hub in one of two ways.
From 209f4a54e5ea41590bcf5b0dbe57c78704b9e129 Mon Sep 17 00:00:00 2001 From: //va Date: Sun, 10 Nov 2024 00:27:45 -0500 Subject: [PATCH 08/42] Update docs/guides/qiskit-code-assistant-local.mdx Co-authored-by: Juan Cruz-Benito --- docs/guides/qiskit-code-assistant-local.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/guides/qiskit-code-assistant-local.mdx b/docs/guides/qiskit-code-assistant-local.mdx index f067ce0fe91..a2bb1fc05f2 100644 --- a/docs/guides/qiskit-code-assistant-local.mdx +++ b/docs/guides/qiskit-code-assistant-local.mdx @@ -24,7 +24,7 @@ The Qiskit Code Assistant model is available in From 8a84f8de6244ae1837b200f2155f936f9d23d882 Mon Sep 17 00:00:00 2001 From: //va Date: Sun, 10 Nov 2024 00:28:13 -0500 Subject: [PATCH 10/42] Update docs/guides/qiskit-code-assistant-local.mdx Co-authored-by: Juan Cruz-Benito --- docs/guides/qiskit-code-assistant-local.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/guides/qiskit-code-assistant-local.mdx b/docs/guides/qiskit-code-assistant-local.mdx index 5804012cf83..49d1ea3cc96 100644 --- a/docs/guides/qiskit-code-assistant-local.mdx +++ b/docs/guides/qiskit-code-assistant-local.mdx @@ -35,7 +35,7 @@ Follow these steps to download the Qiskit Code Assistant GGUF model from the Hug Download using the Hugging Face CLI -To download the Qiskit Code Assistant GGUF model using the Hugging Face CLI follow these steps: +To download the `granite-8b-qiskit` GGUF model using the Hugging Face CLI follow these steps: 1. Install the Hugging Face CLI: https://huggingface.co/docs/huggingface_hub/main/en/guides/cli 1. 
Login to your Hugging Face account From 8ac731ae5fd3ab6e9dd0251ec979259d3a92578b Mon Sep 17 00:00:00 2001 From: //va Date: Sun, 10 Nov 2024 00:28:25 -0500 Subject: [PATCH 11/42] Update docs/guides/qiskit-code-assistant-local.mdx Co-authored-by: Juan Cruz-Benito --- docs/guides/qiskit-code-assistant-local.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/guides/qiskit-code-assistant-local.mdx b/docs/guides/qiskit-code-assistant-local.mdx index 49d1ea3cc96..92016af21ae 100644 --- a/docs/guides/qiskit-code-assistant-local.mdx +++ b/docs/guides/qiskit-code-assistant-local.mdx @@ -44,7 +44,7 @@ To download the `granite-8b-qiskit` GGUF model using the Hugging Face CLI follow huggingface-cli login ``` -1. Download the Qiskit Code Assistant GGUF model +1. Download the `granite-8b-qiskit` GGUF model ``` huggingface-cli download --local-dir From 675e6997d677d8365fba47d5271f2bd4cc900ba7 Mon Sep 17 00:00:00 2001 From: //va Date: Sun, 10 Nov 2024 00:28:36 -0500 Subject: [PATCH 12/42] Update docs/guides/qiskit-code-assistant-local.mdx Co-authored-by: Juan Cruz-Benito --- docs/guides/qiskit-code-assistant-local.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/guides/qiskit-code-assistant-local.mdx b/docs/guides/qiskit-code-assistant-local.mdx index 92016af21ae..f06b07bfcff 100644 --- a/docs/guides/qiskit-code-assistant-local.mdx +++ b/docs/guides/qiskit-code-assistant-local.mdx @@ -55,7 +55,7 @@ To download the `granite-8b-qiskit` GGUF model using the Hugging Face CLI follow ## Get the Qiskit Code Assistant model up and running -There are multiple ways to deploy and interact with the downloaded Qiskit Code Assistant GGUF model. These instructions outline how to get up and running on your local machine using [Ollama](https://ollama.com). +There are multiple ways to deploy and interact with the downloaded `granite-8b-qiskit` GGUF model. 
These instructions outline how to get up and running on your local machine using [Ollama](https://ollama.com). - [Using the Ollama application](#using-the-ollama-application) - [Using the `llama-cpp-python` package](#using-the-llama-cpp-python-package) From b047aa89c8821e1dcbc6364928ad905953b58588 Mon Sep 17 00:00:00 2001 From: //va Date: Sun, 10 Nov 2024 00:29:17 -0500 Subject: [PATCH 13/42] Update docs/guides/qiskit-code-assistant-local.mdx Co-authored-by: Juan Cruz-Benito --- docs/guides/qiskit-code-assistant-local.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/guides/qiskit-code-assistant-local.mdx b/docs/guides/qiskit-code-assistant-local.mdx index f06b07bfcff..28ff573e539 100644 --- a/docs/guides/qiskit-code-assistant-local.mdx +++ b/docs/guides/qiskit-code-assistant-local.mdx @@ -75,7 +75,7 @@ The Ollama application provides a simple solution to run the GGUF models locally 1. Try Ollama in your terminal and start running models e.g., ``` - ollama run llama + ollama run granite3-dense:8b ``` #### Set up Ollama with the Qiskit Code Assistant GGUF model From 54aa0fe92a31df051bc340ede4c0c41ab7d595fb Mon Sep 17 00:00:00 2001 From: //va Date: Sun, 10 Nov 2024 00:29:28 -0500 Subject: [PATCH 14/42] Update docs/guides/qiskit-code-assistant-local.mdx Co-authored-by: Juan Cruz-Benito --- docs/guides/qiskit-code-assistant-local.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/guides/qiskit-code-assistant-local.mdx b/docs/guides/qiskit-code-assistant-local.mdx index 28ff573e539..5bd7d15ad97 100644 --- a/docs/guides/qiskit-code-assistant-local.mdx +++ b/docs/guides/qiskit-code-assistant-local.mdx @@ -122,7 +122,7 @@ The Ollama application provides a simple solution to run the GGUF models locally 1. 
Run the following command to create a custom model instance based on the `Modelfile` ``` - ollama create qiskit-granite-local -f ./path-to-model-file + ollama create granite-8b-qiskit -f ./path-to-model-file ``` This process may take some time, Ollama will read the model file, initialize the model instance and configure it according to the specifications provided. From 060e3db3f8c41d46a8a581627d5bd9289211757b Mon Sep 17 00:00:00 2001 From: //va Date: Sun, 10 Nov 2024 00:29:41 -0500 Subject: [PATCH 15/42] Update docs/guides/qiskit-code-assistant-local.mdx Co-authored-by: Juan Cruz-Benito --- docs/guides/qiskit-code-assistant-local.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/guides/qiskit-code-assistant-local.mdx b/docs/guides/qiskit-code-assistant-local.mdx index 5bd7d15ad97..b928ab9a35e 100644 --- a/docs/guides/qiskit-code-assistant-local.mdx +++ b/docs/guides/qiskit-code-assistant-local.mdx @@ -130,7 +130,7 @@ The Ollama application provides a simple solution to run the GGUF models locally #### Run the Qiskit Code Assistant model in Ollama -After the Qiskit Code Assistant GGUF model has been set up in Ollama, run the following command to launch the model and interact with it in the terminal (in chat mode) +After the `granite-8b-qiskit` GGUF model has been set up in Ollama, run the following command to launch the model and interact with it in the terminal (in chat mode) ``` ollama run qiskit-granite-local From c4c15d021311a0d21c982b14de5d9f407d1c5e13 Mon Sep 17 00:00:00 2001 From: //va Date: Sun, 10 Nov 2024 00:29:51 -0500 Subject: [PATCH 16/42] Update docs/guides/qiskit-code-assistant-local.mdx Co-authored-by: Juan Cruz-Benito --- docs/guides/qiskit-code-assistant-local.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/guides/qiskit-code-assistant-local.mdx b/docs/guides/qiskit-code-assistant-local.mdx index b928ab9a35e..8b65100f4e7 100644 --- a/docs/guides/qiskit-code-assistant-local.mdx +++ 
b/docs/guides/qiskit-code-assistant-local.mdx @@ -140,7 +140,7 @@ Some useful commands: - `ollama list` - List models on your computer - `ollama rm qiskit-granite-local` - Remove/delete the model -- `ollama show qiskit-granite-local` - Show model information +- `ollama show granite-8b-qiskit` - Show model information - `ollama stop qiskit-granite-local` - Stop a model which is currently running - `ollama ps` - List which models are currently loaded From 0bc0e455c333d00c02e5898c8a8837cba42eee7f Mon Sep 17 00:00:00 2001 From: //va Date: Sun, 10 Nov 2024 00:30:01 -0500 Subject: [PATCH 17/42] Update docs/guides/qiskit-code-assistant-local.mdx Co-authored-by: Juan Cruz-Benito --- docs/guides/qiskit-code-assistant-local.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/guides/qiskit-code-assistant-local.mdx b/docs/guides/qiskit-code-assistant-local.mdx index 8b65100f4e7..f63378d39b6 100644 --- a/docs/guides/qiskit-code-assistant-local.mdx +++ b/docs/guides/qiskit-code-assistant-local.mdx @@ -133,7 +133,7 @@ The Ollama application provides a simple solution to run the GGUF models locally After the `granite-8b-qiskit` GGUF model has been set up in Ollama, run the following command to launch the model and interact with it in the terminal (in chat mode) ``` -ollama run qiskit-granite-local +ollama run granite-8b-qiskit ``` Some useful commands: From 3119085c6f190a9998362d4a5bf4445d23dd91fb Mon Sep 17 00:00:00 2001 From: //va Date: Sun, 10 Nov 2024 00:30:10 -0500 Subject: [PATCH 18/42] Update docs/guides/qiskit-code-assistant-local.mdx Co-authored-by: Juan Cruz-Benito --- docs/guides/qiskit-code-assistant-local.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/guides/qiskit-code-assistant-local.mdx b/docs/guides/qiskit-code-assistant-local.mdx index f63378d39b6..4bce06f4bba 100644 --- a/docs/guides/qiskit-code-assistant-local.mdx +++ b/docs/guides/qiskit-code-assistant-local.mdx @@ -139,7 +139,7 @@ ollama run granite-8b-qiskit 
Some useful commands: - `ollama list` - List models on your computer -- `ollama rm qiskit-granite-local` - Remove/delete the model +- `ollama rm granite-8b-qiskit` - Remove/delete the model - `ollama show granite-8b-qiskit` - Show model information - `ollama stop qiskit-granite-local` - Stop a model which is currently running - `ollama ps` - List which models are currently loaded From 315e6054d84932ecfd9d26315a78547e4959deff Mon Sep 17 00:00:00 2001 From: //va Date: Sun, 10 Nov 2024 00:30:21 -0500 Subject: [PATCH 19/42] Update docs/guides/qiskit-code-assistant-local.mdx Co-authored-by: Juan Cruz-Benito --- docs/guides/qiskit-code-assistant-local.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/guides/qiskit-code-assistant-local.mdx b/docs/guides/qiskit-code-assistant-local.mdx index 4bce06f4bba..1459e6a5064 100644 --- a/docs/guides/qiskit-code-assistant-local.mdx +++ b/docs/guides/qiskit-code-assistant-local.mdx @@ -141,7 +141,7 @@ Some useful commands: - `ollama list` - List models on your computer - `ollama rm granite-8b-qiskit` - Remove/delete the model - `ollama show granite-8b-qiskit` - Show model information -- `ollama stop qiskit-granite-local` - Stop a model which is currently running +- `ollama stop granite-8b-qiskit` - Stop a model which is currently running - `ollama ps` - List which models are currently loaded ### Using the `llama-cpp-python` package From 4b79bac6e3212460e24baebcb07812c3e1f4458a Mon Sep 17 00:00:00 2001 From: //va Date: Sun, 10 Nov 2024 00:30:33 -0500 Subject: [PATCH 20/42] Update docs/guides/qiskit-code-assistant-local.mdx Co-authored-by: Juan Cruz-Benito --- docs/guides/qiskit-code-assistant-local.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/guides/qiskit-code-assistant-local.mdx b/docs/guides/qiskit-code-assistant-local.mdx index 1459e6a5064..27889df2a59 100644 --- a/docs/guides/qiskit-code-assistant-local.mdx +++ b/docs/guides/qiskit-code-assistant-local.mdx @@ -181,7 +181,7 @@ 
raw_pred = model(input, **generation_kwargs)["choices"][0]["text"] ### Use the Qiskit Code Assistant extensions -The VS Code extension and JupyterLab extension for the Qiskit Code Assistant can be used to prompt the locally deployed Qiskit Code Assistant GGUF model. Once you have the Ollama application [up and running with the model](#using-the-ollama-application) you can configure the extensions to connect to the local service. +The VS Code extension and JupyterLab extension for the Qiskit Code Assistant can be used to prompt the locally deployed `granite-8b-qiskit` GGUF model. Once you have the Ollama application [up and running with the model](#using-the-ollama-application) you can configure the extensions to connect to the local service. #### Connect with the Qiskit Code Assistant VS Code extension From c272d7f56cf44d5f905a3df9510de5140ac0597c Mon Sep 17 00:00:00 2001 From: //va Date: Sun, 10 Nov 2024 00:30:46 -0500 Subject: [PATCH 21/42] Update docs/guides/qiskit-code-assistant-local.mdx Co-authored-by: Juan Cruz-Benito --- docs/guides/qiskit-code-assistant-local.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/guides/qiskit-code-assistant-local.mdx b/docs/guides/qiskit-code-assistant-local.mdx index 27889df2a59..bae6109a74b 100644 --- a/docs/guides/qiskit-code-assistant-local.mdx +++ b/docs/guides/qiskit-code-assistant-local.mdx @@ -192,7 +192,7 @@ Using the Qiskit Code Assistant VS Code extension allows you to interact with th 1. In VS Code, go to the **User Settings** and set the **Qiskit Code Assistant: Url** to the URL of your local Ollama deployment (i.e., http://localhost:11434) 1. Reload VS Code, by going to **View > Command Pallette...** and selecting **Developer: Reload Window** -The `qiskit-granite-local` configured in Ollama should appear in the status bar and ready to use. +The `granite-8b-qiskit` configured in Ollama should appear in the status bar and ready to use. 
#### Connect with Qiskit Code Assistant JupyterLab extension From 71e76cc4c78a29321261a2eb01424127ba9de3da Mon Sep 17 00:00:00 2001 From: //va Date: Sun, 10 Nov 2024 00:30:56 -0500 Subject: [PATCH 22/42] Update docs/guides/qiskit-code-assistant-local.mdx Co-authored-by: Juan Cruz-Benito --- docs/guides/qiskit-code-assistant-local.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/guides/qiskit-code-assistant-local.mdx b/docs/guides/qiskit-code-assistant-local.mdx index bae6109a74b..5ecfdac75cf 100644 --- a/docs/guides/qiskit-code-assistant-local.mdx +++ b/docs/guides/qiskit-code-assistant-local.mdx @@ -201,4 +201,4 @@ Using the Qiskit Code Assistant JupyterLab extension allows you to interact with 1. Install the [Qiskit Code Assistant JupyterLab extension](/guides/qiskit-code-assistant-jupyterlab) 1. In JupyterLab, go to the **Settings Editor** and set the **Qiskit Code Assistant Service API** to the URL of your local Ollama deployment (i.e., http://localhost:11434) -The `qiskit-granite-local` configured in Ollama should appear in the status bar and ready to use. +The `granite-8b-qiskit` configured in Ollama should appear in the status bar and ready to use. From b32c04d82c64df9d36999be20d9f8480049e0dd1 Mon Sep 17 00:00:00 2001 From: //va Date: Mon, 11 Nov 2024 17:20:09 -0500 Subject: [PATCH 23/42] Update docs/guides/qiskit-code-assistant-local.mdx Co-authored-by: abbycross --- docs/guides/qiskit-code-assistant-local.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/guides/qiskit-code-assistant-local.mdx b/docs/guides/qiskit-code-assistant-local.mdx index 5ecfdac75cf..e6053c0fa62 100644 --- a/docs/guides/qiskit-code-assistant-local.mdx +++ b/docs/guides/qiskit-code-assistant-local.mdx @@ -24,7 +24,7 @@ The Qiskit Code Assistant model is available in Once running, you will see the Ollama icon in the desktop menu bar, indicating that the application is running successfully. 
You can also verify the service is running by going to http://localhost:11434/. + The application is running successfully when the Ollama icon appears in the desktop menu bar. You can also verify the service is running by going to http://localhost:11434/. 1. Try Ollama in your terminal and start running models e.g., From ceab2e1099e6ebec783f206b0ec7d360259ea6b5 Mon Sep 17 00:00:00 2001 From: //va Date: Mon, 11 Nov 2024 17:26:32 -0500 Subject: [PATCH 30/42] Update docs/guides/qiskit-code-assistant-local.mdx Co-authored-by: abbycross --- docs/guides/qiskit-code-assistant-local.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/guides/qiskit-code-assistant-local.mdx b/docs/guides/qiskit-code-assistant-local.mdx index 72450cc15cf..dc048f9cb2f 100644 --- a/docs/guides/qiskit-code-assistant-local.mdx +++ b/docs/guides/qiskit-code-assistant-local.mdx @@ -72,7 +72,7 @@ The Ollama application provides a simple solution to run the GGUF models locally The application is running successfully when the Ollama icon appears in the desktop menu bar. You can also verify the service is running by going to http://localhost:11434/. -1. Try Ollama in your terminal and start running models e.g., +1. Try Ollama in your terminal and start running models. 
For example: ``` ollama run granite3-dense:8b From 7605d1c0b9089a8cb15f0cc5e1b9eb0d8173f332 Mon Sep 17 00:00:00 2001 From: //va Date: Mon, 11 Nov 2024 17:26:59 -0500 Subject: [PATCH 31/42] Update docs/guides/qiskit-code-assistant-local.mdx Co-authored-by: abbycross --- docs/guides/qiskit-code-assistant-local.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/guides/qiskit-code-assistant-local.mdx b/docs/guides/qiskit-code-assistant-local.mdx index dc048f9cb2f..ed79a2c900d 100644 --- a/docs/guides/qiskit-code-assistant-local.mdx +++ b/docs/guides/qiskit-code-assistant-local.mdx @@ -80,7 +80,7 @@ The Ollama application provides a simple solution to run the GGUF models locally #### Set up Ollama with the Qiskit Code Assistant GGUF model -1. Create a `Modelfile` entering the below content and be sure to update `` to the actual path of your downloaded model +1. Create a `Modelfile` entering the following content and be sure to update `` to the actual path of your downloaded model. ``` FROM From e6828b533bfe07565ee8bc0d844285e6e40edc20 Mon Sep 17 00:00:00 2001 From: //va Date: Mon, 11 Nov 2024 17:27:17 -0500 Subject: [PATCH 32/42] Update docs/guides/qiskit-code-assistant-local.mdx Co-authored-by: abbycross --- docs/guides/qiskit-code-assistant-local.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/guides/qiskit-code-assistant-local.mdx b/docs/guides/qiskit-code-assistant-local.mdx index ed79a2c900d..6df1e6c0704 100644 --- a/docs/guides/qiskit-code-assistant-local.mdx +++ b/docs/guides/qiskit-code-assistant-local.mdx @@ -119,7 +119,7 @@ The Ollama application provides a simple solution to run the GGUF models locally PARAMETER min_p 0.05 ``` -1. Run the following command to create a custom model instance based on the `Modelfile` +1. Run the following command to create a custom model instance based on the `Modelfile`. 
``` ollama create granite-8b-qiskit -f ./path-to-model-file From a05b2840360238b0969969358b021eb1805a1047 Mon Sep 17 00:00:00 2001 From: //va Date: Mon, 11 Nov 2024 17:27:32 -0500 Subject: [PATCH 33/42] Update docs/guides/qiskit-code-assistant-local.mdx Co-authored-by: abbycross --- docs/guides/qiskit-code-assistant-local.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/guides/qiskit-code-assistant-local.mdx b/docs/guides/qiskit-code-assistant-local.mdx index 6df1e6c0704..ebfbbd8ec99 100644 --- a/docs/guides/qiskit-code-assistant-local.mdx +++ b/docs/guides/qiskit-code-assistant-local.mdx @@ -125,7 +125,7 @@ The Ollama application provides a simple solution to run the GGUF models locally ollama create granite-8b-qiskit -f ./path-to-model-file ``` - This process may take some time, Ollama will read the model file, initialize the model instance and configure it according to the specifications provided. + This process may take some time for Ollama to read the model file, initialize the model instance, and configure it according to the specifications provided. 
#### Run the Qiskit Code Assistant model in Ollama From 577c746a1e5c850d7b5f5065dbc9301754bf8eff Mon Sep 17 00:00:00 2001 From: //va Date: Mon, 11 Nov 2024 17:27:52 -0500 Subject: [PATCH 34/42] Update docs/guides/qiskit-code-assistant-local.mdx Co-authored-by: abbycross --- docs/guides/qiskit-code-assistant-local.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/guides/qiskit-code-assistant-local.mdx b/docs/guides/qiskit-code-assistant-local.mdx index ebfbbd8ec99..cb687ac14ab 100644 --- a/docs/guides/qiskit-code-assistant-local.mdx +++ b/docs/guides/qiskit-code-assistant-local.mdx @@ -141,7 +141,7 @@ Some useful commands: - `ollama list` - List models on your computer - `ollama rm granite-8b-qiskit` - Remove/delete the model - `ollama show granite-8b-qiskit` - Show model information -- `ollama stop granite-8b-qiskit` - Stop a model which is currently running +- `ollama stop granite-8b-qiskit` - Stop a model that is currently running - `ollama ps` - List which models are currently loaded ### Using the `llama-cpp-python` package From d73cfb1fc8d5059639e2849940847fe2f3bcd815 Mon Sep 17 00:00:00 2001 From: //va Date: Mon, 11 Nov 2024 17:32:56 -0500 Subject: [PATCH 35/42] Apply suggestions from code review Co-authored-by: abbycross --- docs/guides/qiskit-code-assistant-local.mdx | 28 ++++++++++----------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/docs/guides/qiskit-code-assistant-local.mdx b/docs/guides/qiskit-code-assistant-local.mdx index cb687ac14ab..64948f55a6e 100644 --- a/docs/guides/qiskit-code-assistant-local.mdx +++ b/docs/guides/qiskit-code-assistant-local.mdx @@ -130,7 +130,7 @@ The Ollama application provides a simple solution to run the GGUF models locally #### Run the Qiskit Code Assistant model in Ollama -After the `granite-8b-qiskit` GGUF model has been set up in Ollama, run the following command to launch the model and interact with it in the terminal (in chat mode) +After the `granite-8b-qiskit` GGUF model 
has been set up in Ollama, run the following command to launch the model and interact with it in the terminal (in chat mode). ``` ollama run granite-8b-qiskit @@ -148,8 +148,8 @@ Some useful commands: An alternative to the Ollama application is the `llama-cpp-python` package. It is a Python binding for `llama.cpp`. It gives you more control and flexibility to run the GGUF model locally. It’s ideal for users who wish to integrate the local model in their workflows and Python applications. -1. Install `llama-cpp-python`: https://pypi.org/project/llama-cpp-python/ -1. Interact with the model from within your application using `llama_cpp` e.g., +1. Install [`llama-cpp-python`](https://pypi.org/project/llama-cpp-python/) +1. Interact with the model from within your application using `llama_cpp`. For example: ```python from llama_cpp import Llama @@ -181,24 +181,24 @@ raw_pred = model(input, **generation_kwargs)["choices"][0]["text"] ### Use the Qiskit Code Assistant extensions -The VS Code extension and JupyterLab extension for the Qiskit Code Assistant can be used to prompt the locally deployed `granite-8b-qiskit` GGUF model. Once you have the Ollama application [up and running with the model](#using-the-ollama-application) you can configure the extensions to connect to the local service. +Use the VS Code extension and JupyterLab extension for the Qiskit Code Assistant to prompt the locally deployed `granite-8b-qiskit` GGUF model. Once you have the Ollama application [set up with the model](#using-the-ollama-application), you can configure the extensions to connect to the local service. #### Connect with the Qiskit Code Assistant VS Code extension -Using the Qiskit Code Assistant VS Code extension allows you to interact with the model and perform code completion while writing your code. This can work well for users looking for assistance writing Qiskit code for their Python applications. 
+With the Qiskit Code Assistant VS Code extension, you can interact with the model and perform code completion while writing your code. This can work well for users looking for assistance writing Qiskit code for their Python applications. -1. Install the [Qiskit Code Assistant VS Code extension](/guides/qiskit-code-assistant-vscode) -1. In VS Code, go to the **User Settings** and set the **Qiskit Code Assistant: Url** to the URL of your local Ollama deployment (i.e., http://localhost:11434) -1. Reload VS Code, by going to **View > Command Pallette...** and selecting **Developer: Reload Window** +1. Install the [Qiskit Code Assistant VS Code extension](/guides/qiskit-code-assistant-vscode). +1. In VS Code, go to the **User Settings** and set the **Qiskit Code Assistant: Url** to the URL of your local Ollama deployment (for example, http://localhost:11434). +1. Reload VS Code by going to **View > Command Palette...** and selecting **Developer: Reload Window**. -The `granite-8b-qiskit` configured in Ollama should appear in the status bar and ready to use. +The `granite-8b-qiskit` configured in Ollama should appear in the status bar and is then ready to use. -#### Connect with Qiskit Code Assistant JupyterLab extension +#### Connect with the Qiskit Code Assistant JupyterLab extension -Using the Qiskit Code Assistant JupyterLab extension allows you to interact with the model and perform code completion directly in your Jupyter Notebook. Users who predominantly work with Jupyter Notebooks can take advantage of this extension to further enhance their experience writing Qiskit code. +With the Qiskit Code Assistant JupyterLab extension, you can interact with the model and perform code completion directly in your Jupyter Notebook. Users who predominantly work with Jupyter Notebooks can take advantage of this extension to further enhance their experience writing Qiskit code. -1. 
Install the [Qiskit Code Assistant JupyterLab extension](/guides/qiskit-code-assistant-jupyterlab) -1. In JupyterLab, go to the **Settings Editor** and set the **Qiskit Code Assistant Service API** to the URL of your local Ollama deployment (i.e., http://localhost:11434) +1. Install the [Qiskit Code Assistant JupyterLab extension](/guides/qiskit-code-assistant-jupyterlab). +1. In JupyterLab, go to the **Settings Editor** and set the **Qiskit Code Assistant Service API** to the URL of your local Ollama deployment (for example, http://localhost:11434). -The `granite-8b-qiskit` configured in Ollama should appear in the status bar and ready to use. +The `granite-8b-qiskit` configured in Ollama should appear in the status bar and is then ready to use. From 91661aed7e2cf9851db4e32fa54a7356301b343d Mon Sep 17 00:00:00 2001 From: //va Date: Mon, 11 Nov 2024 17:45:59 -0500 Subject: [PATCH 36/42] Update docs/guides/qiskit-code-assistant-local.mdx --- docs/guides/qiskit-code-assistant-local.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/guides/qiskit-code-assistant-local.mdx b/docs/guides/qiskit-code-assistant-local.mdx index 64948f55a6e..a937842fa3c 100644 --- a/docs/guides/qiskit-code-assistant-local.mdx +++ b/docs/guides/qiskit-code-assistant-local.mdx @@ -167,7 +167,7 @@ input = 'Generate a quantum circuit with 2 qubits' raw_pred = model(input)["choices"][0]["text"] ``` -You can also add generate parameters to the model to customize the inference: +You can also add text generation parameters to the model to customize the inference: ```python generation_kwargs = { From acce03413d27aad607c92d25a576dab596df40cc Mon Sep 17 00:00:00 2001 From: //va Date: Mon, 11 Nov 2024 17:50:17 -0500 Subject: [PATCH 37/42] Update docs/guides/qiskit-code-assistant-local.mdx Co-authored-by: abbycross --- docs/guides/qiskit-code-assistant-local.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/guides/qiskit-code-assistant-local.mdx 
b/docs/guides/qiskit-code-assistant-local.mdx index a937842fa3c..f832337f56e 100644 --- a/docs/guides/qiskit-code-assistant-local.mdx +++ b/docs/guides/qiskit-code-assistant-local.mdx @@ -146,7 +146,7 @@ Some useful commands: ### Using the `llama-cpp-python` package -An alternative to the Ollama application is the `llama-cpp-python` package. It is a Python binding for `llama.cpp`. It gives you more control and flexibility to run the GGUF model locally. It’s ideal for users who wish to integrate the local model in their workflows and Python applications. +An alternative to the Ollama application is the `llama-cpp-python` package, which is a Python binding for `llama.cpp`. It gives you more control and flexibility to run the GGUF model locall, and is ideal for users who wish to integrate the local model in their workflows and Python applications. 1. Install [`llama-cpp-python`](https://pypi.org/project/llama-cpp-python/) 1. Interact with the model from within your application using `llama_cpp`. For example: From ae787f434c06e908cdc40be1bf08ec1539e71bc1 Mon Sep 17 00:00:00 2001 From: //va Date: Mon, 11 Nov 2024 17:51:03 -0500 Subject: [PATCH 38/42] Update docs/guides/qiskit-code-assistant-local.mdx --- docs/guides/qiskit-code-assistant-local.mdx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/guides/qiskit-code-assistant-local.mdx b/docs/guides/qiskit-code-assistant-local.mdx index f832337f56e..0866a2bb209 100644 --- a/docs/guides/qiskit-code-assistant-local.mdx +++ b/docs/guides/qiskit-code-assistant-local.mdx @@ -146,7 +146,7 @@ Some useful commands: ### Using the `llama-cpp-python` package -An alternative to the Ollama application is the `llama-cpp-python` package, which is a Python binding for `llama.cpp`. It gives you more control and flexibility to run the GGUF model locall, and is ideal for users who wish to integrate the local model in their workflows and Python applications. 
+An alternative to the Ollama application is the `llama-cpp-python` package, which is a Python binding for `llama.cpp`. It gives you more control and flexibility to run the GGUF model locally, and is ideal for users who wish to integrate the local model in their workflows and Python applications. 1. Install [`llama-cpp-python`](https://pypi.org/project/llama-cpp-python/) 1. Interact with the model from within your application using `llama_cpp`. For example: From 344cfce819bc535a37a2650baacf1c932646f7b4 Mon Sep 17 00:00:00 2001 From: //va Date: Mon, 11 Nov 2024 20:18:33 -0500 Subject: [PATCH 39/42] Update docs/guides/qiskit-code-assistant-local.mdx Co-authored-by: abbycross --- docs/guides/qiskit-code-assistant-local.mdx | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/docs/guides/qiskit-code-assistant-local.mdx b/docs/guides/qiskit-code-assistant-local.mdx index 0866a2bb209..bf9e0caa605 100644 --- a/docs/guides/qiskit-code-assistant-local.mdx +++ b/docs/guides/qiskit-code-assistant-local.mdx @@ -55,10 +55,7 @@ To download the `granite-8b-qiskit` GGUF model using the Hugging Face CLI, follo ## Use the Qiskit Code Assistant model -There are multiple ways to deploy and interact with the downloaded `granite-8b-qiskit` GGUF model. These instructions outline how to get up and running on your local machine using [Ollama](https://ollama.com). - -- [Using the Ollama application](#using-the-ollama-application) -- [Using the `llama-cpp-python` package](#using-the-llama-cpp-python-package) +There are multiple ways to deploy and interact with the downloaded `granite-8b-qiskit` GGUF model. This guide demonstrates using [Ollama](https://ollama.com) as follows: either with the [Ollama application](#using-the-ollama-application), or with the [`llama-cpp-python` package](#using-the-llama-cpp-python-package). 
### Using the Ollama application From 96a03ac1c945e8e3275f1743ee76117f8dbeb04d Mon Sep 17 00:00:00 2001 From: //va Date: Tue, 12 Nov 2024 20:29:31 -0500 Subject: [PATCH 40/42] additional review feedback Co-Authored-By: lucaburatti7 <32306492+lucaburatti7@users.noreply.github.com> Co-Authored-By: adarsh-tiwari17 <187846318+adarsh-tiwari17@users.noreply.github.com> Co-Authored-By: Juan Cruz-Benito <2938045+cbjuan@users.noreply.github.com> --- docs/guides/qiskit-code-assistant-local.mdx | 59 +++++++++++---------- start | 2 +- 2 files changed, 31 insertions(+), 30 deletions(-) diff --git a/docs/guides/qiskit-code-assistant-local.mdx b/docs/guides/qiskit-code-assistant-local.mdx index bf9e0caa605..90d18113fa2 100644 --- a/docs/guides/qiskit-code-assistant-local.mdx +++ b/docs/guides/qiskit-code-assistant-local.mdx @@ -24,7 +24,7 @@ The Qiskit Code Assistant model is available in " - PARAMETER stop "" - PARAMETER stop "" - PARAMETER stop "" - PARAMETER stop "<|endoftext|>" - - PARAMETER mirostat 0 - PARAMETER mirostat_eta 0.1 - PARAMETER mirostat_tau 5.0 - PARAMETER num_ctx 10000 - PARAMETER repeat_penalty 1.0 - PARAMETER temperature 0.8 - PARAMETER seed 0 - PARAMETER tfs_z 1.0 - PARAMETER num_predict 1024 - PARAMETER top_k 50 - PARAMETER top_p 0.95 - PARAMETER min_p 0.05 + PARAMETER stop "Question:" + PARAMETER stop "Answer:" + PARAMETER stop "System:" + PARAMETER stop "```" + + PARAMETER temperature 0 + PARAMETER top_k 1 ``` 1. Run the following command to create a custom model instance based on the `Modelfile`. diff --git a/start b/start index ca2bbe99db2..4d2bdcd1832 100755 --- a/start +++ b/start @@ -36,7 +36,7 @@ def main() -> None: # Keep this aligned with the Dockerfile at the root of the repository. 
cmd = [ - "docker", + "podman", "run", "-v", f"{PWD}/docs:/home/node/app/docs", From 3cfa6692a0b83e2e472850fc7be7c8a1c313ed75 Mon Sep 17 00:00:00 2001 From: //va Date: Tue, 12 Nov 2024 21:02:43 -0500 Subject: [PATCH 41/42] Update qiskit-code-assistant-local.mdx Co-Authored-By: lucaburatti7 <32306492+lucaburatti7@users.noreply.github.com> Co-Authored-By: adarsh-tiwari17 <187846318+adarsh-tiwari17@users.noreply.github.com> --- docs/guides/qiskit-code-assistant-local.mdx | 1 - 1 file changed, 1 deletion(-) diff --git a/docs/guides/qiskit-code-assistant-local.mdx b/docs/guides/qiskit-code-assistant-local.mdx index 90d18113fa2..877cdf7d358 100644 --- a/docs/guides/qiskit-code-assistant-local.mdx +++ b/docs/guides/qiskit-code-assistant-local.mdx @@ -25,7 +25,6 @@ The Qiskit Code Assistant model is available in