diff --git a/LICENSE b/LICENSE
index 75623755..059b225d 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,6 @@
MIT License
-Copyright (c) 2023 Opentensor
+Copyright © 2024 cyber~Congress
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/README.md b/README.md
index 42e9be12..d46ec15e 100644
--- a/README.md
+++ b/README.md
@@ -1,30 +1,36 @@
-
+# Welcome to Game of Tensors
-
+#### You can participate in the Game of Tensors by running a miner or a validator on the Cybertensor network, launched on the Space Pussy blockchain.
-
+- [Machine Learning Verse on SpacePussy](https://spacepussy.ai/cyberver/verses/pussy/ml/faculties)
+- [Prompting subnet on SpacePussy](https://spacepussy.ai/cyberver/verses/pussy/ml/faculties/2)
-# **Bittensor SN1**
-[![Discord Chat](https://img.shields.io/discord/308323056592486420.svg)](https://discord.gg/bittensor)
-[![License: MIT](https://img.shields.io/badge/License-MIT-yellow.svg)](https://opensource.org/licenses/MIT)
+## Launch validator:
+```bash
+python3 neurons/validator.py --netuid 2 --wallet.name=wallet_name --wallet.hotkey=default --cwtensor.network=space-pussy --axon.port=9000 --logging.logging_dir=validator_logs --logging.record_log --logging.debug --neuron.device=cuda --torch_dtype=torch.float16 --neuron.llm_max_allowed_memory_in_gb=14 --neuron.model_id=casperhansen/llama-3-8b-instruct-awq --wandb.off
+```
+
+## Launch miner:
+```bash
+python3 neurons/miners/huggingface/miner.py --netuid 2 --wallet.name=wallet_name --wallet.hotkey=default --cwtensor.network=space-pussy --axon.port=7000 --logging.logging_dir=miner_logs --logging.record_log --logging.debug --neuron.device=cuda --torch_dtype=torch.float16 --neuron.llm_max_allowed_memory_in_gb=8 --neuron.model_id=casperhansen/llama-3-8b-instruct-awq --wandb.off
+```
----
-### The Incentivized Internet
+## Notes
+```bash
+# activate venv
+. venv/bin/activate
-[Discord](https://discord.gg/bittensor) • [Network](https://taostats.io/) • [Research](https://bittensor.com/whitepaper)
+# install requirements
+pip3 install -e .
-
+# if you have errors
+pip3 uninstall uvloop -y
+```
---
-This repository is the **official codebase for Bittensor Subnet 1 (SN1) v1.0.0+, which was released on 22nd January 2024**. To learn more about the Bittensor project and the underlying mechanics, [read here.](https://docs.bittensor.com/).
+This repository is the official codebase for Cybertensor Subnet 1 (SN1), which is a fork of Bittensor Subnet 1 (SN1). To learn more about the Cybertensor project and the underlying mechanics, [read here](https://docs.spacepussy.ai/).
# Introduction
@@ -58,10 +64,10 @@ You can use the following command to run a miner or a validator.
```bash
python
--netuid 1
- --subtensor.network
+ --cwtensor.network
--neuron.device cuda
- --wallet.name # Must be created using the bittensor-cli
- --wallet.hotkey # Must be created using the bittensor-cli
+ --wallet.name # Must be created using the cybertensor-cli
+ --wallet.hotkey # Must be created using the cybertensor-cli
--logging.debug # Run in debug mode, alternatively --logging.trace for trace mode
--axon.port # VERY IMPORTANT: set the port to be one of the open TCP ports on your machine
```
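For `--axon.port`, you can confirm the port you chose is actually free before launching. A standalone sketch using only the Python standard library (not part of this repo):
```python
# Minimal sketch: check that the port you plan to pass as --axon.port is free.
import socket


def port_is_free(port: int, host: str = "0.0.0.0") -> bool:
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        try:
            s.bind((host, port))
            return True   # nothing is bound here; safe to use
        except OSError:
            return False  # already in use; pick another port


print(port_is_free(9000))
```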
@@ -77,27 +83,21 @@ For ease of use, you can run the scripts as well with PM2. Installation of PM2 i
sudo apt update && sudo apt install jq && sudo apt install npm && sudo npm install pm2 -g && pm2 update
```
-Example of running a SOLAR miner:
-```bash
-pm2 start neurons/miners/huggingface/miner.py --interpreter python3 --name solar_miner -- --netuid 1 --subtensor.network finney --wallet.name my_wallet --wallet.hotkey m1 --neuron.model_id casperhansen/llama-3-70b-instruct-awq --axon.port 21988 --logging.debug
-```
-
-# Testnet
-We highly recommend that you run your miners on testnet before deploying on main. This is give you an opportunity to debug your systems, and ensure that you will not lose valuable immunity time. The SN1 testnet is **netuid 61**.
-
-In order to run on testnet, you will need to go through the same hotkey registration proceure as on main, but using **testtao**. You will need to ask for some in the community discord if you do not have any.
-
-To run:
+Example of running a miner:
```bash
-pm2 start neurons/miners/huggingface/miner.py --interpreter python3 --name solar_miner -- --netuid 61 --subtensor.network test --wallet.name my_test_wallet --wallet.hotkey m1 --neuron.model_id casperhansen/llama-3-70b-instruct-awq --axon.port 21988 --logging.debug
+python neurons/miners/huggingface/miner.py --netuid 1 --wallet.name=miner --wallet.hotkey=default --cwtensor.network=space-pussy --axon.port=10000 --logging.logging_dir=miner_logs --logging.record_log --logging.trace --neuron.device=cuda --neuron.model_id=casperhansen/llama-3-8b-instruct-awq
+```
+or
+```bash
+pm2 start neurons/miners/huggingface/miner.py --interpreter=python3 --name=miner -- --netuid=1 --wallet.name=miner --wallet.hotkey=default --cwtensor.network=space-pussy --axon.port=10000 --logging.logging_dir=miner_logs --logging.record_log --neuron.model_id=casperhansen/llama-3-8b-instruct-awq --logging.debug
```
# Limitations
> Important: vLLM currently faces a [notable limitation](https://github.com/vllm-project/vllm/issues/3012) in designating a specific GPU for model execution via code. Consequently, to employ a particular CUDA device for your model's operations, it's necessary to manually adjust your environment variable `CUDA_VISIBLE_DEVICES`. For instance, setting `export CUDA_VISIBLE_DEVICES=1,2` will explicitly define the CUDA devices available for use.
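Because CUDA enumerates devices on first initialization, the variable must be set before any CUDA-aware import. A minimal sketch of doing this from Python, assuming you want only GPU 1 visible:
```python
# Minimal sketch: set CUDA_VISIBLE_DEVICES before torch/vllm are imported.
import os

os.environ["CUDA_VISIBLE_DEVICES"] = "1"

import torch  # imported only after the variable is set

print(torch.cuda.device_count())  # now reports a single visible device
```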
# Resources
-The archiecture and methodology of SN1 is complex, and as such we have created a comprehensive resource to outline our design. Furthermore, we have strict requirements for how miners should interact with the network. Below are the currently available resources for additional information:
+The architecture and methodology of SN1 is complex, and as such we have created a comprehensive resource to outline our design. Furthermore, we have strict requirements for how miners should interact with the network. Below are the currently available resources for additional information:
1. [SN1 architecture details](docs/SN1_validation.md)
2. [StreamMiner requirements](docs/stream_miner_template.md)
diff --git a/contrib/CODE_REVIEW_DOCS.md b/contrib/CODE_REVIEW_DOCS.md
index 9909606a..72a809d5 100644
--- a/contrib/CODE_REVIEW_DOCS.md
+++ b/contrib/CODE_REVIEW_DOCS.md
@@ -33,7 +33,7 @@ higher in terms of discussion and peer review requirements, keeping in mind that
mistakes could be very costly to the wider community. This includes refactoring
of consensus-critical code.
-Where a patch set proposes to change the Bittensor consensus, it must have been
+Where a patch set proposes to change the Cybertensor consensus, it must have been
discussed extensively on the discord server and other channels, be accompanied by a widely
discussed BIP and have a generally widely perceived technical consensus of being
a worthwhile change based on the judgement of the maintainers.
diff --git a/contrib/CONTRIBUTING.md b/contrib/CONTRIBUTING.md
index 77847b2b..10a4da04 100644
--- a/contrib/CONTRIBUTING.md
+++ b/contrib/CONTRIBUTING.md
@@ -1,6 +1,6 @@
-# Contributing to Bittensor Subnet Development
+# Contributing to Cybertensor Subnet Development
-The following is a set of guidelines for contributing to the Bittensor ecosystem. These are **HIGHLY RECOMMENDED** guidelines, but not hard-and-fast rules. Use your best judgment, and feel free to propose changes to this document in a pull request.
+The following is a set of guidelines for contributing to the Cybertensor ecosystem. These are **HIGHLY RECOMMENDED** guidelines, but not hard-and-fast rules. Use your best judgment, and feel free to propose changes to this document in a pull request.
## Table Of Contents
1. [How Can I Contribute?](#how-can-i-contribute)
@@ -19,9 +19,9 @@ The following is a set of guidelines for contributing to the Bittensor ecosystem
We welcome community contributions. Please make an issue or a PR if you encounter any issues or want to make suggested improvements.
## Communication Channels
-You can reach out to the SN1 team on the offical bittensor discord channel [here](https://discord.com/channels/799672011265015819/1161764867166961704).
+You can reach out to the SN1 team on the official Cybertensor Discord channel [here]().
-> Please follow the Bittensor Subnet [style guide](./STYLE.md) regardless of your contribution type.
+> Please follow the Cybertensor Subnet [style guide](./STYLE.md) regardless of your contribution type.
Here is a high-level summary:
- Code consistency is crucial; adhere to established programming language conventions.
@@ -40,7 +40,7 @@ Here is a high-level summary:
### Code Contribution General Guidelines
-> Review the Bittensor Subnet [style guide](./STYLE.md) and [development workflow](./DEVELOPMENT_WORKFLOW.md) before contributing.
+> Review the Cybertensor Subnet [style guide](./STYLE.md) and [development workflow](./DEVELOPMENT_WORKFLOW.md) before contributing.
#### Pull Request Philosophy
@@ -99,7 +99,7 @@ After you submit a pull request, it will be reviewed by the maintainers. They ma
> Note: Be sure to merge the latest from "upstream" before making a pull request:
```bash
-git remote add upstream https://github.com/opentensor/bittensor.git # TODO(developer): replace with your repo URL
+git remote add upstream https://github.com/cybercongress/cybertensor.git # TODO(developer): replace with your repo URL
git fetch upstream
git merge upstream/
git push origin
@@ -166,28 +166,28 @@ Please track bugs as GitHub issues.
Explain the problem and include additional details to help maintainers reproduce the problem:
* **Use a clear and descriptive title** for the issue to identify the problem.
-* **Describe the exact steps which reproduce the problem** in as many details as possible. For example, start by explaining how you started the application, e.g. which command exactly you used in the terminal, or how you started Bittensor otherwise. When listing steps, **don't just say what you did, but explain how you did it**. For example, if you ran with a set of custom configs, explain if you used a config file or command line arguments.
+* **Describe the exact steps which reproduce the problem** in as much detail as possible. For example, start by explaining how you started the application, e.g. which command exactly you used in the terminal, or how you started Cybertensor otherwise. When listing steps, **don't just say what you did, but explain how you did it**. For example, if you ran with a set of custom configs, explain if you used a config file or command line arguments.
* **Provide specific examples to demonstrate the steps**. Include links to files or GitHub projects, or copy/pasteable snippets, which you use in those examples. If you're providing snippets in the issue, use [Markdown code blocks](https://help.github.com/articles/markdown-basics/#multiple-lines).
* **Describe the behavior you observed after following the steps** and point out what exactly is the problem with that behavior.
* **Explain which behavior you expected to see instead and why.**
* **Include screenshots and animated GIFs** which show you following the described steps and clearly demonstrate the problem. You can use [this tool](https://www.cockos.com/licecap/) to record GIFs on macOS and Windows, and [this tool](https://github.com/colinkeenan/silentcast) or [this tool](https://github.com/GNOME/byzanz) on Linux.
-* **If you're reporting that Bittensor crashed**, include a crash report with a stack trace from the operating system. On macOS, the crash report will be available in `Console.app` under "Diagnostic and usage information" > "User diagnostic reports". Include the crash report in the issue in a [code block](https://help.github.com/articles/markdown-basics/#multiple-lines), a [file attachment](https://help.github.com/articles/file-attachments-on-issues-and-pull-requests/), or put it in a [gist](https://gist.github.com/) and provide link to that gist.
+* **If you're reporting that Cybertensor crashed**, include a crash report with a stack trace from the operating system. On macOS, the crash report will be available in `Console.app` under "Diagnostic and usage information" > "User diagnostic reports". Include the crash report in the issue in a [code block](https://help.github.com/articles/markdown-basics/#multiple-lines), a [file attachment](https://help.github.com/articles/file-attachments-on-issues-and-pull-requests/), or put it in a [gist](https://gist.github.com/) and provide link to that gist.
* **If the problem is related to performance or memory**, include a CPU profile capture with your report, if you're using a GPU then include a GPU profile capture as well. Look into the [PyTorch Profiler](https://pytorch.org/tutorials/recipes/recipes/profiler_recipe.html) to look at memory usage of your model.
* **If the problem wasn't triggered by a specific action**, describe what you were doing before the problem happened and share more information using the guidelines below.
Provide more context by answering these questions:
* **Did the problem start happening recently** (e.g. after updating to a new version) or was this always a problem?
-* If the problem started happening recently, **can you reproduce the problem in an older version of Bittensor?**
+* If the problem started happening recently, **can you reproduce the problem in an older version of Cybertensor?**
* **Can you reliably reproduce the issue?** If not, provide details about how often the problem happens and under which conditions it normally happens.
Include details about your configuration and environment:
-* **Which version of Bittensor Subnet are you using?**
+* **Which version of Cybertensor Subnet are you using?**
* **What commit hash are you on?** You can get the exact commit hash by checking `git log` and pasting the full commit hash.
* **What's the name and version of the OS you're using**?
-* **Are you running Bittensor Subnet in a virtual machine?** If so, which VM software are you using and which operating systems and versions are used for the host and the guest?
-* **Are you running Bittensor Subnet in a dockerized container?** If so, have you made sure that your docker container contains your latest changes and is up to date with Master branch?
+* **Are you running Cybertensor Subnet in a virtual machine?** If so, which VM software are you using and which operating systems and versions are used for the host and the guest?
+* **Are you running Cybertensor Subnet in a dockerized container?** If so, have you made sure that your docker container contains your latest changes and is up to date with the master branch?
### Suggesting Enhancements and Features
@@ -210,4 +210,4 @@ When you are creating an enhancement suggestion, please [include as many details
* **List some other text editors or applications where this enhancement exists.**
* **Specify the name and version of the OS you're using.**
-Thank you for considering contributing to Bittensor! Any help is greatly appreciated along this journey to incentivize open and permissionless intelligence.
+Thank you for considering contributing to Cybertensor! Any help is greatly appreciated along this journey to incentivize open and permissionless intelligence.
diff --git a/contrib/DEVELOPMENT_WORKFLOW.md b/contrib/DEVELOPMENT_WORKFLOW.md
index 13bb07b2..4e384b62 100644
--- a/contrib/DEVELOPMENT_WORKFLOW.md
+++ b/contrib/DEVELOPMENT_WORKFLOW.md
@@ -1,10 +1,10 @@
-# Bittensor Subnet Development Workflow
+# Cybertensor Subnet Development Workflow
-This is a highly advisable workflow to follow to keep your subtensor project organized and foster ease of contribution.
+Following this workflow is highly advisable: it keeps your project organized and fosters ease of contribution.
## Table of contents
-- [Bittensor Subnet Development Workflow](#bittensor-subnet-development-workflow)
+- [Cybertensor Subnet Development Workflow](#cybertensor-subnet-development-workflow)
- [Main Branches](#main-branches)
- [Development Model](#development-model)
- [Feature Branches](#feature-branches)
@@ -23,13 +23,13 @@ This is a highly advisable workflow to follow to keep your subtensor project org
## Main Branches
-Bittensor's codebase consists of two main branches: **main** and **staging**.
+Cybertensor's codebase consists of two main branches: **main** and **staging**.
**main**
-- This is Bittensor's live production branch, which should only be updated by the core development team. This branch is protected, so refrain from pushing or merging into it unless authorized.
+- This is Cybertensor's live production branch, which should only be updated by the core development team. This branch is protected, so refrain from pushing or merging into it unless authorized.
**staging**
-- This branch is continuously updated and is where you propose and merge changes. It's essentially Bittensor's active development branch.
+- This branch is continuously updated and is where you propose and merge changes. It's essentially Cybertensor's active development branch.
## Development Model
@@ -92,7 +92,7 @@ So, what you have to keep in mind is:
#### Finishing a Release Branch
-This involves releasing stable code and generating a new version for bittensor.
+This involves releasing stable code and generating a new version for Cybertensor.
1. Switch branch to main: `git checkout main`
2. Merge release branch into main: `git merge --no-ff release/3.4.0/optional-descriptive-message`
@@ -139,7 +139,7 @@ Continuous Integration (CI) is a software development practice where members of
Continuous Deployment (CD) is a software engineering approach in which software functionalities are delivered frequently through automated deployments.
-- **CircleCI job**: Create jobs in CircleCI to automate the merging of staging into main and release version (needed to release code) and building and testing Bittensor (needed to merge PRs).
+- **CircleCI job**: Create jobs in CircleCI to automate the merging of staging into main and release version (needed to release code) and building and testing Cybertensor (needed to merge PRs).
> It is highly recommended to set up your own circleci pipeline with your subnet
@@ -151,7 +151,7 @@ Release notes provide documentation for each version released to the users, high
## Pending Tasks
-Follow these steps when you are contributing to the bittensor subnet:
+Follow these steps when you are contributing to the Cybertensor subnet:
- Determine if main and staging are different
- Determine what is in staging that is not merged yet
@@ -160,6 +160,6 @@ Follow these steps when you are contributing to the bittensor subnet:
- When merged into main, generate GitHub release and release notes.
- CircleCI jobs
- Merge staging into main and release version (needed to release code)
- - Build and Test Bittensor (needed to merge PRs)
+ - Build and Test Cybertensor (needed to merge PRs)
-This document can be improved as the Bittensor project continues to develop and change.
+This document can be improved as the Cybertensor project continues to develop and change.
diff --git a/docs/stream_miner_template.md b/docs/stream_miner_template.md
index f09c1eaa..757a9320 100644
--- a/docs/stream_miner_template.md
+++ b/docs/stream_miner_template.md
@@ -31,13 +31,13 @@ def forward(self, synapse: StreamPromptingSynapse) -> Awaitable:
buffer.append(token)
if time.time() - init_time > timeout_threshold:
- bt.logging.debug(f"⏰ Timeout reached, stopping streaming")
+ ct.logging.debug(f"⏰ Timeout reached, stopping streaming")
timeout_reached = True
break
if len(buffer) == self.config.neuron.streaming_batch_size:
joined_buffer = "".join(buffer)
- bt.logging.debug(f"Streamed tokens: {joined_buffer}")
+ ct.logging.debug(f"Streamed tokens: {joined_buffer}")
await send(
{
@@ -53,7 +53,7 @@ def forward(self, synapse: StreamPromptingSynapse) -> Awaitable:
): # Don't send the last buffer of data if timeout.
joined_buffer = "".join(buffer)
temp_completion += joined_buffer
- bt.logging.debug(f"Streamed tokens: {joined_buffer}")
+ ct.logging.debug(f"Streamed tokens: {joined_buffer}")
await send(
{
@@ -64,7 +64,7 @@ def forward(self, synapse: StreamPromptingSynapse) -> Awaitable:
)
except Exception as e:
- bt.logging.error(f"Error in forward: {e}")
+ ct.logging.error(f"Error in forward: {e}")
if self.config.neuron.stop_on_forward_exception:
self.should_exit = True
@@ -84,5 +84,5 @@ This branch contains multiple implementations. To see:
1. Langchain+OpenAI implementation, refer to `prompting/miners/openai_miner.py`
2. HuggingFace implementation, refer to `prompting/miners/hf_miner.py`
-It is **necessary** that forward method of the miner class returns this `synapse.create_streaming_response(token_streamer)`. As seen, the `token_streamer` is a partial function that takes in a `send` packet. This packet will be sent by the bittensor middleware to facilitate the communications between the validator and the miner. You do **not** need to modify any logic around the `send` packet, as this is the same for **all** miners.
+It is **necessary** that the forward method of the miner class returns `synapse.create_streaming_response(token_streamer)`. As seen above, `token_streamer` is a partial function that takes in a `send` packet. This packet is sent by the cybertensor middleware to facilitate communication between the validator and the miner. You do **not** need to modify any logic around the `send` packet, as this is the same for **all** miners.
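+
+For orientation, here is a minimal sketch of the required shape. It is not the repo's implementation: `self.model.stream(...)` is a hypothetical stand-in for your generation backend, and the `send` packets mirror the snippets above.
+
+```python
+from functools import partial
+from typing import Awaitable, Callable
+
+
+def forward(self, synapse: StreamPromptingSynapse) -> Awaitable:
+    async def _prompt(message: str, send: Callable):
+        buffer = []
+        for token in self.model.stream(message):  # hypothetical backend call
+            buffer.append(token)
+            if len(buffer) == self.config.neuron.streaming_batch_size:
+                # Send a full batch of tokens and keep the stream open.
+                await send(
+                    {
+                        "type": "http.response.body",
+                        "body": "".join(buffer).encode("utf-8"),
+                        "more_body": True,
+                    }
+                )
+                buffer = []
+        # Flush any remainder and close the stream.
+        await send(
+            {
+                "type": "http.response.body",
+                "body": "".join(buffer).encode("utf-8"),
+                "more_body": False,
+            }
+        )
+
+    # The partial binds the prompt; the cybertensor middleware supplies `send`.
+    token_streamer = partial(_prompt, synapse.messages[-1])
+    return synapse.create_streaming_response(token_streamer)
+```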
diff --git a/neurons/miners/huggingface/README.md b/neurons/miners/huggingface/README.md
index 2bf4602a..499ba507 100644
--- a/neurons/miners/huggingface/README.md
+++ b/neurons/miners/huggingface/README.md
@@ -1,5 +1,5 @@
-# Hugging Face Bittensor Miner
-This repository contains a Bittensor Miner integrated with 🤗 Hugging Face pipelines. The miner connects to the Bittensor network, registers its wallet, and serves a hugging face model to the network.
+# Hugging Face Cybertensor Miner
+This repository contains a Cybertensor Miner integrated with 🤗 Hugging Face pipelines. The miner connects to the Cybertensor network, registers its wallet, and serves a Hugging Face model to the network.
## Prerequisites
@@ -14,7 +14,7 @@ git clone https://github.com/opentensor/prompting.git
2. Install the required packages for the [repository requirements](../../../requirements.txt) with `pip install -r requirements.txt`
-For more configuration options related to the wallet, axon, subtensor, logging, and metagraph, please refer to the Bittensor documentation.
+For more configuration options related to the wallet, axon, cwtensor, logging, and metagraph, please refer to the Cybertensor documentation.
## Example Usage
@@ -29,7 +29,7 @@ Here are some model examples that could be leveraged by the HuggingFace Miner, a
> \* Big models such as mixtral are very costly to run and optimize, so always bear in mind the trade-offs between model speed, model quality and infra cost.
-To run the Hugging Face Bittensor Miner with default settings, use the following command:
+To run the Hugging Face Cybertensor Miner with default settings, use the following command:
```bash
python3 neurons/miners/huggingface/miner.py \
--wallet.name <> \
diff --git a/neurons/miners/huggingface/miner.py b/neurons/miners/huggingface/miner.py
index 29eaa640..e08b7b63 100644
--- a/neurons/miners/huggingface/miner.py
+++ b/neurons/miners/huggingface/miner.py
@@ -1,5 +1,6 @@
# The MIT License (MIT)
# Copyright © 2024 Yuma Rao
+# Copyright © 2024 cyber~Congress
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
@@ -15,7 +16,7 @@
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import time
-import bittensor as bt
+import cybertensor as ct
from prompting.miners import HuggingFaceMiner
@@ -27,5 +28,5 @@
time.sleep(5)
if miner.should_exit:
- bt.logging.warning("Ending miner...")
+ ct.logging.warning("Ending miner...")
break
diff --git a/neurons/miners/openai/README.md b/neurons/miners/openai/README.md
index 421bc819..5154a24b 100644
--- a/neurons/miners/openai/README.md
+++ b/neurons/miners/openai/README.md
@@ -1,5 +1,5 @@
-# OpenAI Bittensor Miner
-This repository contains a Bittensor Miner that uses langchain and OpenAI's model as its synapse. The miner connects to the Bittensor network, registers its wallet, and serves the GPT model to the network.
+# OpenAI Cybertensor Miner
+This repository contains a Cybertensor Miner that uses LangChain and OpenAI's model as its synapse. The miner connects to the Cybertensor network, registers its wallet, and serves the GPT model to the network.
## Prerequisites
@@ -20,11 +20,11 @@ git clone https://github.com/opentensor/prompting.git
echo OPENAI_API_KEY=YOUR-KEY > .env
```
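A hedged sketch of how that key is typically read back at runtime (this assumes the `python-dotenv` package; the repo's actual loading code is not shown here):
```python
# Minimal sketch: load OPENAI_API_KEY from the .env file created above.
import os

from dotenv import load_dotenv  # pip install python-dotenv

load_dotenv()
api_key = os.environ["OPENAI_API_KEY"]
```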
-For more configuration options related to the wallet, axon, subtensor, logging, and metagraph, please refer to the Bittensor documentation.
+For more configuration options related to the wallet, axon, cwtensor, logging, and metagraph, please refer to the Cybertensor documentation.
## Example Usage
-To run the OpenAI Bittensor Miner with default settings, use the following command:
+To run the OpenAI Cybertensor Miner with default settings, use the following command:
```bash
python3 neurons/miners/openai/miner.py \
diff --git a/neurons/miners/openai/miner.py b/neurons/miners/openai/miner.py
index e8e5a254..0efc10df 100644
--- a/neurons/miners/openai/miner.py
+++ b/neurons/miners/openai/miner.py
@@ -1,5 +1,6 @@
# The MIT License (MIT)
# Copyright © 2024 Yuma Rao
+# Copyright © 2024 cyber~Congress
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
@@ -15,7 +16,7 @@
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import time
-import bittensor as bt
+import cybertensor as ct
from prompting.miners import OpenAIMiner
# This is the main function, which runs the miner.
@@ -26,5 +27,5 @@
time.sleep(5)
if miner.should_exit:
- bt.logging.warning("Ending miner...")
+ ct.logging.warning("Ending miner...")
break
diff --git a/neurons/miners/test/echo.py b/neurons/miners/test/echo.py
index 697dd9f5..ca3eef32 100644
--- a/neurons/miners/test/echo.py
+++ b/neurons/miners/test/echo.py
@@ -1,5 +1,6 @@
# The MIT License (MIT)
# Copyright © 2024 Yuma Rao
+# Copyright © 2024 cyber~Congress
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
@@ -15,7 +16,7 @@
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import time
-import bittensor as bt
+import cybertensor as ct
from prompting.miners import EchoMiner
diff --git a/neurons/miners/test/mock.py b/neurons/miners/test/mock.py
index 6e2d6716..34fc83a1 100644
--- a/neurons/miners/test/mock.py
+++ b/neurons/miners/test/mock.py
@@ -1,5 +1,6 @@
# The MIT License (MIT)
# Copyright © 2024 Yuma Rao
+# Copyright © 2024 cyber~Congress
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
@@ -15,7 +16,7 @@
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import time
-import bittensor as bt
+import cybertensor as ct
from prompting.miners import MockMiner
diff --git a/neurons/miners/test/phrase.py b/neurons/miners/test/phrase.py
index a3c92984..315e4525 100644
--- a/neurons/miners/test/phrase.py
+++ b/neurons/miners/test/phrase.py
@@ -1,5 +1,6 @@
# The MIT License (MIT)
# Copyright © 2024 Yuma Rao
+# Copyright © 2024 cyber~Congress
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
@@ -15,7 +16,7 @@
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import time
-import bittensor as bt
+import cybertensor as ct
from prompting.miners import PhraseMiner
diff --git a/neurons/validator.py b/neurons/validator.py
index 62c01d40..a4df6612 100644
--- a/neurons/validator.py
+++ b/neurons/validator.py
@@ -1,5 +1,6 @@
# The MIT License (MIT)
# Copyright © 2024 Yuma Rao
+# Copyright © 2024 cyber~Congress
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
@@ -15,7 +16,7 @@
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
import time
-import bittensor as bt
+import cybertensor as ct
from prompting.validator import Validator
@@ -23,11 +24,11 @@
if __name__ == "__main__":
with Validator() as v:
while True:
- bt.logging.info(
- f"Validator running:: network: {v.subtensor.network} | block: {v.block} | step: {v.step} | uid: {v.uid} | last updated: {v.block-v.metagraph.last_update[v.uid]} | vtrust: {v.metagraph.validator_trust[v.uid]:.3f} | emission {v.metagraph.emission[v.uid]:.3f}"
+ ct.logging.info(
+ f"Validator running:: network: {v.cwtensor.network} | block: {v.block} | step: {v.step} | uid: {v.uid} | last updated: {v.block-v.metagraph.last_update[v.uid]} | vtrust: {v.metagraph.validator_trust[v.uid]:.3f} | emission {v.metagraph.emission[v.uid]:.3f}"
)
time.sleep(5)
if v.should_exit:
- bt.logging.warning("Ending validator...")
+ ct.logging.warning("Ending validator...")
break
diff --git a/prompting/__init__.py b/prompting/__init__.py
index 476009f4..d37e443e 100644
--- a/prompting/__init__.py
+++ b/prompting/__init__.py
@@ -1,5 +1,6 @@
# The MIT License (MIT)
# Copyright © 2024 Yuma Rao
+# Copyright © 2024 cyber~Congress
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
@@ -16,34 +17,32 @@
# DEALINGS IN THE SOFTWARE.
# Define the version of the template module.
-__version__ = "2.3.1"
+__version__ = "0.0.1"
version_split = __version__.split(".")
__spec_version__ = (
- (10000 * int(version_split[0]))
- + (100 * int(version_split[1]))
- + (1 * int(version_split[2]))
+ (10000 * int(version_split[0]))
+ + (100 * int(version_split[1]))
+ + (1 * int(version_split[2]))
)
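+# Example (follows directly from the formula above, 10000*major + 100*minor + patch):
+#   "0.0.1" -> 1, "2.3.1" -> 20301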
# Import all submodules.
-from . import protocol
+from . import agent
from . import base
+from . import conversation
+from . import dendrite
+from . import forward
+from . import protocol
from . import rewards
+from . import shared
from . import tasks
from . import tools
from . import utils
-
-from . import forward
-from . import agent
-from . import conversation
-from . import dendrite
-from . import shared
from . import validator
from .llms import hf
-
+from .task_registry import TASK_REGISTRY
from .tasks import TASKS
from .tools import DATASETS
-from .task_registry import TASK_REGISTRY
# Assert that all tasks have a dataset, and all tasks/datasets are in the TASKS and DATASETS dictionaries.
registry_missing_task = set(TASKS.keys()) - set(TASK_REGISTRY.keys())
diff --git a/prompting/agent.py b/prompting/agent.py
index b4b67255..5db418fe 100644
--- a/prompting/agent.py
+++ b/prompting/agent.py
@@ -1,5 +1,6 @@
# The MIT License (MIT)
# Copyright © 2024 Yuma Rao
+# Copyright © 2024 cyber~Congress
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
@@ -16,7 +17,7 @@
# DEALINGS IN THE SOFTWARE.
import textwrap
import time
-import bittensor as bt
+import cybertensor as ct
from dataclasses import asdict
from prompting.tasks import Task
from prompting.llms import HuggingFaceLLM, vLLM_LLM
@@ -76,7 +77,7 @@ def __init__(
)
if begin_conversation:
- bt.logging.info("🤖 Generating challenge query...")
+ ct.logging.info("🤖 Generating challenge query...")
# initiates the conversation with the miner
self.challenge = self.create_challenge()
@@ -96,7 +97,7 @@ def create_challenge(self) -> str:
elif self.task.challenge_type == 'query':
self.challenge = self.task.query
else:
- bt.logging.error(f"Task {self.task.name} has challenge type of: {self.task.challenge_type} which is not supported.")
+ ct.logging.error(f"Task {self.task.name} has challenge type of: {self.task.challenge_type} which is not supported.")
self.challenge = self.task.format_challenge(self.challenge)
self.challenge_time = time.time() - t0
@@ -130,11 +131,11 @@ def update_progress(
self.task.complete = True
self.messages.append({"content": top_response, "role": "user"})
- bt.logging.info("Agent finished its goal")
+ ct.logging.info("Agent finished its goal")
return
if continue_conversation:
- bt.logging.info(
+ ct.logging.info(
"↪ Agent did not finish its goal, continuing conversation..."
)
self.continue_conversation(miner_response=top_response)
diff --git a/prompting/base/miner.py b/prompting/base/miner.py
index e23cbcbf..b1903104 100644
--- a/prompting/base/miner.py
+++ b/prompting/base/miner.py
@@ -1,5 +1,6 @@
# The MIT License (MIT)
# Copyright © 2024 Yuma Rao
+# Copyright © 2024 cyber~Congress
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
@@ -19,7 +20,7 @@
import argparse
import asyncio
import threading
-import bittensor as bt
+import cybertensor as ct
from prompting.protocol import StreamPromptingSynapse
from prompting.base.neuron import BaseNeuron
from prompting.utils.config import add_miner_args
@@ -28,7 +29,7 @@
class BaseStreamMinerNeuron(BaseNeuron):
"""
- Base class for Bittensor miners.
+ Base class for Cybertensor miners.
"""
@classmethod
@@ -41,25 +42,25 @@ def __init__(self, config=None):
# Warn if allowing incoming requests from anyone.
if not self.config.blacklist.force_validator_permit:
- bt.logging.warning(
+ ct.logging.warning(
"You are allowing non-validators to send requests to your miner. This is a security risk."
)
if self.config.blacklist.allow_non_registered:
- bt.logging.warning(
+ ct.logging.warning(
"You are allowing non-registered entities to send requests to your miner. This is a security risk."
)
# The axon handles request processing, allowing validators to send this miner requests.
- self.axon = bt.axon(wallet=self.wallet, config=self.config)
+ self.axon = ct.axon(wallet=self.wallet, config=self.config)
# Attach determiners which functions are called when servicing a request.
- bt.logging.info(f"Attaching forward function to miner axon.")
+ ct.logging.info(f"Attaching forward function to miner axon.")
self.axon.attach(
forward_fn=self._forward,
blacklist_fn=self.blacklist,
priority_fn=self.priority,
)
- bt.logging.info(f"Axon created: {self.axon}")
+ ct.logging.info(f"Axon created: {self.axon}")
# Instantiate runners
self.should_exit: bool = False
@@ -69,21 +70,21 @@ def __init__(self, config=None):
def run(self):
"""
- Initiates and manages the main loop for the miner on the Bittensor network. The main loop handles graceful shutdown on keyboard interrupts and logs unforeseen errors.
+ Initiates and manages the main loop for the miner on the Cybertensor network. The main loop handles graceful shutdown on keyboard interrupts and logs unforeseen errors.
This function performs the following primary tasks:
- 1. Check for registration on the Bittensor network.
+ 1. Check for registration on the Cybertensor network.
2. Starts the miner's axon, making it active on the network.
3. Periodically resynchronizes with the chain; updating the metagraph with the latest network state and setting weights.
The miner continues its operations until `should_exit` is set to True or an external interruption occurs.
- During each epoch of its operation, the miner waits for new blocks on the Bittensor network, updates its
+ During each epoch of its operation, the miner waits for new blocks on the Cybertensor network, updates its
knowledge of the network (metagraph), and sets its weights. This process ensures the miner remains active
and up-to-date with the network's latest state.
Note:
- The function leverages the global configurations set during the initialization of the miner.
- - The miner's axon serves as its interface to the Bittensor network, handling incoming and outgoing requests.
+ - The miner's axon serves as its interface to the Cybertensor network, handling incoming and outgoing requests.
Raises:
KeyboardInterrupt: If the miner is stopped by a manual interruption.
@@ -95,21 +96,22 @@ def run(self):
# Serve passes the axon information to the network + netuid we are hosting on.
# This will auto-update if the axon port of external ip have changed.
- bt.logging.info(
- f"Serving miner axon {self.axon} on network: {self.config.subtensor.chain_endpoint} with netuid: {self.config.netuid}"
+ ct.logging.info(
+ f"Serving miner axon {self.axon} on network: {self.config.cwtensor.network} with netuid: {self.config.netuid}"
)
- self.axon.serve(netuid=self.config.netuid, subtensor=self.subtensor)
+ self.axon.serve(netuid=self.config.netuid, cwtensor=self.cwtensor)
# Start starts the miner's axon, making it active on the network.
self.axon.start()
- bt.logging.info(f"Miner starting at block: {self.block}")
+ ct.logging.info(f"Miner starting at block: {self.block}")
# This loop maintains the miner's operations until intentionally stopped.
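+ # Track the block of the most recent metagraph sync locally; set again after each sync below.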
+ last_update_block = 0
try:
while not self.should_exit:
while (
- self.block - self.metagraph.last_update[self.uid]
+ self.block - last_update_block
< self.config.neuron.epoch_length
):
# Wait before checking again.
@@ -121,18 +123,19 @@ def run(self):
# Sync metagraph and potentially set weights.
self.sync()
+ last_update_block = self.block
self.step += 1
# If someone intentionally stops the miner, it'll safely terminate operations.
except KeyboardInterrupt:
self.axon.stop()
- bt.logging.success("Miner killed by keyboard interrupt.")
+ ct.logging.success("Miner killed by keyboard interrupt.")
exit()
# In case of unforeseen errors, the miner will log the error and continue operations.
except Exception as err:
- bt.logging.error("Error during mining", str(err))
- bt.logging.debug(print_exception(type(err), err, err.__traceback__))
+ ct.logging.error("Error during mining", str(err))
+ ct.logging.debug(print_exception(type(err), err, err.__traceback__))
self.should_exit = True
def run_in_background_thread(self):
@@ -141,23 +144,23 @@ def run_in_background_thread(self):
This is useful for non-blocking operations.
"""
if not self.is_running:
- bt.logging.debug("Starting miner in background thread.")
+ ct.logging.debug("Starting miner in background thread.")
self.should_exit = False
self.thread = threading.Thread(target=self.run, daemon=True)
self.thread.start()
self.is_running = True
- bt.logging.debug("Started")
+ ct.logging.debug("Started")
def stop_run_thread(self):
"""
Stops the miner's operations that are running in the background thread.
"""
if self.is_running:
- bt.logging.debug("Stopping miner in background thread.")
+ ct.logging.debug("Stopping miner in background thread.")
self.should_exit = True
self.thread.join(5)
self.is_running = False
- bt.logging.debug("Stopped")
+ ct.logging.debug("Stopped")
def __enter__(self):
"""
@@ -185,10 +188,10 @@ def __exit__(self, exc_type, exc_value, traceback):
def resync_metagraph(self):
"""Resyncs the metagraph and updates the hotkeys and moving averages based on the new metagraph."""
- bt.logging.info("resync_metagraph()")
+ ct.logging.info("resync_metagraph()")
# Sync the metagraph.
- self.metagraph.sync(subtensor=self.subtensor)
+ self.metagraph.sync(cwtensor=self.cwtensor)
def _forward(self, synapse: StreamPromptingSynapse) -> StreamPromptingSynapse:
"""
diff --git a/prompting/base/neuron.py b/prompting/base/neuron.py
index 80fe4226..37053d5a 100644
--- a/prompting/base/neuron.py
+++ b/prompting/base/neuron.py
@@ -1,5 +1,6 @@
# The MIT License (MIT)
# Copyright © 2024 Yuma Rao
+# Copyright © 2024 cyber~Congress
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
@@ -18,7 +19,7 @@
import copy
import sys
-import bittensor as bt
+import cybertensor as ct
from abc import ABC, abstractmethod
@@ -27,18 +28,18 @@
from prompting.utils.misc import ttl_get_block
from prompting import __spec_version__ as spec_version
-from prompting.mock import MockSubtensor, MockMetagraph
+from prompting.mock import MockCwtensor, MockMetagraph
class BaseNeuron(ABC):
"""
- Base class for Bittensor miners. This class is abstract and should be inherited by a subclass. It contains the core logic for all neurons; validators and miners.
+ Base class for Cybertensor miners. This class is abstract and should be inherited by a subclass. It contains the core logic for all neurons; validators and miners.
- In addition to creating a wallet, subtensor, and metagraph, this class also handles the synchronization of the network state via a basic checkpointing mechanism based on epoch length.
+ In addition to creating a wallet, cwtensor, and metagraph, this class also handles the synchronization of the network state via a basic checkpointing mechanism based on epoch length.
"""
@classmethod
- def check_config(cls, config: "bt.Config"):
+ def check_config(cls, config: "ct.Config"):
check_config(cls, config)
@classmethod
@@ -49,9 +50,9 @@ def add_args(cls, parser):
def _config(cls):
return config(cls)
- subtensor: "bt.subtensor"
- wallet: "bt.wallet"
- metagraph: "bt.metagraph"
+ cwtensor: "ct.cwtensor"
+ wallet: "ct.Wallet"
+ metagraph: "ct.metagraph"
spec_version: int = spec_version
@property
@@ -65,44 +66,44 @@ def __init__(self, config=None):
self.check_config(self.config)
# Set up logging with the provided configuration and directory.
- bt.logging(config=self.config, logging_dir=self.config.full_path)
+ ct.logging(config=self.config, logging_dir=self.config.full_path)
# If a gpu is required, set the device to cuda:N (e.g. cuda:0)
self.device = self.config.neuron.device
# Log the configuration for reference.
- bt.logging.info(self.config)
+ ct.logging.info(self.config)
- # Build Bittensor objects
- # These are core Bittensor classes to interact with the network.
- bt.logging.info("Setting up bittensor objects.")
+ # Build Cybertensor objects
+ # These are core Cybertensor classes to interact with the network.
+ ct.logging.info("Setting up Cybertensor objects.")
# The wallet holds the cryptographic key pairs for the miner.
if self.config.mock:
- self.wallet = bt.MockWallet(config=self.config)
- self.subtensor = MockSubtensor(self.config.netuid, wallet=self.wallet)
- self.metagraph = MockMetagraph(netuid=self.config.netuid, subtensor=self.subtensor)
+ self.wallet = ct.MockWallet(config=self.config)
+ self.cwtensor = MockCwtensor(self.config.netuid, wallet=self.wallet)
+ self.metagraph = MockMetagraph(netuid=self.config.netuid, cwtensor=self.cwtensor)
else:
- self.wallet = bt.wallet(config=self.config)
- self.subtensor = bt.subtensor(config=self.config)
- self.metagraph = self.subtensor.metagraph(self.config.netuid)
+ self.wallet = ct.Wallet(config=self.config)
+ self.cwtensor = ct.cwtensor(config=self.config)
+ self.metagraph = self.cwtensor.metagraph(self.config.netuid)
- bt.logging.info(f"Wallet: {self.wallet}")
- bt.logging.info(f"Subtensor: {self.subtensor}")
- bt.logging.info(f"Metagraph: {self.metagraph}")
+ ct.logging.info(f"Wallet: {self.wallet}")
+ ct.logging.info(f"Cwtensor: {self.cwtensor}")
+ ct.logging.info(f"Metagraph: {self.metagraph}")
- # Check if the miner is registered on the Bittensor network before proceeding further.
+ # Check if the miner is registered on the Cybertensor network before proceeding further.
self.check_registered()
# Each miner gets a unique identity (UID) in the network for differentiation.
- self.uid = self.metagraph.hotkeys.index(self.wallet.hotkey.ss58_address)
- bt.logging.info(
- f"Running neuron on subnet: {self.config.netuid} with uid {self.uid} using network: {self.subtensor.chain_endpoint}"
+ self.uid = self.metagraph.hotkeys.index(self.wallet.hotkey.address)
+ ct.logging.info(
+ f"Running neuron on subnet: {self.config.netuid} with uid {self.uid} using network: {self.cwtensor.network_config.url}"
)
self.step = 0
@abstractmethod
- def forward(self, synapse: bt.Synapse) -> bt.Synapse:
+ def forward(self, synapse: ct.Synapse) -> ct.Synapse:
...
@abstractmethod
@@ -127,11 +128,11 @@ def sync(self):
def check_registered(self):
# --- Check for registration.
- if not self.subtensor.is_hotkey_registered(
+ if not self.cwtensor.is_hotkey_registered(
netuid=self.config.netuid,
- hotkey_ss58=self.wallet.hotkey.ss58_address,
+ hotkey=self.wallet.hotkey.address,
):
- bt.logging.error(
+ ct.logging.error(
f"Wallet: {self.wallet} is not registered on netuid {self.config.netuid}."
f" Please register the hotkey using `btcli subnets register` before trying again"
)
@@ -169,6 +170,6 @@ def save_state(self):
pass
def load_state(self):
- bt.logging.debug(
+ ct.logging.debug(
"load_state() not implemented for this neuron. You can implement this function to load model checkpoints or other useful data."
)
diff --git a/prompting/base/prompting_miner.py b/prompting/base/prompting_miner.py
index f6b18818..926da9a9 100644
--- a/prompting/base/prompting_miner.py
+++ b/prompting/base/prompting_miner.py
@@ -1,5 +1,6 @@
# The MIT License (MIT)
# Copyright © 2024 Yuma Rao
+# Copyright © 2024 cyber~Congress
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
@@ -16,9 +17,9 @@
# DEALINGS IN THE SOFTWARE.
import wandb
import typing
-import bittensor as bt
+import cybertensor as ct
-# Bittensor Miner Template:
+# Cybertensor Miner Template:
import prompting
from prompting.protocol import StreamPromptingSynapse
from prompting.base.miner import BaseStreamMinerNeuron
@@ -31,7 +32,7 @@ class BaseStreamPromptingMiner(BaseStreamMinerNeuron):
In particular, you should replace the forward function with your own logic. You may also want to override the blacklist and priority functions according to your needs.
This class inherits from the BaseMinerNeuron class, which in turn inherits from BaseNeuron.
- The BaseNeuron class takes care of routine tasks such as setting up wallet, subtensor, metagraph, logging directory, parsing config, etc.
+ The BaseNeuron class takes care of routine tasks such as setting up wallet, cwtensor, metagraph, logging directory, parsing config, etc.
You can override any of the methods in BaseNeuron if you need to customize the behavior.
This class provides reasonable default behavior for a miner such as blacklisting unrecognized hotkeys, prioritizing requests based on stake, and forwarding requests to the forward function.
@@ -75,12 +76,12 @@ async def blacklist(
"""
if synapse.dendrite.hotkey not in self.metagraph.hotkeys:
# Ignore requests from unrecognized entities.
- bt.logging.trace(
+ ct.logging.trace(
f"Blacklisting unrecognized hotkey {synapse.dendrite.hotkey}"
)
return True, "Unrecognized hotkey"
- bt.logging.trace(
+ ct.logging.trace(
f"Not Blacklisting recognized hotkey {synapse.dendrite.hotkey}"
)
return False, "Hotkey recognized!"
@@ -111,18 +112,18 @@ async def priority(self, synapse: StreamPromptingSynapse) -> float:
priority = float(
self.metagraph.S[caller_uid]
) # Return the stake as the priority.
- bt.logging.trace(
+ ct.logging.trace(
f"Prioritizing {synapse.dendrite.hotkey} with value: ", priority
)
return priority
def init_wandb(self):
- bt.logging.info("Initializing wandb...")
+ ct.logging.info("Initializing wandb...")
- uid = f"uid_{self.metagraph.hotkeys.index(self.wallet.hotkey.ss58_address)}"
+ uid = f"uid_{self.metagraph.hotkeys.index(self.wallet.hotkey.address)}"
net_uid = f"netuid_{self.config.netuid}"
tags = [
- self.wallet.hotkey.ss58_address,
+ self.wallet.hotkey.address,
net_uid,
f"uid_{uid}",
prompting.__version__,
@@ -174,7 +175,7 @@ def log_event(
"prompt": prompt,
"completion": completion,
"system_prompt": system_prompt,
- "uid": self.metagraph.hotkeys.index(self.wallet.hotkey.ss58_address),
+ "uid": self.metagraph.hotkeys.index(self.wallet.hotkey.address),
"stake": self.metagraph.S[self.uid].item(),
"trust": self.metagraph.T[self.uid].item(),
"incentive": self.metagraph.I[self.uid].item(),
@@ -183,11 +184,11 @@ def log_event(
**extra_info,
}
- bt.logging.info("Logging event to wandb...", step_log)
+ ct.logging.info("Logging event to wandb...", step_log)
wandb.log(step_log)
def log_status(self):
m = self.metagraph
- bt.logging.info(
- f"Miner running:: network: {self.subtensor.network} | step: {self.step} | uid: {self.uid} | trust: {m.trust[self.uid]:.3f} | emission {m.emission[self.uid]:.3f}"
+ ct.logging.info(
+ f"Miner running:: network: {self.cwtensor.network} | step: {self.step} | uid: {self.uid} | trust: {m.trust[self.uid]:.3f} | emission {m.emission[self.uid]:.3f}"
)
diff --git a/prompting/base/validator.py b/prompting/base/validator.py
index 893bf205..2b0e4b35 100644
--- a/prompting/base/validator.py
+++ b/prompting/base/validator.py
@@ -1,5 +1,6 @@
# The MIT License (MIT)
# Copyright © 2024 Yuma Rao
+# Copyright © 2024 cyber~Congress
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
@@ -21,7 +22,7 @@
import asyncio
import argparse
import threading
-import bittensor as bt
+import cybertensor as ct
from typing import List
from traceback import print_exception
@@ -34,7 +35,7 @@
class BaseValidatorNeuron(BaseNeuron):
"""
- Base class for Bittensor validators. Your validator should inherit from this class.
+ Base class for Cybertensor validators. Your validator should inherit from this class.
"""
@classmethod
@@ -52,11 +53,11 @@ def __init__(self, config=None):
if self.config.mock:
self.dendrite = MockDendrite(wallet=self.wallet)
else:
- self.dendrite = bt.dendrite(wallet=self.wallet)
- bt.logging.info(f"Dendrite: {self.dendrite}")
+ self.dendrite = ct.dendrite(wallet=self.wallet)
+ ct.logging.info(f"Dendrite: {self.dendrite}")
# Set up initial scoring weights for validation
- bt.logging.info("Building validation weights.")
+ ct.logging.info("Building validation weights.")
self.scores = torch.zeros(
self.metagraph.n, dtype=torch.float32, device=self.device
)
@@ -68,7 +69,7 @@ def __init__(self, config=None):
if not self.config.neuron.axon_off:
self.serve_axon()
else:
- bt.logging.warning("axon off, not serving ip to chain.")
+ ct.logging.warning("axon off, not serving ip to chain.")
# Create asyncio event loop to manage async tasks.
self.loop = asyncio.get_event_loop()
@@ -82,27 +83,27 @@ def __init__(self, config=None):
def serve_axon(self):
"""Serve axon to enable external connections."""
- bt.logging.info("serving ip to chain...")
+ ct.logging.info("serving ip to chain...")
try:
- self.axon = bt.axon(wallet=self.wallet, config=self.config)
+ self.axon = ct.axon(wallet=self.wallet, config=self.config)
try:
- self.subtensor.serve_axon(
+ self.cwtensor.serve_axon(
netuid=self.config.netuid,
axon=self.axon,
)
except Exception as e:
- bt.logging.error(f"Failed to serve Axon with exception: {e}")
+ ct.logging.error(f"Failed to serve Axon with exception: {e}")
except Exception as e:
- bt.logging.error(f"Failed to create Axon initialize with exception: {e}")
+ ct.logging.error(f"Failed to create Axon initialize with exception: {e}")
def run(self):
"""
- Initiates and manages the main loop for the miner on the Bittensor network. The main loop handles graceful shutdown on keyboard interrupts and logs unforeseen errors.
+ Initiates and manages the main loop for the miner on the Cybertensor network. The main loop handles graceful shutdown on keyboard interrupts and logs unforeseen errors.
This function performs the following primary tasks:
- 1. Check for registration on the Bittensor network.
+ 1. Check for registration on the Cybertensor network.
2. Continuously forwards queries to the miners on the network, rewarding their responses and updating the scores accordingly.
3. Periodically resynchronizes with the chain; updating the metagraph with the latest network state and setting weights.
@@ -110,7 +111,7 @@ def run(self):
Note:
- The function leverages the global configurations set during the initialization of the miner.
- - The miner's axon serves as its interface to the Bittensor network, handling incoming and outgoing requests.
+ - The miner's axon serves as its interface to the Cybertensor network, handling incoming and outgoing requests.
Raises:
KeyboardInterrupt: If the miner is stopped by a manual interruption.
@@ -121,20 +122,20 @@ def run(self):
self.sync()
if not self.config.neuron.axon_off:
- bt.logging.info(
- f"Running validator {self.axon} on network: {self.config.subtensor.chain_endpoint} with netuid: {self.config.netuid}"
+ ct.logging.info(
+ f"Running validator {self.axon} on network: {self.config.cwtensor.network} with netuid: {self.config.netuid}"
)
else:
- bt.logging.info(
- f"Running validator on network: {self.config.subtensor.chain_endpoint} with netuid: {self.config.netuid}"
+ ct.logging.info(
+ f"Running validator on network: {self.config.cwtensor.network} with netuid: {self.config.netuid}"
)
- bt.logging.info(f"Validator starting at block: {self.block}")
+ ct.logging.info(f"Validator starting at block: {self.block}")
# This loop maintains the validator's operations until intentionally stopped.
try:
while True:
- bt.logging.info(f"step({self.step}) block({self.block})")
+ ct.logging.info(f"step({self.step}) block({self.block})")
forward_timeout = self.config.neuron.forward_max_time
try:
@@ -143,13 +144,13 @@ def run(self):
asyncio.wait_for(task, timeout=forward_timeout)
)
except torch.cuda.OutOfMemoryError as e:
- bt.logging.error(f"Out of memory error: {e}")
+ ct.logging.error(f"Out of memory error: {e}")
continue
except MaxRetryError as e:
- bt.logging.error(f"MaxRetryError: {e}")
+ ct.logging.error(f"MaxRetryError: {e}")
continue
except asyncio.TimeoutError as e:
- bt.logging.error(
+ ct.logging.error(
f"Forward timeout: Task execution exceeded {forward_timeout} seconds and was cancelled.: {e}"
)
continue
@@ -166,13 +167,13 @@ def run(self):
# If someone intentionally stops the validator, it'll safely terminate operations.
except KeyboardInterrupt:
self.axon.stop()
- bt.logging.success("Validator killed by keyboard interrupt.")
+ ct.logging.success("Validator killed by keyboard interrupt.")
sys.exit()
# In case of unforeseen errors, the validator will log the error and quit
except Exception as err:
- bt.logging.error("Error during validation", str(err))
- bt.logging.debug(print_exception(type(err), err, err.__traceback__))
+ ct.logging.error("Error during validation", str(err))
+ ct.logging.debug(print_exception(type(err), err, err.__traceback__))
self.should_exit = True
def run_in_background_thread(self):
@@ -181,23 +182,23 @@ def run_in_background_thread(self):
This method facilitates the use of the validator in a 'with' statement.
"""
if not self.is_running:
- bt.logging.debug("Starting validator in background thread.")
+ ct.logging.debug("Starting validator in background thread.")
self.should_exit = False
self.thread = threading.Thread(target=self.run, daemon=True)
self.thread.start()
self.is_running = True
- bt.logging.debug("Started")
+ ct.logging.debug("Started")
def stop_run_thread(self):
"""
Stops the validator's operations that are running in the background thread.
"""
if self.is_running:
- bt.logging.debug("Stopping validator in background thread.")
+ ct.logging.debug("Stopping validator in background thread.")
self.should_exit = True
self.thread.join(5)
self.is_running = False
- bt.logging.debug("Stopped")
+ ct.logging.debug("Stopped")
def __enter__(self):
self.run_in_background_thread()
@@ -217,11 +218,11 @@ def __exit__(self, exc_type, exc_value, traceback):
None if the context was exited without an exception.
"""
if self.is_running:
- bt.logging.debug("Stopping validator in background thread.")
+ ct.logging.debug("Stopping validator in background thread.")
self.should_exit = True
self.thread.join(5)
self.is_running = False
- bt.logging.debug("Stopped")
+ ct.logging.debug("Stopped")
def set_weights(self):
"""
@@ -230,7 +231,7 @@ def set_weights(self):
# Check if self.scores contains any NaN values and log a warning if it does.
if torch.isnan(self.scores).any():
- bt.logging.warning(
+ ct.logging.warning(
"Scores contain NaN values. This may be due to a lack of responses from miners, or a bug in your reward functions."
)
@@ -238,62 +239,61 @@ def set_weights(self):
# Replace any NaN values with 0.
raw_weights = torch.nn.functional.normalize(self.scores, p=1, dim=0)
- bt.logging.debug("raw_weights", raw_weights)
- bt.logging.debug("raw_weight_uids", self.metagraph.uids.to("cpu"))
- # Process the raw weights to final_weights via subtensor limitations.
+ ct.logging.debug("raw_weights", raw_weights)
+ ct.logging.debug("raw_weight_uids", self.metagraph.uids.to("cpu"))
+ # Process the raw weights to final_weights via cwtensor limitations.
(
processed_weight_uids,
processed_weights,
- ) = bt.utils.weight_utils.process_weights_for_netuid(
+ ) = ct.utils.weight_utils.process_weights_for_netuid(
uids=self.metagraph.uids.to("cpu"),
weights=raw_weights.to("cpu"),
netuid=self.config.netuid,
- subtensor=self.subtensor,
+ cwtensor=self.cwtensor,
metagraph=self.metagraph,
)
- bt.logging.debug("processed_weights", processed_weights)
- bt.logging.debug("processed_weight_uids", processed_weight_uids)
+ ct.logging.debug("processed_weights", processed_weights)
+ ct.logging.debug("processed_weight_uids", processed_weight_uids)
# Convert to uint16 weights and uids.
(
uint_uids,
uint_weights,
- ) = bt.utils.weight_utils.convert_weights_and_uids_for_emit(
+ ) = ct.utils.weight_utils.convert_weights_and_uids_for_emit(
uids=processed_weight_uids, weights=processed_weights
)
- bt.logging.debug("uint_weights", uint_weights)
- bt.logging.debug("uint_uids", uint_uids)
+ ct.logging.debug("uint_weights", uint_weights)
+ ct.logging.debug("uint_uids", uint_uids)
- # Set the weights on chain via our subtensor connection.
- result = self.subtensor.set_weights(
+ # Set the weights on chain via our cwtensor connection.
+ result = self.cwtensor.set_weights(
wallet=self.wallet,
netuid=self.config.netuid,
uids=uint_uids,
weights=uint_weights,
wait_for_finalization=False,
- wait_for_inclusion=False,
version_key=self.spec_version,
)
if result is True:
- bt.logging.info("set_weights on chain successfully!")
+ ct.logging.info("set_weights on chain successfully!")
else:
- bt.logging.error("set_weights failed")
+ ct.logging.error("set_weights failed")
def resync_metagraph(self):
"""Resyncs the metagraph and updates the hotkeys and moving averages based on the new metagraph."""
- bt.logging.info("resync_metagraph()")
+ ct.logging.info("resync_metagraph()")
# Copies state of metagraph before syncing.
previous_metagraph = copy.deepcopy(self.metagraph)
# Sync the metagraph.
- self.metagraph.sync(subtensor=self.subtensor)
+ self.metagraph.sync(cwtensor=self.cwtensor)
# Check if the metagraph axon info has changed.
if previous_metagraph.axons == self.metagraph.axons:
return
- bt.logging.info(
+ ct.logging.info(
"Metagraph updated, re-syncing hotkeys, dendrite pool and moving averages"
)
# Zero out all hotkeys that have been replaced.
@@ -318,7 +318,7 @@ def update_scores(self, rewards: torch.FloatTensor, uids: List[int]):
# Check if rewards contains NaN values.
if torch.isnan(rewards).any():
- bt.logging.warning(f"NaN values detected in rewards: {rewards}")
+ ct.logging.warning(f"NaN values detected in rewards: {rewards}")
# Replace any NaN values in rewards with 0.
rewards = torch.nan_to_num(rewards, 0)
@@ -327,18 +327,18 @@ def update_scores(self, rewards: torch.FloatTensor, uids: List[int]):
step_rewards = self.scores.scatter(
0, torch.tensor(uids).to(self.device), rewards.to(self.device)
).to(self.device)
- bt.logging.debug(f"Scattered rewards: {rewards}")
+ ct.logging.debug(f"Scattered rewards: {rewards}")
# Update scores with rewards produced by this step.
# shape: [ metagraph.n ]
alpha = self.config.neuron.moving_average_alpha
self.scores = alpha * step_rewards + (1 - alpha) * self.scores
self.scores = (self.scores - self.config.neuron.decay_alpha).clamp(min=0)
- bt.logging.debug(f"Updated moving avg scores: {self.scores}")
+ ct.logging.debug(f"Updated moving avg scores: {self.scores}")
def save_state(self):
"""Saves the state of the validator to a file."""
- bt.logging.info("Saving validator state.")
+ ct.logging.info("Saving validator state.")
# Save the state of the validator to file.
torch.save(
@@ -352,7 +352,7 @@ def save_state(self):
def load_state(self):
"""Loads the state of the validator from a file."""
- bt.logging.info("Loading validator state.")
+ ct.logging.info("Loading validator state.")
# Load the state of the validator from file.
state = torch.load(self.config.neuron.full_path + "/state.pt")
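Aside from the `bt`→`ct` rename, the scoring pipeline above is unchanged: step rewards are folded into a moving average, decayed, L1-normalized, and compressed to uint16 before the on-chain `set_weights` call. A minimal sketch of that arithmetic with assumed values (the production code delegates the last two steps to `ct.utils.weight_utils` helpers):

```python
# Standalone sketch of the score -> weight arithmetic; all values assumed.
import torch

scores = torch.tensor([0.0, 1.5, 3.0, 1.5])        # per-UID moving averages
step_rewards = torch.tensor([0.0, 1.0, 0.0, 0.5])  # rewards from this step
alpha, decay = 0.1, 0.001  # neuron.moving_average_alpha / neuron.decay_alpha

scores = alpha * step_rewards + (1 - alpha) * scores  # EMA update
scores = (scores - decay).clamp(min=0)                # slow decay toward zero

raw_weights = torch.nn.functional.normalize(scores, p=1, dim=0)  # sums to 1.0
U16_MAX = 65535
uint_weights = (raw_weights / raw_weights.max() * U16_MAX).round().to(torch.int64)
print(raw_weights, uint_weights)  # the largest weight maps to 65535
```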
diff --git a/prompting/cleaners/all_cleaners.py b/prompting/cleaners/all_cleaners.py
index d48119bf..5f983c97 100644
--- a/prompting/cleaners/all_cleaners.py
+++ b/prompting/cleaners/all_cleaners.py
@@ -1,6 +1,6 @@
from abc import ABC, abstractmethod
from typing import Union
-import bittensor as bt
+import cybertensor as ct
import re
from typing import Union
@@ -20,7 +20,7 @@ def __init__(self, **kwargs) -> None:
pass
def apply(self, generation: str) -> str:
- bt.logging.debug("Pruning unfinished sentence.")
+ ct.logging.debug("Pruning unfinished sentence.")
return generation.strip("\"'")
diff --git a/prompting/cleaners/cleaner.py b/prompting/cleaners/cleaner.py
index e612cf09..c4e63aa6 100644
--- a/prompting/cleaners/cleaner.py
+++ b/prompting/cleaners/cleaner.py
@@ -1,6 +1,6 @@
from typing import List, Dict
-import bittensor as bt
+import cybertensor as ct
from prompting.cleaners.all_cleaners import RemoveQuotes, RemoveRoles, PruneEnding, PrunePostQuestionText, RemoveTags, FirstQuestion
@@ -52,7 +52,7 @@ def apply(self, generation: str) -> str:
return generation
except Exception as E:
- bt.logging.error(
+ ct.logging.error(
f"Failed to apply cleaning pipeline {cleaner['name']}. {E},"
)
return generation
diff --git a/prompting/dendrite.py b/prompting/dendrite.py
index a9df0aa8..6997afa9 100644
--- a/prompting/dendrite.py
+++ b/prompting/dendrite.py
@@ -1,11 +1,11 @@
import torch
-import bittensor as bt
+import cybertensor as ct
from typing import List
class DendriteResponseEvent:
def __init__(
- self, responses: List[bt.Synapse], uids: torch.LongTensor, timeout: float
+ self, responses: List[ct.Synapse], uids: torch.LongTensor, timeout: float
):
self.uids = uids
self.completions = []
diff --git a/prompting/forward.py b/prompting/forward.py
index 170f8525..9b02ceef 100644
--- a/prompting/forward.py
+++ b/prompting/forward.py
@@ -1,5 +1,6 @@
# The MIT License (MIT)
# Copyright © 2024 Yuma Rao
+# Copyright © 2024 cyber~Congress
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
@@ -22,7 +23,7 @@
import asyncio
import traceback
import numpy as np
-import bittensor as bt
+import cybertensor as ct
from typing import List, Dict, Awaitable
from prompting.agent import HumanAgent
from prompting.dendrite import DendriteResponseEvent
@@ -58,7 +59,7 @@ async def process_response(uid: int, async_generator: Awaitable):
try:
chunk = None # Initialize chunk with a default value
async for chunk in async_generator: # most important loop, as this is where we acquire the final synapse.
- bt.logging.debug(f"\nchunk for uid {uid}: {chunk}")
+ ct.logging.debug(f"\nchunk for uid {uid}: {chunk}")
if chunk is not None:
synapse = chunk # last object yielded is the synapse itself with completion filled
@@ -67,13 +68,13 @@ async def process_response(uid: int, async_generator: Awaitable):
if isinstance(synapse, StreamPromptingSynapse):
return synapse
- bt.logging.debug(
+ ct.logging.debug(
f"Synapse is not StreamPromptingSynapse. Miner uid {uid} completion set to '' "
)
except Exception as e:
- # bt.logging.error(f"Error in generating reference or handling responses: {e}", exc_info=True)
+ # ct.logging.error(f"Error in generating reference or handling responses: {e}", exc_info=True)
traceback_details = traceback.format_exc()
- bt.logging.error(
+ ct.logging.error(
f"Error in generating reference or handling responses for uid {uid}: {e}\n{traceback_details}"
)
@@ -125,7 +126,7 @@ async def handle_response(responses: Dict[int, Awaitable]) -> List[StreamResult]
# If the result is neither an error or a StreamSynapse, log the error and raise a ValueError
else:
- bt.logging.error(f"Unexpected result type for UID {uid}: {result}")
+ ct.logging.error(f"Unexpected result type for UID {uid}: {result}")
raise ValueError(f"Unexpected result type for UID {uid}: {result}")
return mapped_results
@@ -155,15 +156,15 @@ def log_stream_results(stream_results: List[StreamResult]):
if response.exception is None and response.synapse.completion != ""
]
- bt.logging.info(f"Total of non_empty responses: ({len(non_empty_responses)})")
- bt.logging.info(f"Total of empty responses: ({len(empty_responses)})")
- bt.logging.info(
+ ct.logging.info(f"Total of non_empty responses: ({len(non_empty_responses)})")
+ ct.logging.info(f"Total of empty responses: ({len(empty_responses)})")
+ ct.logging.info(
f"Total of failed responses: ({len(failed_responses)}):\n {failed_responses}"
)
for failed_response in failed_responses:
formatted_exception = serialize_exception_to_string(failed_response.exception)
- bt.logging.error(
+ ct.logging.error(
f"Failed response for uid {failed_response.uid}: {formatted_exception}"
)
@@ -187,7 +188,7 @@ async def run_step(
exclude (list, optional): The list of uids to exclude from the query. Defaults to [].
"""
- bt.logging.debug("run_step", agent.task.name)
+ ct.logging.debug("run_step", agent.task.name)
# Record event start time.
start_time = time.time()
@@ -228,7 +229,7 @@ async def run_step(
responses=all_synapses_results, uids=uids, timeout=timeout
)
- bt.logging.info(f"Created DendriteResponseEvent:\n {response_event}")
+ ct.logging.info(f"Created DendriteResponseEvent:\n {response_event}")
# Reward the responses and get the reward result (dataclass)
# This contains a list of RewardEvents but can be exported as a dict (column-wise) for logging etc
reward_result = RewardResult(
@@ -237,15 +238,28 @@ async def run_step(
response_event=response_event,
device=self.device,
)
- bt.logging.info(f"Created RewardResult:\n {reward_result}")
+ ct.logging.info(f"Created RewardResult:\n {reward_result}")
- best_response = response_event.completions[reward_result.rewards.argmax()]
+ try:
+ best_response = response_event.completions[reward_result.rewards.argmax()]
+ except Exception as e:
+ # TODO added during debugging, remove later after testing
+ print(f"An error occured best response: {e}")
+ best_response = []
# The original idea was that the agent is 'satisfied' when it gets a good enough response (e.g. reward critera is met, such as ROUGE>threshold)
- agent.update_progress(
- top_reward=reward_result.rewards.max(),
- top_response=best_response,
- )
+ try:
+ agent.update_progress(
+ top_reward=reward_result.rewards.max(),
+ top_response=best_response,
+ )
+ except Exception as e:
+ # TODO added during debugging, remove later after testing
+ print(f"An error occured update progress: {e}")
+ agent.update_progress(
+ top_reward=0.0,
+ top_response=best_response,
+ )
self.update_scores(reward_result.rewards, uids)
@@ -255,17 +269,31 @@ async def run_step(
for stream_result in stream_results
]
# Log the step event.
- event = {
- "best": best_response,
- "block": self.block,
- "step": self.step,
- "step_time": time.time() - start_time,
- "stream_results_uids": stream_results_uids,
- "stream_results_exceptions": stream_results_exceptions,
- **agent.__state_dict__(full=self.config.neuron.log_full),
- **reward_result.__state_dict__(full=self.config.neuron.log_full),
- **response_event.__state_dict__(),
- }
+ try:
+ event = {
+ "best": best_response,
+ "block": self.block,
+ "step": self.step,
+ "step_time": time.time() - start_time,
+ "stream_results_uids": stream_results_uids,
+ "stream_results_exceptions": stream_results_exceptions,
+ **agent.__state_dict__(full=self.config.neuron.log_full),
+ **reward_result.__state_dict__(full=self.config.neuron.log_full),
+ **response_event.__state_dict__(),
+ }
+ except Exception as e:
+ # TODO added during debugging, remove later after testing
+ event = {
+ "best": best_response,
+ "block": self.block,
+ "step": self.step,
+ "step_time": time.time() - start_time,
+ "stream_results_uids": stream_results_uids,
+ "stream_results_exceptions": stream_results_exceptions,
+ **agent.__state_dict__(full=self.config.neuron.log_full),
+ "reward_result": {"rewards": [[]]},
+ **response_event.__state_dict__(),
+ }
return event
@@ -275,18 +303,18 @@ async def forward(self):
Encapsulates a full conversation between the validator and miners. Contains one or more rounds of request-response.
"""
- bt.logging.info("🚀 Starting forward loop...")
+ ct.logging.info("🚀 Starting forward loop...")
forward_start_time = time.time()
while True:
- bt.logging.info(
+ ct.logging.info(
f"📋 Selecting task... from {self.config.neuron.tasks} with distribution {self.config.neuron.task_p}"
)
# Create a specific task
task_name = np.random.choice(
self.config.neuron.tasks, p=self.config.neuron.task_p
)
- bt.logging.info(f"📋 Creating {task_name} task... ")
+ ct.logging.info(f"📋 Creating {task_name} task... ")
try:
task = create_task(
llm_pipeline=self.llm_pipeline,
@@ -296,13 +324,13 @@ async def forward(self):
)
break
except Exception as e:
- bt.logging.error(
+ ct.logging.error(
f"Failed to create {task_name} task. {sys.exc_info()}. Skipping to next task."
)
continue
# Create random agent with task, topic, profile...
- bt.logging.info(f"🤖 Creating agent for {task_name} task... ")
+ ct.logging.info(f"🤖 Creating agent for {task_name} task... ")
agent = HumanAgent(
task=task, llm_pipeline=self.llm_pipeline, begin_conversation=True
)
@@ -356,7 +384,7 @@ async def forward(self):
except BaseException as e:
unexpected_errors = serialize_exception_to_string(e)
- bt.logging.error(
+ ct.logging.error(
f"Error in run_step: Skipping to next round. \n {unexpected_errors}"
)
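A pattern worth noting in the `forward.py` changes: `process_response` drains a miner's async generator and keeps only the last yielded object, which by convention is the filled synapse. A toy illustration of that contract (names here are illustrative, not the cybertensor API):

```python
# The last value yielded by the stream "wins"; intermediate chunks are text.
import asyncio

async def mock_stream():
    for tok in ("Hello", ", ", "world"):
        yield tok                         # intermediate text chunks
    yield {"completion": "Hello, world"}  # final object stands in for the synapse

async def drain(async_gen):
    chunk = None
    async for chunk in async_gen:  # keep overwriting; the last value survives
        pass
    return chunk                   # final object, or None for an empty stream

print(asyncio.run(drain(mock_stream())))
```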
diff --git a/prompting/llms/base_llm.py b/prompting/llms/base_llm.py
index 0ed7b139..07820fb0 100644
--- a/prompting/llms/base_llm.py
+++ b/prompting/llms/base_llm.py
@@ -1,4 +1,4 @@
-import bittensor as bt
+import cybertensor as ct
from abc import ABC, abstractmethod
from prompting.cleaners.cleaner import CleanerPipeline
from typing import Any, Dict, List
@@ -39,7 +39,7 @@ def clean_response(self, cleaner: CleanerPipeline, response: str) -> str:
if cleaner is not None:
clean_response = cleaner.apply(generation=response)
if clean_response != response:
- bt.logging.debug(
+ ct.logging.debug(
f"Response cleaned, chars removed: {len(response) - len(clean_response)}..."
)
diff --git a/prompting/llms/hf.py b/prompting/llms/hf.py
index 869b3241..6baf5459 100644
--- a/prompting/llms/hf.py
+++ b/prompting/llms/hf.py
@@ -1,5 +1,6 @@
# The MIT License (MIT)
# Copyright © 2024 Yuma Rao
+# Copyright © 2024 cyber~Congress
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
@@ -17,7 +18,7 @@
import time
from typing import List, Dict
-import bittensor as bt
+import cybertensor as ct
from transformers import Pipeline, pipeline, AutoTokenizer, TextIteratorStreamer
from prompting.mock import MockPipeline
@@ -60,14 +61,14 @@ def load_hf_pipeline(
return MockPipeline(model_id)
if not device.startswith("cuda"):
- bt.logging.warning("Only crazy people run this on CPU. It is not recommended.")
+ ct.logging.warning("Only crazy people run this on CPU. It is not recommended.")
try:
tokenizer = AutoTokenizer.from_pretrained(
model_id
) # model_id is usually the name of the tokenizer.
except Exception as e:
- bt.logging.error(f"Failed to load tokenizer from model_id: {model_id}.")
+ ct.logging.error(f"Failed to load tokenizer from model_id: {model_id}.")
raise e
streamer = CustomTextIteratorStreamer(tokenizer=tokenizer)
@@ -194,7 +195,7 @@ def stream(
messages = self.messages + [{"content": message, "role": role}]
prompt = self._make_prompt(messages)
- bt.logging.debug("Starting LLM streaming process...")
+ ct.logging.debug("Starting LLM streaming process...")
streamer = CustomTextIteratorStreamer(tokenizer=self.llm_pipeline.tokenizer)
_ = self.llm_pipeline(prompt, streamer=streamer, **self.model_kwargs)
@@ -217,7 +218,7 @@ def forward(self, messages: List[Dict[str, str]]):
response = response.replace(composed_prompt, "").strip()
- bt.logging.info(
+ ct.logging.info(
f"{self.__class__.__name__} generated the following output:\n{response}"
)
return response
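For reference, the streaming setup in `hf.py` follows the standard transformers pattern: a `TextIteratorStreamer` is handed to the pipeline and tokens are consumed from its queue as generation proceeds. A hedged sketch under that assumption ("gpt2" is just an illustrative small model; the repo wraps the streamer in its own `CustomTextIteratorStreamer`):

```python
from threading import Thread
from transformers import AutoTokenizer, TextIteratorStreamer, pipeline

model_id = "gpt2"  # illustrative small model; downloads weights on first run
tokenizer = AutoTokenizer.from_pretrained(model_id)
streamer = TextIteratorStreamer(tokenizer, skip_prompt=True)
generate = pipeline("text-generation", model=model_id, tokenizer=tokenizer)

# Generation blocks, so it runs in a worker thread while the caller
# consumes tokens from the streamer's internal queue.
thread = Thread(target=generate, args=("Once upon a time",),
                kwargs={"streamer": streamer, "max_new_tokens": 20})
thread.start()
for token in streamer:
    print(token, end="", flush=True)
thread.join()
```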
diff --git a/prompting/llms/utils.py b/prompting/llms/utils.py
index 8de32ba8..19dfa664 100644
--- a/prompting/llms/utils.py
+++ b/prompting/llms/utils.py
@@ -1,6 +1,6 @@
import re
import torch
-import bittensor as bt
+import cybertensor as ct
def contains_gpu_index_in_device(device: str) -> bool:
@@ -18,8 +18,8 @@ def calculate_single_gpu_requirements(device: str, max_allowed_memory_allocation
device=device_with_gpu_index
)
- bt.logging.info(f"Available free memory: {round(global_free / 10e8, 2)} GB")
- bt.logging.info(f"Total gpu memory {round(total_gpu_memory / 10e8, 2)} GB")
+ ct.logging.info(f"Available free memory: {round(global_free / 10e8, 2)} GB")
+ ct.logging.info(f"Total gpu memory {round(total_gpu_memory / 10e8, 2)} GB")
if global_free < max_allowed_memory_allocation_in_bytes:
raise torch.cuda.CudaError(
@@ -27,7 +27,7 @@ def calculate_single_gpu_requirements(device: str, max_allowed_memory_allocation
)
gpu_utilization = round(max_allowed_memory_allocation_in_bytes / global_free, 2)
- bt.logging.info(
+ ct.logging.info(
f'{gpu_utilization * 100}% of the GPU memory will be utilized for loading the model to device "{device}".'
)
@@ -44,8 +44,8 @@ def calculate_multiple_gpu_requirements(device: str, gpus: int, max_allowed_memo
total_free_memory += global_free
total_gpu_memory += total_memory
- bt.logging.info(f"Total available free memory across all visible {gpus} GPUs: {round(total_free_memory / 10e8, 2)} GB")
- bt.logging.info(f"Total GPU memory across all visible GPUs: {gpus} {round(total_gpu_memory / 10e8, 2)} GB")
+ ct.logging.info(f"Total available free memory across all visible {gpus} GPUs: {round(total_free_memory / 10e8, 2)} GB")
+ ct.logging.info(f"Total GPU memory across all visible GPUs: {gpus} {round(total_gpu_memory / 10e8, 2)} GB")
if total_free_memory < max_allowed_memory_allocation_in_bytes:
raise torch.cuda.CudaError(
@@ -53,7 +53,7 @@ def calculate_multiple_gpu_requirements(device: str, gpus: int, max_allowed_memo
)
gpu_utilization = round(max_allowed_memory_allocation_in_bytes / total_free_memory, 2)
- bt.logging.info(
+ ct.logging.info(
f"{gpu_utilization * 100}% of the total GPU memory across all GPUs will be utilized for loading the model."
)
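One detail in these memory checks: the divisor `10e8` equals `1e9`, so the logged "GB" figures are decimal gigabytes. A worked example of the utilization figure with assumed values:

```python
# torch.cuda.mem_get_info returns (free, total) in bytes; values assumed here.
max_allowed_bytes = 8 * 1e9    # e.g. --neuron.llm_max_allowed_memory_in_gb=8
global_free_bytes = 20 * 1e9   # free memory reported for the device
gpu_utilization = round(max_allowed_bytes / global_free_bytes, 2)
print(f"{gpu_utilization * 100}% of the GPU memory will be utilized")  # 40.0%
```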
diff --git a/prompting/llms/vllm_llm.py b/prompting/llms/vllm_llm.py
index 382982d4..f69e6fe1 100644
--- a/prompting/llms/vllm_llm.py
+++ b/prompting/llms/vllm_llm.py
@@ -1,5 +1,6 @@
# The MIT License (MIT)
# Copyright © 2024 Yuma Rao
+# Copyright © 2024 cyber~Congress
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
@@ -17,7 +18,7 @@
import gc
import time
import torch
-import bittensor as bt
+import cybertensor as ct
from typing import List, Dict
from vllm import LLM, SamplingParams
from prompting.cleaners.cleaner import CleanerPipeline
@@ -60,13 +61,12 @@ def load_vllm_pipeline(model_id: str, device: str, gpus: int, max_allowed_memory
llm.llm_engine.tokenizer.eos_token_id = 128009
return llm
except Exception as e:
- bt.logging.error(
+ ct.logging.error(
f"Error loading the VLLM pipeline within {max_allowed_memory_in_gb}GB: {e}"
)
raise e
-
class vLLMPipeline(BasePipeline):
def __init__(self, model_id: str, llm_max_allowed_memory_in_gb: int, device: str = None, gpus: int = 1, mock: bool = False):
super().__init__()
@@ -154,7 +154,7 @@ def forward(self, messages: List[Dict[str, str]]):
composed_prompt = self._make_prompt(messages)
response = self.llm_pipeline(composed_prompt, **self.model_kwargs)
- bt.logging.info(
+ ct.logging.info(
f"{self.__class__.__name__} generated the following output:\n{response}"
)
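The vLLM path is untouched apart from logging; for orientation, a hedged sketch of the underlying call pattern (the model id is illustrative; this requires a CUDA GPU and downloads weights):

```python
from vllm import LLM, SamplingParams

llm = LLM(model="casperhansen/llama-3-8b-instruct-awq",
          gpu_memory_utilization=0.4)       # fraction of GPU memory to claim
params = SamplingParams(temperature=0.7, max_tokens=64)
outputs = llm.generate(["What is a validator?"], params)
print(outputs[0].outputs[0].text)
```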
diff --git a/prompting/miners/echo.py b/prompting/miners/echo.py
index b27c8205..1aa9e1f3 100644
--- a/prompting/miners/echo.py
+++ b/prompting/miners/echo.py
@@ -1,5 +1,6 @@
# The MIT License (MIT)
# Copyright © 2024 Yuma Rao
+# Copyright © 2024 cyber~Congress
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
@@ -18,7 +19,7 @@
from functools import partial
from starlette.types import Send
-# Bittensor Miner Template:
+# Cybertensor Miner Template:
from prompting.protocol import StreamPromptingSynapse
# import base miner class which takes care of most of the boilerplate
diff --git a/prompting/miners/hf_miner.py b/prompting/miners/hf_miner.py
index 021361d9..ebf6b875 100644
--- a/prompting/miners/hf_miner.py
+++ b/prompting/miners/hf_miner.py
@@ -1,5 +1,6 @@
# The MIT License (MIT)
# Copyright © 2024 Yuma Rao
+# Copyright © 2024 cyber~Congress
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
@@ -17,12 +18,12 @@
import time
import torch
import argparse
-import bittensor as bt
+import cybertensor as ct
from functools import partial
from starlette.types import Send
from typing import Awaitable
-# Bittensor Miner Template:
+# Cybertensor Miner Template:
from prompting.protocol import StreamPromptingSynapse
from prompting.llms import HuggingFaceLLM, HuggingFacePipeline, load_hf_pipeline
@@ -36,7 +37,7 @@ class HuggingFaceMiner(BaseStreamPromptingMiner):
This requires a GPU with at least 20GB of memory.
To run this miner from the project root directory:
- python neurons/miners/huggingface/miner.py --wallet.name --wallet.hotkey --neuron.model_id --subtensor.network --netuid --axon.port --axon.external_port --logging.debug True --neuron.model_id HuggingFaceH4/zephyr-7b-beta --neuron.system_prompt "Hello, I am a chatbot. I am here to help you with your questions." --neuron.max_tokens 64 --neuron.do_sample True --neuron.temperature 0.9 --neuron.top_k 50 --neuron.top_p 0.95 --wandb.on True --wandb.entity sn1 --wandb.project_name miners_experiments
+ python neurons/miners/huggingface/miner.py --wallet.name --wallet.hotkey --neuron.model_id --cwtensor.network --netuid --axon.port --axon.external_port --logging.debug True --neuron.model_id HuggingFaceH4/zephyr-7b-beta --neuron.system_prompt "Hello, I am a chatbot. I am here to help you with your questions." --neuron.max_tokens 64 --neuron.do_sample True --neuron.temperature 0.9 --neuron.top_k 50 --neuron.top_p 0.95 --wandb.on True --wandb.entity sn1 --wandb.project_name miners_experiments
"""
@classmethod
@@ -51,14 +52,14 @@ def __init__(self, config=None):
model_kwargs = None
if self.config.neuron.load_in_8bit:
- bt.logging.info("Loading 8 bit quantized model...")
+ ct.logging.info("Loading 8 bit quantized model...")
model_kwargs = dict(
torch_dtype=torch.float16,
load_in_8bit=True,
)
if self.config.neuron.load_in_4bit:
- bt.logging.info("Loading 4 bit quantized model...")
+ ct.logging.info("Loading 4 bit quantized model...")
model_kwargs = dict(
torch_dtype=torch.float32,
load_in_4bit=True,
@@ -104,14 +105,14 @@ async def _forward(
timeout_threshold (float): The amount of time that the forward call is allowed to run. If timeout is reached, streaming stops and
validators receive a partial response.
streamer (CustomTextIteratorStreamer): Iterator that holds tokens within a background Queue to be returned when sampled.
- send (Send): bittensor aiohttp send function to send the response back to the validator.
+ send (Send): cybertensor aiohttp send function to send the response back to the validator.
"""
buffer = []
temp_completion = "" # for wandb logging
timeout_reached = False
system_message = ""
- bt.logging.debug(f"📧 Message received, forwarding synapse: {synapse}")
+ ct.logging.debug(f"📧 Message received, forwarding synapse: {synapse}")
try:
streamer = HuggingFaceLLM(
@@ -124,7 +125,7 @@ async def _forward(
top_p=self.config.neuron.top_p,
).stream(message=prompt)
- bt.logging.debug("Starting streaming loop...")
+ ct.logging.debug("Starting streaming loop...")
synapse_message = synapse.messages[-1]
for token in streamer:
system_message += token
@@ -134,7 +135,7 @@ async def _forward(
if synapse_message in system_message:
# Cleans system message and challenge from model response
- bt.logging.warning(
+ ct.logging.warning(
f"Discarding initial system_prompt / user prompt inputs from generation..."
)
buffer = []
@@ -142,14 +143,14 @@ async def _forward(
continue
if time.time() - init_time > timeout_threshold:
- bt.logging.debug(f"⏰ Timeout reached, stopping streaming")
+ ct.logging.debug(f"⏰ Timeout reached, stopping streaming")
timeout_reached = True
break
if len(buffer) == self.config.neuron.streaming_batch_size:
joined_buffer = "".join(buffer)
temp_completion += joined_buffer
- # bt.logging.debug(f"Streamed tokens: {joined_buffer}")
+ # ct.logging.debug(f"Streamed tokens: {joined_buffer}")
await send(
{
@@ -165,7 +166,7 @@ async def _forward(
): # Don't send the last buffer of data if timeout.
joined_buffer = "".join(buffer)
temp_completion += joined_buffer
- # bt.logging.debug(f"Streamed tokens: {joined_buffer}")
+ # ct.logging.debug(f"Streamed tokens: {joined_buffer}")
await send(
{
@@ -176,20 +177,20 @@ async def _forward(
)
except Exception as e:
- bt.logging.error(f"Error in forward: {e}")
+ ct.logging.error(f"Error in forward: {e}")
if self.config.neuron.stop_on_forward_exception:
self.should_exit = True
finally:
# _ = task.result() # wait for thread to finish
- bt.logging.debug("Finishing streaming loop...")
- bt.logging.debug("-" * 50)
- bt.logging.debug(f"---->>> Received message:")
- bt.logging.debug(synapse.messages[0])
- bt.logging.debug("-" * 50)
- bt.logging.debug(f"<<<----- Returned message:")
- bt.logging.debug(temp_completion)
- bt.logging.debug("-" * 50)
+ ct.logging.debug("Finishing streaming loop...")
+ ct.logging.debug("-" * 50)
+ ct.logging.debug(f"---->>> Received message:")
+ ct.logging.debug(synapse.messages[0])
+ ct.logging.debug("-" * 50)
+ ct.logging.debug(f"<<<----- Returned message:")
+ ct.logging.debug(temp_completion)
+ ct.logging.debug("-" * 50)
synapse_latency = time.time() - init_time
@@ -201,7 +202,7 @@ async def _forward(
system_prompt=self.system_prompt,
)
- # bt.logging.debug(f"📧 Message received, forwarding synapse: {synapse}")
+ # ct.logging.debug(f"📧 Message received, forwarding synapse: {synapse}")
prompt = synapse.messages[-1]
init_time = time.time()
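The `_forward` loop above batches streamed tokens and flushes them to the validator every `streaming_batch_size` tokens. A self-contained sketch of that batching over the ASGI `send` interface (the fake `send` is a stand-in for the real axon transport):

```python
import asyncio

async def stream_tokens(tokens, send, batch_size=4):
    buffer = []
    for token in tokens:
        buffer.append(token)
        if len(buffer) == batch_size:       # flush a full batch
            await send({"type": "http.response.body",
                        "body": "".join(buffer).encode("utf-8"),
                        "more_body": True})
            buffer = []
    if buffer:                              # flush the remainder
        await send({"type": "http.response.body",
                    "body": "".join(buffer).encode("utf-8"),
                    "more_body": False})

async def fake_send(message):               # stand-in for the ASGI send callable
    print(message["body"].decode(), end="|")

asyncio.run(stream_tokens(list("hello world"), fake_send, batch_size=4))
```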
diff --git a/prompting/miners/mock.py b/prompting/miners/mock.py
index 6839245c..7a02411b 100644
--- a/prompting/miners/mock.py
+++ b/prompting/miners/mock.py
@@ -1,5 +1,6 @@
# The MIT License (MIT)
# Copyright © 2024 Yuma Rao
+# Copyright © 2024 cyber~Congress
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
@@ -18,7 +19,7 @@
from functools import partial
from starlette.types import Send
-# Bittensor Miner Template:
+# Cybertensor Miner Template:
from prompting.protocol import StreamPromptingSynapse
# import base miner class which takes care of most of the boilerplate
diff --git a/prompting/miners/openai_miner.py b/prompting/miners/openai_miner.py
index 73bf1718..2763bb14 100644
--- a/prompting/miners/openai_miner.py
+++ b/prompting/miners/openai_miner.py
@@ -1,5 +1,6 @@
# The MIT License (MIT)
# Copyright © 2024 Yuma Rao
+# Copyright © 2024 cyber~Congress
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
@@ -17,13 +18,13 @@
import time
import os
-import bittensor as bt
+import cybertensor as ct
import argparse
from starlette.types import Send
from functools import partial
from typing import Dict, Awaitable
-# Bittensor Miner Template:
+# Cybertensor Miner Template:
from prompting.base.prompting_miner import BaseStreamPromptingMiner
from prompting.protocol import StreamPromptingSynapse
@@ -54,7 +55,7 @@ def add_args(cls, parser: argparse.ArgumentParser):
def __init__(self, config=None):
super().__init__(config=config)
- bt.logging.info(f"Initializing with model {self.config.neuron.model_id}...")
+ ct.logging.info(f"Initializing with model {self.config.neuron.model_id}...")
if self.config.wandb.on:
self.identity_tags = ("openai_miner",) + (self.config.neuron.model_id,)
@@ -96,14 +97,14 @@ async def _forward(
buffer.append(token)
if time.time() - init_time > timeout_threshold:
- bt.logging.debug(f"⏰ Timeout reached, stopping streaming")
+ ct.logging.debug(f"⏰ Timeout reached, stopping streaming")
timeout_reached = True
break
if len(buffer) == self.config.neuron.streaming_batch_size:
joined_buffer = "".join(buffer)
temp_completion += joined_buffer
- bt.logging.debug(f"Streamed tokens: {joined_buffer}")
+ ct.logging.debug(f"Streamed tokens: {joined_buffer}")
await send(
{
@@ -127,7 +128,7 @@ async def _forward(
)
except Exception as e:
- bt.logging.error(f"Error in forward: {e}")
+ ct.logging.error(f"Error in forward: {e}")
if self.config.neuron.stop_on_forward_exception:
self.should_exit = True
@@ -141,7 +142,7 @@ async def _forward(
system_prompt=self.system_prompt,
)
- bt.logging.debug(f"📧 Message received, forwarding synapse: {synapse}")
+ ct.logging.debug(f"📧 Message received, forwarding synapse: {synapse}")
prompt = ChatPromptTemplate.from_messages(
[("system", self.system_prompt), ("user", "{input}")]
diff --git a/prompting/miners/phrase.py b/prompting/miners/phrase.py
index 5062fa64..9e1f3495 100644
--- a/prompting/miners/phrase.py
+++ b/prompting/miners/phrase.py
@@ -1,5 +1,6 @@
# The MIT License (MIT)
# Copyright © 2024 Yuma Rao
+# Copyright © 2024 cyber~Congress
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
@@ -16,11 +17,11 @@
# DEALINGS IN THE SOFTWARE.
import typing
import argparse
-import bittensor as bt
+import cybertensor as ct
from functools import partial
from starlette.types import Send
-# Bittensor Miner Template:
+# Cybertensor Miner Template:
from prompting.protocol import StreamPromptingSynapse
# import base miner class which takes care of most of the boilerplate
diff --git a/prompting/miners/utils.py b/prompting/miners/utils.py
index 3c30b62e..739ea78b 100644
--- a/prompting/miners/utils.py
+++ b/prompting/miners/utils.py
@@ -1,4 +1,4 @@
-import bittensor as bt
+import cybertensor as ct
class OpenAIUtils:
@@ -6,10 +6,10 @@ def __init__(self) -> None:
pass
def get_cost_logging(self, cb):
- bt.logging.info(f"Total Tokens: {cb.total_tokens}")
- bt.logging.info(f"Prompt Tokens: {cb.prompt_tokens}")
- bt.logging.info(f"Completion Tokens: {cb.completion_tokens}")
- bt.logging.info(f"Total Cost (USD): ${round(cb.total_cost,4)}")
+ ct.logging.info(f"Total Tokens: {cb.total_tokens}")
+ ct.logging.info(f"Prompt Tokens: {cb.prompt_tokens}")
+ ct.logging.info(f"Completion Tokens: {cb.completion_tokens}")
+ ct.logging.info(f"Total Cost (USD): ${round(cb.total_cost,4)}")
self.accumulated_total_tokens += cb.total_tokens
self.accumulated_prompt_tokens += cb.prompt_tokens
diff --git a/prompting/mock.py b/prompting/mock.py
index e6058ddf..e6b071e4 100644
--- a/prompting/mock.py
+++ b/prompting/mock.py
@@ -2,7 +2,7 @@
import torch
import asyncio
import random
-import bittensor as bt
+import cybertensor as ct
from prompting.protocol import StreamPromptingSynapse, PromptingSynapse
from functools import partial
@@ -75,10 +75,10 @@ def preprocess(self, **kwargs):
pass
-class MockSubtensor(bt.MockSubtensor):
+class MockCwtensor(ct.MockCwtensor):
def __init__(self, netuid, n=16, wallet=None):
super().__init__()
- # reset the underlying subtensor state
+ # reset the underlying cwtensor state
self.chain_state = None
self.setup()
@@ -89,8 +89,8 @@ def __init__(self, netuid, n=16, wallet=None):
if wallet is not None:
self.force_register_neuron(
netuid=netuid,
- hotkey=wallet.hotkey.ss58_address,
- coldkey=wallet.coldkey.ss58_address,
+ hotkey=wallet.hotkey.address,
+ coldkey=wallet.coldkey.address,
balance=100000,
stake=100000,
)
@@ -106,15 +106,15 @@ def __init__(self, netuid, n=16, wallet=None):
)
-class MockMetagraph(bt.metagraph):
+class MockMetagraph(ct.metagraph):
DEFAULT_IP = "127.0.0.0"
DEFAULT_PORT = 8091
- def __init__(self, subtensor, netuid=1, network="mock"):
+ def __init__(self, cwtensor, netuid=1, network="mock"):
super().__init__(netuid=netuid, network=network, sync=False)
- self.subtensor = subtensor
- self.sync(subtensor=self.subtensor)
+ self.cwtensor = cwtensor
+ self.sync(cwtensor=self.cwtensor)
for axon in self.axons:
axon.ip = self.DEFAULT_IP
@@ -178,7 +178,7 @@ def _forward(self, prompt: str, start_time: float, sample: Any):
yield buffer, continue_streaming
except Exception as e:
- bt.logging.error(f"Error in forward: {e}")
+ ct.logging.error(f"Error in forward: {e}")
prompt = synapse.messages[-1]
token_streamer = partial(_forward, self, prompt, start_time)
@@ -186,9 +186,9 @@ def _forward(self, prompt: str, start_time: float, sample: Any):
return token_streamer
-class MockDendrite(bt.dendrite):
+class MockDendrite(ct.dendrite):
"""
- Replaces a real bittensor network request with a mock request that just returns some static
+ Replaces a real cybertensor network request with a mock request that just returns some static
completion for all axons that are passed and adds some random delay.
"""
@@ -201,10 +201,10 @@ def __init__(self, wallet):
async def call(
self,
i: int,
- synapse: bt.Synapse = bt.Synapse(),
+ synapse: ct.Synapse = ct.Synapse(),
timeout: float = 12.0,
deserialize: bool = True,
- ) -> bt.Synapse:
+ ) -> ct.Synapse:
"""Simulated call method to fill synapses with mock data."""
process_time = random.random() * (self.MAX_TIME - self.MIN_TIME) + self.MIN_TIME
@@ -236,7 +236,7 @@ async def call_stream(
"""
Yields:
object: Each yielded object contains a chunk of the arbitrary response data from the Axon.
- bittensor.Synapse: After the AsyncGenerator has been exhausted, yields the final filled Synapse.
+ cybertensor.Synapse: After the AsyncGenerator has been exhausted, yields the final filled Synapse.
Communications delay is simulated in the MockStreamMiner.forward method. Therefore, we can
compute the process_time directly here.
@@ -281,8 +281,8 @@ async def call_stream(
async def forward(
self,
- axons: List[bt.axon],
- synapse: bt.Synapse = bt.Synapse(),
+ axons: List[ct.axon],
+ synapse: ct.Synapse = ct.Synapse(),
timeout: float = 12,
deserialize: bool = True,
run_async: bool = True,
@@ -301,7 +301,7 @@ async def query_all_axons(is_stream: bool):
"""Queries all axons for responses."""
async def single_axon_response(
- i: int, target_axon: Union[bt.AxonInfo, bt.axon]
+ i: int, target_axon: Union[ct.AxonInfo, ct.axon]
):
"""Queries a single axon for a response."""
@@ -309,7 +309,7 @@ async def single_axon_response(
target_axon = (
target_axon.info()
- if isinstance(target_axon, bt.axon)
+ if isinstance(target_axon, ct.axon)
else target_axon
)
@@ -355,4 +355,4 @@ def __str__(self) -> str:
Returns:
str: The string representation of the Dendrite object in the format "dendrite()".
"""
- return "MockDendrite({})".format(self.keypair.ss58_address)
+ return "MockDendrite({})".format(self.keypair.address)
diff --git a/prompting/protocol.py b/prompting/protocol.py
index 1367a4f4..fb4c9c58 100644
--- a/prompting/protocol.py
+++ b/prompting/protocol.py
@@ -1,5 +1,6 @@
# The MIT License (MIT)
# Copyright © 2024 Yuma Rao
+# Copyright © 2024 cyber~Congress
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
@@ -16,7 +17,7 @@
# DEALINGS IN THE SOFTWARE.
import pydantic
-import bittensor as bt
+import cybertensor as ct
from typing import List, AsyncIterator
from starlette.responses import StreamingResponse
@@ -24,7 +25,7 @@
import pdb
-class PromptingSynapse(bt.Synapse):
+class PromptingSynapse(ct.Synapse):
"""
The PromptingSynapse subclass of the Synapse class encapsulates the functionalities related to prompting scenarios.
@@ -128,10 +129,10 @@ def deserialize(self) -> "PromptingSynapse":
)
-class StreamPromptingSynapse(bt.StreamingSynapse):
+class StreamPromptingSynapse(ct.StreamingSynapse):
"""
StreamPromptingSynapse is a specialized implementation of the `StreamingSynapse` tailored for prompting functionalities within
- the Bittensor network. This class is intended to interact with a streaming response that contains a sequence of tokens,
+ the Cybertensor network. This class is intended to interact with a streaming response that contains a sequence of tokens,
which represent prompts or messages in a certain scenario.
As a developer, when using or extending the `StreamPromptingSynapse` class, you should be primarily focused on the structure
@@ -196,7 +197,7 @@ async def process_streaming_response(
) -> AsyncIterator[str]:
"""
`process_streaming_response` is an asynchronous method designed to process the incoming streaming response from the
- Bittensor network. It's the heart of the StreamPromptingSynapse class, ensuring that streaming tokens, which represent
+ Cybertensor network. It's the heart of the StreamPromptingSynapse class, ensuring that streaming tokens, which represent
prompts or messages, are decoded and appropriately managed.
As the streaming response is consumed, the tokens are decoded from their 'utf-8' encoded format, split based on
@@ -234,7 +235,7 @@ def extract_response_json(self, response: StreamingResponse) -> dict:
Beyond just extracting the JSON data, the method also processes and structures the data for easier consumption
and understanding. For instance, it extracts specific headers related to dendrite and axon, offering insights
- about the Bittensor network's internal processes. The method ultimately returns a dictionary with a structured
+ about the Cybertensor network's internal processes. The method ultimately returns a dictionary with a structured
view of the extracted data.
Args:
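The docstring above describes the core of `process_streaming_response`: raw chunks arrive utf-8 encoded and are decoded and accumulated into the completion. A toy version of that loop with a simulated chunk source:

```python
import asyncio

async def chunk_source():                  # simulated network chunks
    for raw in (b"The ", b"quick ", b"brown ", b"fox"):
        yield raw

async def process_streaming_response(chunks):
    completion = ""
    async for raw in chunks:
        token = raw.decode("utf-8")        # decode each chunk as it arrives
        completion += token
        yield token

async def main():
    async for token in process_streaming_response(chunk_source()):
        print(token, end="")

asyncio.run(main())
```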
diff --git a/prompting/rewards/date.py b/prompting/rewards/date.py
index 9fcb5bc1..e5f769e5 100644
--- a/prompting/rewards/date.py
+++ b/prompting/rewards/date.py
@@ -5,7 +5,7 @@
import numpy as np
from typing import List
from prompting.rewards import BaseRewardModel, BatchRewardOutput, RewardModelTypeEnum
-import bittensor as bt
+import cybertensor as ct
class DateRewardModel(BaseRewardModel):
diff --git a/prompting/rewards/reward.py b/prompting/rewards/reward.py
index bf5d2bc0..469c5b38 100644
--- a/prompting/rewards/reward.py
+++ b/prompting/rewards/reward.py
@@ -1,6 +1,6 @@
import torch
import time
-import bittensor as bt
+import cybertensor as ct
from typing import List
from abc import ABC, abstractmethod
from dataclasses import dataclass
@@ -135,10 +135,14 @@ def __post_init__(self):
f"rewards.shape {self.rewards.shape} != timings.shape {self.timings.shape}"
)
- self.rewards_normalized = (self.rewards - self.rewards.min()) / (
- self.rewards.max() - self.rewards.min() + 1e-6
- )
-
+ try:
+ self.rewards_normalized = (self.rewards - self.rewards.min()) / (
+ self.rewards.max() - self.rewards.min() + 1e-6
+ )
+ except RuntimeError as e:
+ # TODO added during debugging, remove later after testing
+ print(f"An error occurred at rewards_normalized: {e}")
+ self.rewards_normalized = 0.0
class BaseRewardModel(ABC):
@property
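The guarded block above wraps the min-max normalization; note the fallback assigns a plain `0.0` rather than a tensor, which downstream tensor code may not expect. For reference, a worked example of the normalization itself:

```python
import torch

rewards = torch.tensor([0.2, 0.8, 0.5])
normalized = (rewards - rewards.min()) / (rewards.max() - rewards.min() + 1e-6)
print(normalized)  # ~[0.0, 1.0, 0.5]; the 1e-6 keeps the division finite
                   # when every reward is identical (max == min)
```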
diff --git a/prompting/tasks/debugging.py b/prompting/tasks/debugging.py
index 1e20e9ec..e7862045 100644
--- a/prompting/tasks/debugging.py
+++ b/prompting/tasks/debugging.py
@@ -1,5 +1,5 @@
import random
-import bittensor as bt
+import cybertensor as ct
from dataclasses import dataclass
from prompting.tasks import Task
import difflib
@@ -48,7 +48,7 @@ def remove(code, n, sep=" ", min_length=1, max_length=10):
],
n,
)
- bt.logging.info(
+ ct.logging.info(
f"Removing the following {len(indices)} chunks: {[chunks[i] for i in indices]} at indices {indices}"
)
@@ -68,7 +68,7 @@ def swap(code, sep=" ", min_length=1, max_length=10):
2,
)
- bt.logging.info(
+ ct.logging.info(
f"Swapping chunk {chunks[indices[0]]!r} at index {indices[0]} with chunk {chunks[indices[1]]!r} at index {indices[1]}"
)
diff --git a/prompting/tasks/math.py b/prompting/tasks/math.py
index c1abf208..902342d3 100644
--- a/prompting/tasks/math.py
+++ b/prompting/tasks/math.py
@@ -1,5 +1,5 @@
import sys
-import bittensor as bt
+import cybertensor as ct
from dataclasses import dataclass
from prompting.tasks import Task
from .challenge_templates import MathChallengeTemplate
diff --git a/prompting/tasks/qa.py b/prompting/tasks/qa.py
index 2fa29308..43243c8e 100644
--- a/prompting/tasks/qa.py
+++ b/prompting/tasks/qa.py
@@ -1,4 +1,4 @@
-import bittensor as bt
+import cybertensor as ct
from dataclasses import dataclass
from prompting.tasks import Task
@@ -86,7 +86,7 @@ def __init__(self, llm_pipeline, context, create_reference=True, history=None):
self.query_system_prompt = QUERY_SYSTEM_PROMPT
if history:
self.query_prompt = FOLLOWUP_PROMPT_TEMPLATE.format(context=context.content, history=history)
- bt.logging.warning(f'Using history!!\n{history=}\n\n{context=}\n\n{self.query_prompt=}')
+ ct.logging.warning(f'Using history!!\n{history=}\n\n{context=}\n\n{self.query_prompt=}')
else:
self.query_prompt = QUERY_PROMPT_TEMPLATE.format(context=context.content)
diff --git a/prompting/tasks/task.py b/prompting/tasks/task.py
index 041798d3..51c20b37 100644
--- a/prompting/tasks/task.py
+++ b/prompting/tasks/task.py
@@ -1,5 +1,5 @@
import time
-import bittensor as bt
+import cybertensor as ct
from abc import ABC
from dataclasses import dataclass, asdict
from enum import Enum
@@ -17,6 +17,8 @@
def make_system_prompt():
return CHATTENSOR_SYSTEM_PROMPT.format(date=time.strftime("%B %d, %Y"))
+
+
class TaskEvaluationType(Enum):
REWARD_STACK = "reward"
FILTER_STACK = "filter"
@@ -94,7 +96,7 @@ def generate_reference(self, pipeline: BasePipeline, clean=True) -> str:
if not self.static_reference:
if not self.clean_reference:
clean = False
- bt.logging.info("🤖 Generating reference...")
+ ct.logging.info("🤖 Generating reference...")
self.reference = self.generate(
system=make_system_prompt(),
prompt=self.reference_prompt,
@@ -109,7 +111,7 @@ def generate_query(self, pipeline: BasePipeline, clean=True) -> str:
"""Generates a query to be used for generating the challenge"""
t0 = time.time()
if not self.static_query:
- bt.logging.info("🤖 Generating query...")
+ ct.logging.info("🤖 Generating query...")
self.query = self.generate(
system=self.query_system_prompt, #Could possibly add the chattensor system prompt to query but I don't think it adds anything
prompt=self.query_prompt,
diff --git a/prompting/tasks/translate.py b/prompting/tasks/translate.py
index 17c79c4f..e3cfb810 100644
--- a/prompting/tasks/translate.py
+++ b/prompting/tasks/translate.py
@@ -1,5 +1,5 @@
import tqdm
-import bittensor as bt
+import cybertensor as ct
import argostranslate.package
import argostranslate.translate
import random
@@ -33,18 +33,18 @@ def is_package_installed(from_code, to_code, packages):
if pkg.from_code in SUPPORTED_LANGUAGES and pkg.to_code in SUPPORTED_LANGUAGES
]
- bt.logging.info(f"Supported language pairs: {supported_language_pairs}")
+ ct.logging.info(f"Supported language pairs: {supported_language_pairs}")
# Check for installed packages
pbar = tqdm.tqdm(supported_language_pairs, desc="Checking installed packages")
for package in pbar:
if not is_package_installed(package.from_code, package.to_code, installed_packages):
- bt.logging.info(f"Installing package from {package.from_code} to {package.to_code}")
+ ct.logging.info(f"Installing package from {package.from_code} to {package.to_code}")
package_path = str(package.download())
argostranslate.package.install_from_path(package_path)
- bt.logging.success(f'Package successfully installed at {package_path}')
+ ct.logging.success(f'Package successfully installed at {package_path}')
else:
- bt.logging.info(f"Package from {package.from_code} to {package.to_code} is already installed, skipping...")
+ ct.logging.info(f"Package from {package.from_code} to {package.to_code} is already installed, skipping...")
return supported_language_pairs
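The loop above automates package installation for every supported pair. For a single pair, the Argos Translate usage it builds on looks roughly like this (standard argostranslate API; the en→es pair is illustrative):

```python
import argostranslate.package
import argostranslate.translate

argostranslate.package.update_package_index()
available = argostranslate.package.get_available_packages()
pkg = next(p for p in available if p.from_code == "en" and p.to_code == "es")
argostranslate.package.install_from_path(pkg.download())

print(argostranslate.translate.translate("Hello, world", "en", "es"))
```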
diff --git a/prompting/tools/datasets/base.py b/prompting/tools/datasets/base.py
index ee453560..4cdba9d3 100644
--- a/prompting/tools/datasets/base.py
+++ b/prompting/tools/datasets/base.py
@@ -1,6 +1,7 @@
# The MIT License (MIT)
# Copyright © 2024 Yuma Rao
# Copyright © 2023 Opentensor Foundation
+# Copyright © 2024 cyber~Congress
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
@@ -21,7 +22,7 @@
import functools
from abc import ABC, abstractmethod
from typing import Dict
-import bittensor as bt
+import cybertensor as ct
from ..selector import Selector
from prompting.shared.context import Context
@@ -66,7 +67,7 @@ def next(
if info:
break
- bt.logging.debug(
+ ct.logging.debug(
f"Could not find any samples which meet {self.__class__.__name__} requirements after {tries} tries. Retrying... ({self.max_tries - tries} tries remaining.)"
)
diff --git a/prompting/tools/datasets/code.py b/prompting/tools/datasets/code.py
index bdc2948b..da04076f 100644
--- a/prompting/tools/datasets/code.py
+++ b/prompting/tools/datasets/code.py
@@ -1,6 +1,7 @@
# The MIT License (MIT)
# Copyright © 2024 Yuma Rao
# Copyright © 2023 Opentensor Foundation
+# Copyright © 2024 cyber~Congress
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
@@ -22,7 +23,7 @@
import requests
import itertools
-import bittensor as bt
+import cybertensor as ct
from bs4 import BeautifulSoup
from .base import Dataset
@@ -671,7 +672,7 @@ def get_stack_answer(self, question):
response_answers.raise_for_status()
answers = response_answers.json()["items"]
if not answers:
- bt.logging.warning("No answers found for the question!")
+ ct.logging.warning("No answers found for the question!")
highest_voted_answer = answers[0] # The first answer is the highest voted
soup = BeautifulSoup(highest_voted_answer["body"], "html.parser")
@@ -679,7 +680,7 @@ def get_stack_answer(self, question):
return full_content
def next(self):
- bt.logging.debug("Retrieving data from prompting.dataset...")
+ ct.logging.debug("Retrieving data from prompting.dataset...")
t0 = time.time()
info = self.get_stack_question()
info["fetch_time"] = time.time() - t0
diff --git a/prompting/tools/datasets/math.py b/prompting/tools/datasets/math.py
index b0dfc7ef..e570083f 100644
--- a/prompting/tools/datasets/math.py
+++ b/prompting/tools/datasets/math.py
@@ -1,6 +1,7 @@
# The MIT License (MIT)
# Copyright © 2024 Yuma Rao
# Copyright © 2023 Opentensor Foundation
+# Copyright © 2024 cyber~Congress
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
@@ -20,7 +21,7 @@
import random
import itertools
import mathgenerator
-import bittensor as bt
+import cybertensor as ct
from sympy.parsing.latex import parse_latex
from typing import Dict, Union, List, Tuple
@@ -56,7 +57,7 @@ def get(
Returns:
Dict: _description_
"""
- bt.logging.info(f"Getting math problem {name!r}")
+ ct.logging.info(f"Getting math problem {name!r}")
max_tries = 10
for _ in range(max_tries):
info = mathgenerator.generate_context(name, **kwargs)
diff --git a/prompting/tools/datasets/review.py b/prompting/tools/datasets/review.py
index cfadbcf8..12570978 100644
--- a/prompting/tools/datasets/review.py
+++ b/prompting/tools/datasets/review.py
@@ -1,7 +1,7 @@
import random
import functools
-import bittensor as bt
+import cybertensor as ct
from typing import Dict, Union, List, Tuple
from .base import TemplateDataset
diff --git a/prompting/tools/datasets/wiki.py b/prompting/tools/datasets/wiki.py
index a125630a..aa3aae4e 100644
--- a/prompting/tools/datasets/wiki.py
+++ b/prompting/tools/datasets/wiki.py
@@ -1,6 +1,7 @@
# The MIT License (MIT)
# Copyright © 2024 Yuma Rao
# Copyright © 2023 Opentensor Foundation
+# Copyright © 2024 cyber~Congress
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
@@ -20,7 +21,7 @@
import sys
import random
import datetime
-import bittensor as bt
+import cybertensor as ct
import wikipedia as wiki
from typing import Dict, Union, List, Tuple
from queue import Queue, Full, Empty
@@ -53,7 +54,7 @@ def _get_page(
return page
except wiki.DisambiguationError as e:
- bt.logging.debug(f"{e.__class__.__name__} loading page {title!r}: {e}")
+ ct.logging.debug(f"{e.__class__.__name__} loading page {title!r}: {e}")
# exc info contains a tuple of (requested_title: str, possible_matches: List[str])
pages = sys.exc_info()[1].args[1]
if not type(pages) == list:
@@ -62,7 +63,7 @@ def _get_page(
return _get_page(title, auto_suggest=auto_suggest, redirect=redirect)
except wiki.PageError as e:
- bt.logging.warning(f"{e.__class__.__name__} loading page {title!r}: {e}")
+ ct.logging.warning(f"{e.__class__.__name__} loading page {title!r}: {e}")
if not auto_suggest:
return _get_page(title, auto_suggest=True, redirect=redirect)
return None
@@ -113,7 +114,7 @@ def process_page(
sections[key] = content.splitlines()
if not sections:
- bt.logging.debug(f"No valid sections found in page {page.title!r} ({page.url})")
+ ct.logging.debug(f"No valid sections found in page {page.title!r} ({page.url})")
return sections
@@ -230,7 +231,7 @@ def get(
try:
CACHED_ARTICLES.put(context, block=False)
except Full:
- bt.logging.debug("Cache is full. Skipping article until cache is emptied.")
+ ct.logging.debug("Cache is full. Skipping article until cache is emptied.")
return context
def search(self, name, results=3, selector: Selector = None) -> Dict:
@@ -306,7 +307,7 @@ def _random_date(self) -> str:
return context
except Empty:
- bt.logging.debug("Cache is empty. Skipping date until cache is filled.")
+ ct.logging.debug("Cache is empty. Skipping date until cache is filled.")
return None
def get(
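`_get_page` retries on the two common wikipedia-library failures: a `DisambiguationError` (pick one of the suggested pages and retry) and a `PageError` (retry once with `auto_suggest=True`). A condensed sketch of that fallback chain:

```python
import random
import wikipedia as wiki

def get_page(title, auto_suggest=False):
    try:
        return wiki.page(title, auto_suggest=auto_suggest)
    except wiki.DisambiguationError as e:
        # e.options lists candidate titles; retry with one of them
        return get_page(random.choice(e.options), auto_suggest=auto_suggest)
    except wiki.PageError:
        if not auto_suggest:
            return get_page(title, auto_suggest=True)
        return None

print(get_page("Python (programming language)").title)
```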
diff --git a/prompting/utils/config.py b/prompting/utils/config.py
index 2f4c148f..7c754bd9 100644
--- a/prompting/utils/config.py
+++ b/prompting/utils/config.py
@@ -1,6 +1,7 @@
# The MIT License (MIT)
# Copyright © 2024 Yuma Rao
# Copyright © 2023 Opentensor Foundation
+# Copyright © 2024 cyber~Congress
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
@@ -19,25 +20,25 @@
import os
import torch
import argparse
-import bittensor as bt
+import cybertensor as ct
from loguru import logger
from prompting.tasks import TASKS
-def check_config(cls, config: "bt.Config"):
+def check_config(cls, config: "ct.Config"):
r"""Checks/validates the config namespace object."""
- bt.logging.check_config(config)
+ ct.logging.check_config(config)
full_path = os.path.expanduser(
"{}/{}/{}/netuid{}/{}".format(
- config.logging.logging_dir, # TODO: change from ~/.bittensor/miners to ~/.bittensor/neurons
+ config.logging.logging_dir, # TODO: change from ~/.cybertensor/miners to ~/.cybertensor/neurons
config.wallet.name,
config.wallet.hotkey,
config.netuid,
config.neuron.name,
)
)
- bt.logging.info(f"Logging path: {full_path}")
+ ct.logging.info(f"Logging path: {full_path}")
config.neuron.full_path = os.path.expanduser(full_path)
if not os.path.exists(config.neuron.full_path):
os.makedirs(config.neuron.full_path, exist_ok=True)
@@ -83,7 +84,7 @@ def add_args(cls, parser):
"--neuron.llm_max_allowed_memory_in_gb",
type=int,
help="The max gpu memory utilization set for initializing the model. This parameter currently reflects on the property `gpu_memory_utilization` of vllm",
- default=60,
+ default=8,
)
parser.add_argument(
@@ -307,7 +308,7 @@ def add_validator_args(cls, parser):
"--neuron.timeout",
type=float,
help="The timeout for each forward call in seconds.",
- default=10,
+ default=60,
)
parser.add_argument(
@@ -328,7 +329,8 @@ def add_validator_args(cls, parser):
"--neuron.sample_size",
type=int,
help="The number of miners to query in a single step.",
- default=50,
+ # default=50,
+ default=10,
)
parser.add_argument(
@@ -363,10 +365,10 @@ def add_validator_args(cls, parser):
)
parser.add_argument(
- "--neuron.vpermit_tao_limit",
+ "--neuron.vpermit_limit",
type=int,
- help="The maximum number of TAO allowed to query a validator with a vpermit.",
- default=4096,
+ help="The maximum number of token allowed to query a validator with a vpermit.",
+ default=100000000000000,
)
parser.add_argument(
@@ -410,9 +412,9 @@ def config(cls):
Returns the configuration object specific to this miner or validator after adding relevant arguments.
"""
parser = argparse.ArgumentParser()
- bt.wallet.add_args(parser)
- bt.subtensor.add_args(parser)
- bt.logging.add_args(parser)
- bt.axon.add_args(parser)
+ ct.Wallet.add_args(parser)
+ ct.cwtensor.add_args(parser)
+ ct.logging.add_args(parser)
+ ct.axon.add_args(parser)
cls.add_args(parser)
- return bt.config(parser)
+ return ct.Config(parser)
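All of these flags are plain argparse arguments with dotted names, which is why `config()` can assemble wallet, cwtensor, logging, and axon arguments into one parser. A small illustration of the dotted-name mechanics (the `ct.*.add_args` helpers are stand-ins; only argparse is exercised here):

```python
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--netuid", type=int, default=2)
parser.add_argument("--neuron.sample_size", type=int, default=10,
                    help="The number of miners to query in a single step.")
parser.add_argument("--neuron.timeout", type=float, default=60,
                    help="The timeout for each forward call in seconds.")

config = parser.parse_args([])  # parse defaults only
print(getattr(config, "neuron.sample_size"))  # dotted dests need getattr here;
                                              # ct.Config exposes them as config.neuron.sample_size
```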
diff --git a/prompting/utils/logging.py b/prompting/utils/logging.py
index 381d68b6..95f5c55a 100644
--- a/prompting/utils/logging.py
+++ b/prompting/utils/logging.py
@@ -2,7 +2,7 @@
import os
import copy
import wandb
-import bittensor as bt
+import cybertensor as ct
from dataclasses import asdict, dataclass
from datetime import datetime
from typing import List
@@ -27,7 +27,7 @@ class Log:
def export_logs(logs: List[Log]):
- bt.logging.info("📝 Exporting logs...")
+ ct.logging.info("📝 Exporting logs...")
# Create logs folder if it doesn't exist
if not os.path.exists("logs"):
@@ -62,7 +62,7 @@ def should_reinit_wandb(self):
def init_wandb(self, reinit=False):
"""Starts a new wandb run."""
tags = [
- self.wallet.hotkey.ss58_address,
+ self.wallet.hotkey.address,
prompting.__version__,
str(prompting.__spec_version__),
f"netuid_{self.metagraph.netuid}",
@@ -92,7 +92,7 @@ def init_wandb(self, reinit=False):
tags=tags,
notes=self.config.wandb.notes,
)
- bt.logging.success(
+ ct.logging.success(
prefix="Started a new wandb run",
sufix=f" {self.wandb.name} ",
)
diff --git a/prompting/utils/misc.py b/prompting/utils/misc.py
index fbfbf06a..d15e4be9 100644
--- a/prompting/utils/misc.py
+++ b/prompting/utils/misc.py
@@ -1,6 +1,7 @@
# The MIT License (MIT)
# Copyright © 2024 Yuma Rao
# Copyright © 2023 Opentensor Foundation
+# Copyright © 2024 cyber~Congress
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
@@ -19,7 +20,7 @@
import time
import asyncio
import traceback
-import bittensor as bt
+import cybertensor as ct
from math import floor
from typing import Callable, Any
from functools import lru_cache, update_wrapper
@@ -110,7 +111,7 @@ def ttl_get_block(self) -> int:
Note: self here is the miner or validator instance
"""
- return self.subtensor.get_current_block()
+ return self.cwtensor.get_current_block()
def async_log(func):
@@ -118,14 +119,14 @@ async def wrapper(*args, **kwargs):
start_time = time.time()
task_id = id(asyncio.current_task())
func_name = func.__name__
- bt.logging.debug(f"Starting {func_name} on task {task_id} at {start_time}")
+ ct.logging.debug(f"Starting {func_name} on task {task_id} at {start_time}")
# Execute the wrapped function
result = await func(*args, **kwargs)
end_time = time.time()
execution_time = end_time - start_time
- bt.logging.debug(
+ ct.logging.debug(
f"Completed {func_name} on task {task_id} in {execution_time} seconds"
)
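The `ttl_get_block` change keeps the existing TTL-cached block lookup and only swaps the data source to `cwtensor`. A minimal sketch of the underlying `lru_cache`-plus-time-bucket pattern, with placeholder names and a fake chain query:

```python
import time
from functools import lru_cache

def _ttl_hash(ttl_seconds: int = 12) -> int:
    # Changes once per ttl_seconds, which invalidates the cached entry below.
    return int(time.time()) // ttl_seconds

@lru_cache(maxsize=1)
def _cached_block(ttl_hash: int) -> int:
    # Stand-in for the expensive cwtensor.get_current_block() call.
    return 1_000_000

def get_block() -> int:
    return _cached_block(_ttl_hash())
```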
diff --git a/prompting/utils/uids.py b/prompting/utils/uids.py
index 15f8acee..dcda3429 100644
--- a/prompting/utils/uids.py
+++ b/prompting/utils/uids.py
@@ -1,21 +1,21 @@
import torch
import random
-import bittensor as bt
+import cybertensor as ct
from typing import List
def check_uid_availability(
- metagraph: "bt.metagraph.Metagraph",
+ metagraph: "ct.metagraph.Metagraph",
uid: int,
- vpermit_tao_limit: int,
+ vpermit_limit: int,
coldkeys: set = None,
ips: set = None,
) -> bool:
- """Check if uid is available. The UID should be available if it is serving and has less than vpermit_tao_limit stake
+ """Check if uid is available. The UID should be available if it is serving and has less than vpermit_limit stake
Args:
- metagraph (:obj: bt.metagraph.Metagraph): Metagraph object
+ metagraph (:obj: ct.metagraph.Metagraph): Metagraph object
uid (int): uid to be checked
- vpermit_tao_limit (int): Validator permit tao limit
+ vpermit_limit (int): Validator permit token limit
coldkeys (set): Set of coldkeys to exclude
ips (set): Set of ips to exclude
Returns:
@@ -23,13 +23,13 @@ def check_uid_availability(
"""
# Filter non serving axons.
if not metagraph.axons[uid].is_serving:
- bt.logging.debug(f"uid: {uid} is not serving")
+ ct.logging.debug(f"uid: {uid} is not serving")
return False
# Filter out permitted validators whose stake exceeds vpermit_limit.
- if metagraph.validator_permit[uid] and metagraph.S[uid] > vpermit_tao_limit:
- bt.logging.debug(
- f"uid: {uid} has vpermit and stake ({metagraph.S[uid]}) > {vpermit_tao_limit}"
+ if metagraph.validator_permit[uid] and metagraph.S[uid] > vpermit_limit:
+ ct.logging.debug(
+ f"uid: {uid} has vpermit and stake ({metagraph.S[uid]}) > {vpermit_limit}"
)
return False
@@ -46,6 +46,7 @@ def check_uid_availability(
def get_random_uids(self, k: int, exclude: List[int] = None) -> torch.LongTensor:
"""Returns k available random uids from the metagraph.
Args:
+ self (prompting.base.neuron.BaseNeuron): Neuron
k (int): Number of uids to return.
exclude (List[int]): List of uids to exclude from the random sampling.
Returns:
@@ -54,38 +55,28 @@ def get_random_uids(self, k: int, exclude: List[int] = None) -> torch.LongTensor
If `k` is larger than the number of available `uids`, set `k` to the number of available `uids`.
"""
candidate_uids = []
- coldkeys = set()
- ips = set()
- for uid in range(self.metagraph.n.item()):
- if uid == self.uid:
- continue
+ avail_uids = []
+ for uid in range(self.metagraph.n.item()):
uid_is_available = check_uid_availability(
- self.metagraph,
- uid,
- self.config.neuron.vpermit_tao_limit,
- coldkeys,
- ips,
+ self.metagraph, uid, self.config.neuron.vpermit_limit
)
- if not uid_is_available:
- continue
-
- if self.config.neuron.query_unique_coldkeys:
- coldkeys.add(self.metagraph.axons[uid].coldkey)
+ uid_is_not_excluded = exclude is None or uid not in exclude
- if self.config.neuron.query_unique_ips:
- ips.add(self.metagraph.axons[uid].ip)
+ if uid_is_available:
+ avail_uids.append(uid)
+ if uid_is_not_excluded:
+ candidate_uids.append(uid)
- if exclude is None or uid not in exclude:
- candidate_uids.append(uid)
+ # Check if candidate_uids contains enough for querying; if not, grab all available uids
+ available_uids = candidate_uids
+ # If k is larger than the number of available uids, set k to the number of available uids.
+ k = min(k, len(avail_uids))
- # Check if candidate_uids contain enough for querying, if not grab all avaliable uids
- if 0 < len(candidate_uids) < k:
- bt.logging.warning(
- f"Requested {k} uids but only {len(candidate_uids)} were available. To disable this warning reduce the sample size (--neuron.sample_size)"
+ if len(candidate_uids) < k:
+ available_uids += random.sample(
+ [uid for uid in avail_uids if uid not in candidate_uids],
+ k - len(candidate_uids),
)
- return torch.tensor(candidate_uids)
- elif len(candidate_uids) >= k:
- return torch.tensor(random.sample(candidate_uids, k))
- else:
- raise ValueError(f"No eligible uids were found. Cannot return {k} uids")
+ uids = torch.tensor(random.sample(available_uids, k))
+ return uids
diff --git a/prompting/validator.py b/prompting/validator.py
index 030bbd44..fd593696 100644
--- a/prompting/validator.py
+++ b/prompting/validator.py
@@ -1,4 +1,4 @@
-import bittensor as bt
+import cybertensor as ct
from prompting.forward import forward
from prompting.llms import vLLMPipeline
from prompting.base.validator import BaseValidatorNeuron
@@ -14,7 +14,7 @@ class Validator(BaseValidatorNeuron):
def __init__(self, config=None):
super(Validator, self).__init__(config=config)
- bt.logging.info("load_state()")
+ ct.logging.info("load_state()")
self.load_state()
self.llm_pipeline = vLLMPipeline(
@@ -53,7 +53,7 @@ async def forward(self):
def __enter__(self):
if self.config.no_background_thread:
- bt.logging.warning("Running validator in main thread.")
+ ct.logging.warning("Running validator in main thread.")
self.run()
else:
self.run_in_background_thread()
@@ -74,8 +74,8 @@ def __exit__(self, exc_type, exc_value, traceback):
None if the context was exited without an exception.
"""
if self.is_running:
- bt.logging.debug("Stopping validator in background thread.")
+ ct.logging.debug("Stopping validator in background thread.")
self.should_exit = True
self.thread.join(5)
self.is_running = False
- bt.logging.debug("Stopped")
+ ct.logging.debug("Stopped")
diff --git a/requirements.txt b/requirements.txt
index 9a91e8df..ae52cf83 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,27 +1,32 @@
-angle_emb
-bittensor==6.10.1
-bs4
+angle_emb==0.4.4
+cybertensor==0.2.4
+bs4==0.0.2
click==8.1.3
datasets==2.14.6
deprecation==2.1.0
torch==2.1.1
torchmetrics
-transformers==4.36.2
+transformers==4.41.2
+simpletransformers==0.70.1
pre-commit==3.3.2
git+https://github.com/synapse-alpha/mathgenerator.git@main#egg=mathgenerator
-numpy==1.22.0
-rouge
+numpy==1.23.5
+rouge==1.0.1
scipy==1.10.1
sentencepiece
wandb==0.15.10
tenacity
antlr4-python3-runtime==4.11
-wikipedia
+wikipedia==1.4.0
openai==1.9.0
-langchain==0.1.5
+langchain==0.1.14
langchainhub==0.1.14
python-dotenv
-wikipedia_sections
+wikipedia_sections==2.0.0
vllm
-loguru
-argostranslate
\ No newline at end of file
+loguru==0.7.0
+argostranslate==1.9.6
+anyio==4.4.0
+aiohttp==3.9.0b0
+aiosignal==1.3.1
+autoawq==0.2.5
\ No newline at end of file
diff --git a/scripts/client.py b/scripts/client.py
index d67a63ff..b072d412 100644
--- a/scripts/client.py
+++ b/scripts/client.py
@@ -1,7 +1,7 @@
import argparse
import asyncio
-import bittensor as bt
+import cybertensor as ct
import time
from typing import List, Awaitable
@@ -33,7 +33,7 @@ async def handle_response(
chunk_start_time = time.time()
async for chunk in resp:
chunk_time = round(time.time() - chunk_start_time, 3)
- bt.logging.info(
+ ct.logging.info(
f"UID: {uids[uid_num]}. chunk {ii}({chunk_time}s) for resp: {chunk} "
)
ii += 1
@@ -41,7 +41,7 @@ async def handle_response(
chunk_times.append(chunk_time)
chunk_start_time = time.time()
- bt.logging.success(
+ ct.logging.success(
f"UID {uids[uid_num]} took {(time.time() - start_time):.3f} seconds\n"
)
@@ -65,15 +65,15 @@ async def query_stream_miner(
)
# create a wallet instance with provided wallet name and hotkey
- wallet = bt.wallet(name=wallet_name, hotkey=hotkey)
+ wallet = ct.Wallet(name=wallet_name, hotkey=hotkey)
# instantiate the metagraph with provided network and netuid
- metagraph = bt.metagraph(netuid=netuid, network=network, sync=True, lite=False)
+ metagraph = ct.metagraph(netuid=netuid, network=network, sync=True, lite=False)
# Create a Dendrite instance to handle client-side communication.
- dendrite = bt.dendrite(wallet=wallet)
+ dendrite = ct.dendrite(wallet=wallet)
- bt.logging.info(f"Synapse: {syn}")
+ ct.logging.info(f"Synapse: {syn}")
async def main():
try:
@@ -90,7 +90,7 @@ async def main():
return await handle_response(uids, responses)
except Exception as e:
- bt.logging.error(f"Exception during query to uids: {uids}: {e}")
+ ct.logging.error(f"Exception during query to uids: {uids}: {e}")
return None
await main()
@@ -98,7 +98,7 @@ async def main():
if __name__ == "__main__":
parser = argparse.ArgumentParser(
- description="Query a Bittensor synapse with given parameters."
+ description="Query a Cybertensor synapse with given parameters."
)
parser.add_argument(
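The per-chunk timing in `handle_response` is easiest to see against a toy async generator; this standalone sketch (illustrative names, no network) mirrors the loop above:

```python
import asyncio
import time

async def fake_stream():
    # Stand-in for a miner's streamed response.
    for token in ["hello", " ", "world"]:
        await asyncio.sleep(0.1)
        yield token

async def consume():
    start_time = time.time()
    chunk_start_time = time.time()
    ii = 0
    async for chunk in fake_stream():
        chunk_time = round(time.time() - chunk_start_time, 3)
        print(f"chunk {ii} ({chunk_time}s): {chunk!r}")
        chunk_start_time = time.time()
        ii += 1
    print(f"took {(time.time() - start_time):.3f} seconds")

asyncio.run(consume())
```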
diff --git a/scripts/run.py b/scripts/run.py
index e0a0ea66..c78f02b4 100644
--- a/scripts/run.py
+++ b/scripts/run.py
@@ -98,7 +98,7 @@
# Construct the PM2 start command
command = (
f"pm2 start {neuron['file']} --interpreter python3 --name {neuron['hotkey']}:{neuron['type']} --"
- + f" --wallet.name {coldkey} --wallet.hotkey {neuron['hotkey']} --subtensor.network {network} --netuid {netuid}"
+ + f" --wallet.name {coldkey} --wallet.hotkey {neuron['hotkey']} --cwtensor.network {network} --netuid {netuid}"
+ f" --axon.port {neuron['port']} --logging.debug {neuron.get('config')}"
)
print(command)
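To make the template concrete, here is an illustrative expansion with placeholder wallet and neuron values (not repo defaults):

```python
# All values below are placeholders chosen for illustration.
neuron = {"file": "neurons/validator.py", "hotkey": "v1",
          "type": "validator", "port": 9000, "config": ""}
coldkey, network, netuid = "my_wallet", "space-pussy", 2

command = (
    f"pm2 start {neuron['file']} --interpreter python3 --name {neuron['hotkey']}:{neuron['type']} --"
    + f" --wallet.name {coldkey} --wallet.hotkey {neuron['hotkey']} --cwtensor.network {network} --netuid {netuid}"
    + f" --axon.port {neuron['port']} --logging.debug {neuron.get('config')}"
)
print(command)
```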
diff --git a/scripts/setup_ubuntu_machine.sh b/scripts/setup_ubuntu_machine.sh
index f6ee9c20..57d81234 100644
--- a/scripts/setup_ubuntu_machine.sh
+++ b/scripts/setup_ubuntu_machine.sh
@@ -7,8 +7,8 @@ apt-get install sudo
# Install python3-pip
sudo apt install -y python3-pip
-# Upgrade bittensor
-python3 -m pip install --upgrade bittensor
+# Upgrade cybertensor
+python3 -m pip install --upgrade cybertensor
apt install tree
@@ -18,7 +18,7 @@ sudo apt update && sudo apt install jq && sudo apt install npm && sudo npm insta
# echo 'export OPENAI_API_KEY=YOUR_OPEN_AI_KEY' >> ~/.bashrc
# Clone the repository
-git clone https://github.com/opentensor/prompting.git
+git clone https://github.com/cybercongress/prompting.git
# Change to the prompting directory
cd prompting
diff --git a/setup.py b/setup.py
index 4a80a081..5579897b 100644
--- a/setup.py
+++ b/setup.py
@@ -1,6 +1,7 @@
# The MIT License (MIT)
# Copyright © 2024 Yuma Rao
# Copyright © 2023 Opentensor Foundation
+# Copyright © 2024 cyber~Congress
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
@@ -68,16 +69,16 @@ def read_requirements(path):
setup(
name="prompting",
version=version_string,
- description="SN1: An incentive mechanism for internet-scale conversational intelligence",
+ description="ML Verse SN1: An incentive mechanism for internet-scale conversational intelligence",
long_description=long_description,
long_description_content_type="text/markdown",
- url="https://github.com/opentensor/prompting",
- author="bittensor.com",
+ url="https://github.com/cybercongress/prompting",
+ author="cyberCongress",
packages=find_packages(),
include_package_data=True,
author_email="",
license="MIT",
- python_requires=">=3.8",
+ python_requires=">=3.9.6, <3.12",
install_requires=requirements,
classifiers=[
"Development Status :: 3 - Alpha",
diff --git a/tests/test_mock.py b/tests/test_mock.py
index 12e5dfa6..14051bd0 100644
--- a/tests/test_mock.py
+++ b/tests/test_mock.py
@@ -1,47 +1,47 @@
import pytest
import asyncio
-import bittensor as bt
-from prompting.mock import MockDendrite, MockMetagraph, MockSubtensor
+import cybertensor as ct
+from prompting.mock import MockDendrite, MockMetagraph, MockCwtensor
from prompting.protocol import PromptingSynapse
-wallet = bt.MockWallet()
+wallet = ct.MockWallet()
wallet.create(coldkey_use_password=False)
@pytest.mark.parametrize("netuid", [1, 2, 3])
@pytest.mark.parametrize("n", [2, 4, 8, 16, 32, 64])
@pytest.mark.parametrize("wallet", [wallet, None])
-def test_mock_subtensor(netuid, n, wallet):
- subtensor = MockSubtensor(netuid=netuid, n=n, wallet=wallet)
- neurons = subtensor.neurons(netuid=netuid)
+def test_mock_cwtensor(netuid, n, wallet):
+ cwtensor = MockCwtensor(netuid=netuid, n=n, wallet=wallet)
+ neurons = cwtensor.neurons(netuid=netuid)
# Check netuid
- assert subtensor.subnet_exists(netuid)
+ assert cwtensor.subnet_exists(netuid)
# Check network
- assert subtensor.network == "mock"
- assert subtensor.chain_endpoint == "mock_endpoint"
+ assert cwtensor.network == "mock"
+ assert cwtensor.chain_endpoint == "mock_endpoint"
# Check number of neurons
assert len(neurons) == (n + 1 if wallet is not None else n)
# Check wallet
if wallet is not None:
- assert subtensor.is_hotkey_registered(
- netuid=netuid, hotkey_ss58=wallet.hotkey.ss58_address
+ assert cwtensor.is_hotkey_registered(
+ netuid=netuid, hotkey=wallet.hotkey.address
)
for neuron in neurons:
- assert type(neuron) == bt.NeuronInfo
- assert subtensor.is_hotkey_registered(netuid=netuid, hotkey_ss58=neuron.hotkey)
+ assert type(neuron) == ct.NeuronInfo
+ assert cwtensor.is_hotkey_registered(netuid=netuid, hotkey=neuron.hotkey)
@pytest.mark.parametrize("n", [16, 32, 64])
def test_mock_metagraph(n):
- mock_subtensor = MockSubtensor(netuid=1, n=n)
- mock_metagraph = MockMetagraph(subtensor=mock_subtensor)
+ mock_cwtensor = MockCwtensor(netuid=1, n=n)
+ mock_metagraph = MockMetagraph(cwtensor=mock_cwtensor)
# Check axons
axons = mock_metagraph.axons
assert len(axons) == n
# Check ip and port
for axon in axons:
- assert type(axon) == bt.AxonInfo
+ assert type(axon) == ct.AxonInfo
assert axon.ip == mock_metagraph.DEFAULT_IP
assert axon.port == mock_metagraph.DEFAULT_PORT
@@ -59,12 +59,12 @@ def test_mock_neuron():
@pytest.mark.parametrize("max_time", [0.1, 0.15, 0.2])
@pytest.mark.parametrize("n", [4, 16, 64])
def test_mock_dendrite_timings(timeout, min_time, max_time, n):
- mock_wallet = bt.MockWallet(config=None)
+ mock_wallet = ct.MockWallet(config=None)
mock_dendrite = MockDendrite(mock_wallet)
mock_dendrite.MIN_TIME = min_time
mock_dendrite.MAX_TIME = max_time
- mock_subtensor = MockSubtensor(netuid=1, n=n)
- mock_metagraph = MockMetagraph(subtensor=mock_subtensor)
+ mock_cwtensor = MockCwtensor(netuid=1, n=n)
+ mock_metagraph = MockMetagraph(cwtensor=mock_cwtensor)
axons = mock_metagraph.axons
async def run():
@@ -80,7 +80,7 @@ async def run():
responses = asyncio.run(run())
for synapse in responses:
assert (
- hasattr(synapse, "dendrite") and type(synapse.dendrite) == bt.TerminalInfo
+ hasattr(synapse, "dendrite") and type(synapse.dendrite) == ct.TerminalInfo
)
dendrite = synapse.dendrite
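Assuming a standard pytest setup, the renamed mock tests can be run selectively like so (hypothetical invocation):

```python
import sys
import pytest

# Run only the cwtensor/metagraph mock tests from this file.
sys.exit(pytest.main(["tests/test_mock.py", "-k", "cwtensor or metagraph"]))
```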
diff --git a/tests/test_streaming.py b/tests/test_streaming.py
index bcee0d75..8bfe2cfb 100644
--- a/tests/test_streaming.py
+++ b/tests/test_streaming.py
@@ -1,9 +1,9 @@
import pytest
-import bittensor as bt
+import cybertensor as ct
import asyncio
from typing import List, AsyncGenerator
-from prompting.mock import MockDendrite, MockMetagraph, MockSubtensor
+from prompting.mock import MockDendrite, MockMetagraph, MockCwtensor
from prompting.protocol import StreamPromptingSynapse
@@ -42,10 +42,10 @@ async def handle_response(responses) -> List[StreamPromptingSynapse]:
def test_mock_streaming(timeout: float):
netuid = 1
- mock_wallet = bt.MockWallet()
+ mock_wallet = ct.MockWallet()
mock_dendrite = MockDendrite(wallet=mock_wallet)
- mock_subtensor = MockSubtensor(netuid=netuid, wallet=mock_wallet)
- mock_metagraph = MockMetagraph(netuid=netuid, subtensor=mock_subtensor)
+ mock_cwtensor = MockCwtensor(netuid=netuid, wallet=mock_wallet)
+ mock_metagraph = MockMetagraph(netuid=netuid, cwtensor=mock_cwtensor)
streaming = True
messages = [