diff --git a/.github/workflows/mor-agents-build-linux.yml b/.github/workflows/mor-agents-build-linux.yml
new file mode 100644
index 0000000..3d7dcac
--- /dev/null
+++ b/.github/workflows/mor-agents-build-linux.yml
@@ -0,0 +1,179 @@
+name: MOR Agents Build Linux
+
+on:
+ push:
+ branches: [ main ]
+ pull_request:
+ branches: [ main ]
+ workflow_dispatch:
+
+jobs:
+ build:
+ runs-on: ubuntu-latest
+
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ submodules: 'recursive'
+
+ - name: Set up Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: '3.12'
+
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install -r requirements.txt
+ pip install pyinstaller
+
+ - name: Build with PyInstaller
+ run: |
+ pyinstaller --name="MORagents" --add-data "images/moragents.png:images" main.py
+
+ - name: Create Debian package
+ run: |
+ mkdir -p debian/DEBIAN
+ mkdir -p debian/usr/bin
+ mkdir -p debian/usr/share/applications
+ mkdir -p debian/usr/share/icons/hicolor/256x256/apps
+ cp -r dist/MORagents/* debian/usr/bin/
+ cp images/moragents.png debian/usr/share/icons/hicolor/256x256/apps/moragents.png
+ echo "[Desktop Entry]
+ Name=MORagents
+ Exec=/usr/bin/MORagents
+ Icon=moragents
+ Type=Application
+ Categories=Utility;" > debian/usr/share/applications/moragents.desktop
+ echo "Package: moragents
+ Version: 1.0
+ Section: utils
+ Priority: optional
+ Architecture: amd64
+ Maintainer: LachsBagel
+ Description: MORagents application
+ MORagents is a desktop application for AI agents." > debian/DEBIAN/control
+
+ dpkg-deb --build debian moragents.deb
+
+ - name: Create setup script
+ run: |
+ cat << 'EOF' > moragents-setup.sh
+ #!/bin/bash
+ set -e
+
+ # Colors for output
+ RED='\033[0;31m'
+ GREEN='\033[0;32m'
+ YELLOW='\033[0;33m'
+ NC='\033[0m' # No Color
+
+ # Function to check if a command exists
+ command_exists() {
+ command -v "$1" >/dev/null 2>&1
+ }
+
+ # Function to add current user to docker group
+ add_user_to_docker_group() {
+ local current_user=$(whoami)
+ if [ "$current_user" != "root" ]; then
+ echo -e "${YELLOW}Adding current user to docker group...${NC}"
+ sudo usermod -aG docker "$current_user"
+ echo -e "${GREEN}User added to docker group. Please log out and log back in for changes to take effect.${NC}"
+ else
+ echo -e "${YELLOW}Running as root. Skipping user addition to docker group.${NC}"
+ fi
+ }
+
+ # Function to wait for Ollama service to be ready
+ wait_for_ollama() {
+ echo -e "${YELLOW}Waiting for Ollama service to be ready...${NC}"
+ for i in {1..30}; do
+ if curl -s http://localhost:11434/api/tags >/dev/null; then
+ echo -e "${GREEN}Ollama service is ready.${NC}"
+ return 0
+ fi
+ sleep 2
+ done
+ echo -e "${RED}Timed out waiting for Ollama service.${NC}"
+ return 1
+ }
+
+ # Function to pull Ollama model with retries
+ pull_ollama_model() {
+ local model=$1
+ local max_attempts=3
+ local attempt=1
+
+ while [ $attempt -le $max_attempts ]; do
+ echo -e "${YELLOW}Pulling Ollama model $model (Attempt $attempt)...${NC}"
+ if ollama pull "$model"; then
+ echo -e "${GREEN}Successfully pulled $model.${NC}"
+ return 0
+ fi
+ echo -e "${YELLOW}Failed to pull $model. Retrying...${NC}"
+ sleep 5
+ attempt=$((attempt + 1))
+ done
+
+ echo -e "${RED}Failed to pull $model after $max_attempts attempts.${NC}"
+ return 1
+ }
+
+ # Install curl if not present
+ if ! command_exists curl; then
+ echo -e "${YELLOW}Installing curl...${NC}"
+ sudo apt-get update
+ sudo apt-get install -y curl
+ fi
+
+ # Install Docker if not present
+ if ! command_exists docker; then
+ echo -e "${YELLOW}Installing Docker...${NC}"
+ curl -fsSL https://get.docker.com -o get-docker.sh
+ sudo sh get-docker.sh
+ add_user_to_docker_group
+ sudo systemctl enable docker
+ sudo systemctl start docker
+ else
+ echo -e "${GREEN}Docker is already installed.${NC}"
+ fi
+
+ # Install Ollama
+ echo -e "${YELLOW}Installing Ollama...${NC}"
+ curl -fsSL https://ollama.com/install.sh | sh
+
+ # Start Ollama service
+ echo -e "${YELLOW}Starting Ollama service...${NC}"
+ nohup ollama serve > /dev/null 2>&1 &
+
+ # Wait for Ollama service to be ready
+ wait_for_ollama
+
+ # Pull Ollama models
+ echo -e "${YELLOW}Pulling Ollama models...${NC}"
+ pull_ollama_model llama3.1
+ pull_ollama_model nomic-embed-text
+
+ # Pull necessary Docker images
+ echo -e "${YELLOW}Pulling Docker images...${NC}"
+ sudo docker pull lachsbagel/moragents_dockers-nginx:amd64-0.1.0
+ sudo docker pull lachsbagel/moragents_dockers-agents:amd64-0.1.0
+
+ # Start Docker containers
+ echo -e "${YELLOW}Starting Docker containers...${NC}"
+ sudo docker run -d --name agents -p 8080:5000 --restart always -v /var/lib/agents -v /app/src lachsbagel/moragents_dockers-agents:amd64-0.1.0
+ sudo docker run -d --name nginx -p 3333:80 lachsbagel/moragents_dockers-nginx:amd64-0.1.0
+
+ echo -e "${GREEN}Setup complete!${NC}"
+ EOF
+
+ chmod +x moragents-setup.sh
+
+ - name: Upload Debian Package and Setup Script
+ uses: actions/upload-artifact@v4
+ with:
+ name: MORagentsSetup-Linux
+ path: |
+ moragents.deb
+ moragents-setup.sh
\ No newline at end of file
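
For a local sanity check of what this workflow produces, the following sketch assumes the `MORagentsSetup-Linux` artifact has been downloaded and extracted into the current directory:

```shell
# Inspect the Debian package and syntax-check the generated setup script.
dpkg-deb --info moragents.deb        # control metadata (package, version, arch)
dpkg-deb --contents moragents.deb    # files that will land under /usr
bash -n moragents-setup.sh           # parse-only check of the setup script
```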
diff --git a/.github/workflows/mor-agents-build-mac-arm.yml b/.github/workflows/mor-agents-build-mac-arm.yml
new file mode 100644
index 0000000..0c9ea7f
--- /dev/null
+++ b/.github/workflows/mor-agents-build-mac-arm.yml
@@ -0,0 +1,54 @@
+name: MOR Agents Build macOS ARM
+
+on:
+ push:
+ branches: [ main ]
+ pull_request:
+ branches: [ main ]
+ workflow_dispatch:
+
+jobs:
+ build:
+ runs-on: macos-latest
+
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ submodules: 'recursive'
+
+ - name: Set up Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: '3.12'
+
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install -r requirements.txt
+ pip install pyinstaller
+
+ - name: Build with PyInstaller
+ run: |
+ pyinstaller --windowed --name="MORagents" --icon="images/moragents.icns" --osx-entitlements-file "build_assets/macOS/MORagents.entitlements" main.py
+
+ - name: Move .app to expected location
+ run: |
+ mv dist/MORagents.app build_assets/macOS/
+
+ - name: Install Packages app
+ run: |
+ wget http://s.sudre.free.fr/files/Packages_1211_dev.dmg
+ hdiutil attach Packages_1211_dev.dmg
+ sudo installer -pkg /Volumes/Packages\ 1.2.11/packages/Packages.pkg -target /
+ hdiutil detach /Volumes/Packages\ 1.2.11
+
+ - name: Create installer package
+ run: |
+ cd build_assets/macOS
+ /usr/local/bin/packagesbuild --verbose --project MorpheusPackagesSudre.pkgproj
+
+ - name: Upload Installer
+ uses: actions/upload-artifact@v4
+ with:
+ name: MORagentsSetup-macOS
+ path: ./build_assets/macOS/MORAgentsInstaller.pkg
\ No newline at end of file
diff --git a/.github/workflows/mor-agents-build-mac-intel.yml b/.github/workflows/mor-agents-build-mac-intel.yml
new file mode 100644
index 0000000..caa9124
--- /dev/null
+++ b/.github/workflows/mor-agents-build-mac-intel.yml
@@ -0,0 +1,54 @@
+name: MOR Agents Build macOS Intel
+
+on:
+ push:
+ branches: [ main ]
+ pull_request:
+ branches: [ main ]
+ workflow_dispatch:
+
+jobs:
+ build:
+ runs-on: macos-latest
+
+ steps:
+ - uses: actions/checkout@v4
+ with:
+ submodules: 'recursive'
+
+ - name: Set up Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: '3.12'
+
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install -r requirements.txt
+ pip install pyinstaller
+
+ - name: Build with PyInstaller
+ run: |
+ pyinstaller --windowed --name="MORagents" --icon="images/moragents.icns" --osx-entitlements-file "build_assets/macOS/MORagents.entitlements" main.py
+
+ - name: Move .app to expected location
+ run: |
+ mv dist/MORagents.app build_assets/macOS/
+
+ - name: Install Packages app
+ run: |
+ wget http://s.sudre.free.fr/files/Packages_1211_dev.dmg
+ hdiutil attach Packages_1211_dev.dmg
+ sudo installer -pkg /Volumes/Packages\ 1.2.11/packages/Packages.pkg -target /
+ hdiutil detach /Volumes/Packages\ 1.2.11
+
+ - name: Create installer package
+ run: |
+ cd build_assets/macOS
+ /usr/local/bin/packagesbuild --verbose --project MorpheusPackagesSudreIntel.pkgproj
+
+ - name: Upload Installer
+ uses: actions/upload-artifact@v4
+ with:
+ name: MORagentsSetup-macOS
+ path: ./build_assets/macOS/MORAgentsInstaller.pkg
\ No newline at end of file
diff --git a/README.md b/README.md
index a346d73..f4cd8ed 100644
--- a/README.md
+++ b/README.md
@@ -3,64 +3,71 @@
## Morpheus Install for Local Web3 Agent Interaction
-![UI 1](images/moragents_chatpdf.png)
+![UI 1](images/tweet_sizzler.png)
![UI 2](images/wallet_integration.png)
-![UI 3](images/successful_swap.png)
+![UI 3](images/mor_rewards.png)
-![UI 4](images/agent_clarify.png)
+![UI 4](images/price_fetcher.png)
+
+![UI 5](images/moragents_chatpdf.jpg)
---
### Features
-- Chat with local PDF files
-- Swap ERC Compatible Tokens
-- Fetch Price, Market Cap, and TVL of coins and tokens supported on CoinGecko
-- Web interface works in your preferred browser:
- - Chrome
- - Brave
+#### Upload a PDF with the paperclip icon, then ask questions about it 📄
+ - "Can you give me a summary?"
+ - "What's the main point of the document?"
+#### Swap ERC-Compatible Tokens 🪙 -> 🪙
+ - "Swap 0.01 ETH for USDC"
+#### Fetch Price, Market Cap, and TVL of coins and tokens supported on CoinGecko 📈
+ - "What's the price of ETH?"
+ - "What's the market cap of BTC?"
+#### Check MOR rewards 🏆
+ - "How many MOR rewards do I have?"
+#### Write Sizzling Tweets 🌶️ No Content Moderation 😅
+ - "Write a based tweet about Crypto and AI"
- with your favorite wallet extensions:
- - MetaMask
- - Rainbow
- - Coinbase Wallet
- - WalletConnect
+**Works with your favorite wallet extensions in your existing browser**
---
## Install
-### macOS on M1/2/3 etc. (arm64)
+### macOS
>Assumes minimum 16GB RAM
#### Steps to Install
-1. Download and install [Docker Desktop](https://www.docker.com/products/docker-desktop/)
- 1. Follow default settings, can skip surveys, then leave docker desktop running. You can minimize it.
-2. Download and install [MORagents009.pkg](https://drive.proton.me/urls/762Z6QFNH4#68MKubcGeDtf)
- > SHA256 5200350bba351a40cfac5552476bad1bb67d32ff069a4d9ebc0b3556367673b7 MORagents009.pkg
-3. Wait several minutes for background files to download and then your browser should automatically open to http://localhost:3333
- > Note: After installation is complete, the MORagents app icon will bounce for several minutes on your dock, and then stop. This is normal behavior as it's downloading a large 9GB file in the background. You can open "Activity Monitor" and in the Network tab see that it's downloading.
+1. Download Installer
+ 1. For Mac on Apple Silicon M1/2/3 etc. (arm64)
+ 1. Download and run MORagents installer [MORagents010-apple.pkg](https://drive.proton.me/urls/G9JZYZ508R#gmDk0i6UFLSG)
+ > SHA256 a4846c83ced5d35740f884a144cf518764edfc17b576b808cd77a8fe2da6ebf2 MORagents010-apple.pkg
+ 2. For Mac on Intel (x86_64)
+ 1. Download and install [Docker Desktop](https://desktop.docker.com/mac/main/amd64/Docker.dmg)
+ 2. Download and run MORagents installer [MORagents010-intel.pkg](https://drive.proton.me/urls/HPFMSN40GM#Pa90tgOzYn9g)
+ > SHA256 46b0e927aaca27cf08d9a95b73d01bc07a92cb5a8b05cf69faaf71566712a781 MORagents010-intel.pkg
+2. Wait several minutes for background files to download and then your browser should automatically open to http://localhost:3333
+ > Note: After installation is complete, the MORagents app icon will bounce for several minutes on your dock, and then stop. This is normal behavior as it's downloading a couple of large 9GB files in the background. You can open "Activity Monitor" and see the download activity in the Network tab.
#### Future Usage
- Open the "MORagents" app from Mac search bar.
- For easier access: Right-click MORagents icon on dock -> Options -> Keep in Dock
#### Troubleshooting
-If the app shows connections errors in connecting to agents. Please ensure Docker Desktop is running, then close and reopen **MORagents** from desktop.
-
-
-### macOS Intel (x86_64)
-*coming soon*
-
+- If the app shows connection errors when connecting to agents, please ensure Docker Desktop is running, then close and reopen **MORagents** from the desktop.
+- If installation is unsuccessful, run the following in your Terminal and open the MORagents....pkg again
+ ```shell
+ $ xcode-select --install
+ ```
---
### Windows (x86_64)
>Assumes minimum 16GB RAM
#### Steps
-1. Use Chrome to download [MORagentsSetupWindows009.zip](https://drive.proton.me/urls/8X58WAH80G#ib5r3K4WalDA)
- > SHA256 6b8bd78571df2f5e8c6e516102aa05b1121d0214fdfb75a2be16146c22e0d2c52 MORagentsSetupWindows009.zip
-2. Go to downloaded **MORagentsSetupWindows009(.zip)** file and double click to open
+1. Download [MORagentsSetupWindows010.zip](https://drive.proton.me/urls/QXRZR77AJ0#U0ZRbd2rDbXT)
+ > SHA256 0ca1879d3f103938a49852d2d2f82a36bc0ebc44ed94400fcee3b883e2cbb2f6 MORagentsSetupWindows010.zip
+2. Go to the downloaded **MORagentsSetupWindows010(.zip)** file and double-click to open it
3. Double click **MORagentsSetup.exe**
1. You may need to click "More info" -> "Run anyway"
2. If that still doesn't work, try temporarily disabling your antivirus and open the .exe again
@@ -79,6 +86,13 @@ If the app shows connections errors in connecting to agents. Please ensure Docke
*Coming soon*
+---
+# Adding a New Agent
+
+See [Agents README](submodules/moragents_dockers/README.md) section: "Steps to Add a New Agent".
+
+This will allow you to add custom agents, which will be automatically invoked based on relevant user queries.
+
---
### Build it Yourself
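
The install steps above list a SHA256 checksum for each installer. A quick way to verify a download before opening it, assuming the file sits in the current directory (the filenames are the ones referenced above):

```shell
# The printed digest should match the value listed next to the download link.
shasum -a 256 MORagents010-apple.pkg        # macOS
sha256sum MORagentsSetupWindows010.zip      # Linux
```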
diff --git a/build_assets/linux/install_moragents.sh b/build_assets/linux/install_moragents.sh
new file mode 100644
index 0000000..6aa8e87
--- /dev/null
+++ b/build_assets/linux/install_moragents.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+
+# Colors for output
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[0;33m'
+NC='\033[0m' # No Color
+
+# Check if script is run as root
+if [ "$EUID" -ne 0 ]; then
+ echo -e "${RED}Please run as root${NC}"
+ exit 1
+fi
+
+echo -e "${GREEN}Welcome to the MORagents installer for Linux${NC}"
+
+# Install the .deb package
+echo -e "${YELLOW}Installing MORagents...${NC}"
+dpkg -i moragents.deb
+apt-get install -f -y
+
+# Run the setup script
+echo -e "${YELLOW}Running MORagents setup...${NC}"
+./moragents-setup.sh
+
+echo -e "${GREEN}Installation complete!${NC}"
+echo "You can now start MORagents from your application menu or by running 'MORagents' in the terminal."
+echo -e "${YELLOW}NOTE: Please log out and log back in for Docker group changes to take effect.${NC}"
\ No newline at end of file
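
The re-login note exists because `usermod -aG docker` only takes effect for new login sessions. As a sketch, membership can be confirmed (and picked up in the current terminal only) without a full re-login:

```shell
# Check the group database for the invoking user.
groups "$USER" | grep -q docker && echo "docker group membership recorded"
# Start a subshell that already has the docker group (current terminal only).
newgrp docker
```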
diff --git a/build_assets/macOS/MorpheusPackagesSudre.pkgproj b/build_assets/macOS/MorpheusPackagesSudre.pkgproj
new file mode 100644
index 0000000..439f41b
--- /dev/null
+++ b/build_assets/macOS/MorpheusPackagesSudre.pkgproj
@@ -0,0 +1,1932 @@
+
+
+
+
+ PACKAGES
+
+
+ MUST-CLOSE-APPLICATION-ITEMS
+
+ MUST-CLOSE-APPLICATIONS
+
+ PACKAGE_FILES
+
+ DEFAULT_INSTALL_LOCATION
+ /
+ HIERARCHY
+
+ CHILDREN
+
+
+ CHILDREN
+
+
+ BUNDLE_CAN_DOWNGRADE
+
+ BUNDLE_POSTINSTALL_PATH
+
+ PATH_TYPE
+ 1
+
+ BUNDLE_PREINSTALL_PATH
+
+ PATH_TYPE
+ 1
+
+ CHILDREN
+
+ GID
+ 80
+ PATH
+ MORagents.app
+ PATH_TYPE
+ 1
+ PERMISSIONS
+ 493
+ TYPE
+ 3
+ UID
+ 0
+
+
+ GID
+ 80
+ PATH
+ Applications
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 509
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+
+ CHILDREN
+
+ GID
+ 80
+ PATH
+ Application Support
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Automator
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Documentation
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Extensions
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Filesystems
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Frameworks
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Input Methods
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Internet Plug-Ins
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Keyboard Layouts
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ LaunchAgents
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ LaunchDaemons
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ PreferencePanes
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Preferences
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 80
+ PATH
+ Printers
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ PrivilegedHelperTools
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 1005
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ QuickLook
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ QuickTime
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Screen Savers
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Scripts
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Services
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Widgets
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ GID
+ 0
+ PATH
+ Library
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Shared
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 1023
+ TYPE
+ 1
+ UID
+ 0
+
+
+ GID
+ 80
+ PATH
+ Users
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ GID
+ 0
+ PATH
+ /
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+ PAYLOAD_TYPE
+ 0
+ PRESERVE_EXTENDED_ATTRIBUTES
+
+ SHOW_INVISIBLE
+
+ SPLIT_FORKS
+
+ TREAT_MISSING_FILES_AS_WARNING
+
+ VERSION
+ 5
+
+ PACKAGE_SCRIPTS
+
+ POSTINSTALL_PATH
+
+ PATH
+ postinstall.sh
+ PATH_TYPE
+ 1
+
+ PREINSTALL_PATH
+
+ PATH
+ preinstall.sh
+ PATH_TYPE
+ 1
+
+ RESOURCES
+
+
+ PACKAGE_SETTINGS
+
+ AUTHENTICATION
+ 1
+ CONCLUSION_ACTION
+ 0
+ FOLLOW_SYMBOLIC_LINKS
+
+ IDENTIFIER
+ com.morpheus.pkg.MORAgents
+ LOCATION
+ 0
+ NAME
+ MORAgents
+ OVERWRITE_PERMISSIONS
+
+ PAYLOAD_SIZE
+ -1
+ REFERENCE_PATH
+
+ RELOCATABLE
+
+ USE_HFS+_COMPRESSION
+
+ VERSION
+ 1.0
+
+ TYPE
+ 0
+ UUID
+ 2954F3A7-88A3-4C44-9062-BD8EA2D9DB60
+
+
+ MUST-CLOSE-APPLICATION-ITEMS
+
+ MUST-CLOSE-APPLICATIONS
+
+ PACKAGE_FILES
+
+ DEFAULT_INSTALL_LOCATION
+ /
+ HIERARCHY
+
+ CHILDREN
+
+
+ CHILDREN
+
+ GID
+ 80
+ PATH
+ Applications
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 509
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+
+ CHILDREN
+
+ GID
+ 80
+ PATH
+ Application Support
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Automator
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Documentation
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Extensions
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Filesystems
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Frameworks
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Input Methods
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Internet Plug-Ins
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Keyboard Layouts
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ LaunchAgents
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ LaunchDaemons
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ PreferencePanes
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Preferences
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 80
+ PATH
+ Printers
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ PrivilegedHelperTools
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 1005
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ QuickLook
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ QuickTime
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Screen Savers
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Scripts
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Services
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Widgets
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ GID
+ 0
+ PATH
+ Library
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Shared
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 1023
+ TYPE
+ 1
+ UID
+ 0
+
+
+ GID
+ 80
+ PATH
+ Users
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ GID
+ 0
+ PATH
+ /
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+ PAYLOAD_TYPE
+ 0
+ PRESERVE_EXTENDED_ATTRIBUTES
+
+ SHOW_INVISIBLE
+
+ SPLIT_FORKS
+
+ TREAT_MISSING_FILES_AS_WARNING
+
+ VERSION
+ 5
+
+ PACKAGE_SCRIPTS
+
+ POSTINSTALL_PATH
+
+ PATH_TYPE
+ 1
+
+ PREINSTALL_PATH
+
+ PATH
+ preinstall_docker.sh
+ PATH_TYPE
+ 1
+
+ RESOURCES
+
+
+ PACKAGE_SETTINGS
+
+ AUTHENTICATION
+ 1
+ CONCLUSION_ACTION
+ 0
+ FOLLOW_SYMBOLIC_LINKS
+
+ IDENTIFIER
+ com.morpheus.pkg.Docker
+ LOCATION
+ 0
+ NAME
+ Docker
+ OVERWRITE_PERMISSIONS
+
+ PAYLOAD_SIZE
+ -1
+ REFERENCE_PATH
+
+ RELOCATABLE
+
+ USE_HFS+_COMPRESSION
+
+ VERSION
+ 1.0
+
+ TYPE
+ 0
+ UUID
+ 17D68BA0-D319-4CB2-9C33-5FBF7CD96392
+
+
+ MUST-CLOSE-APPLICATION-ITEMS
+
+ MUST-CLOSE-APPLICATIONS
+
+ PACKAGE_FILES
+
+ DEFAULT_INSTALL_LOCATION
+ /
+ HIERARCHY
+
+ CHILDREN
+
+
+ CHILDREN
+
+ GID
+ 80
+ PATH
+ Applications
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 509
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+
+ CHILDREN
+
+ GID
+ 80
+ PATH
+ Application Support
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Automator
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Documentation
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Extensions
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Filesystems
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Frameworks
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Input Methods
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Internet Plug-Ins
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Keyboard Layouts
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ LaunchAgents
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ LaunchDaemons
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ PreferencePanes
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Preferences
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 80
+ PATH
+ Printers
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ PrivilegedHelperTools
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 1005
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ QuickLook
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ QuickTime
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Screen Savers
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Scripts
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Services
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Widgets
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ GID
+ 0
+ PATH
+ Library
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Shared
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 1023
+ TYPE
+ 1
+ UID
+ 0
+
+
+ GID
+ 80
+ PATH
+ Users
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ GID
+ 0
+ PATH
+ /
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+ PAYLOAD_TYPE
+ 0
+ PRESERVE_EXTENDED_ATTRIBUTES
+
+ SHOW_INVISIBLE
+
+ SPLIT_FORKS
+
+ TREAT_MISSING_FILES_AS_WARNING
+
+ VERSION
+ 5
+
+ PACKAGE_SCRIPTS
+
+ POSTINSTALL_PATH
+
+ PATH_TYPE
+ 1
+
+ PREINSTALL_PATH
+
+ PATH
+ preinstall_ollama.sh
+ PATH_TYPE
+ 1
+
+ RESOURCES
+
+
+ PACKAGE_SETTINGS
+
+ AUTHENTICATION
+ 1
+ CONCLUSION_ACTION
+ 0
+ FOLLOW_SYMBOLIC_LINKS
+
+ IDENTIFIER
+ com.morpheus.pkg.OllamaBrew
+ LOCATION
+ 0
+ NAME
+ OllamaBrew
+ OVERWRITE_PERMISSIONS
+
+ PAYLOAD_SIZE
+ -1
+ REFERENCE_PATH
+
+ RELOCATABLE
+
+ USE_HFS+_COMPRESSION
+
+ VERSION
+ 1.0
+
+ TYPE
+ 0
+ UUID
+ A6B4C36F-B0FC-417F-9B18-59E66B8D78DC
+
+
+ PROJECT
+
+ PROJECT_COMMENTS
+
+ NOTES
+
+
+
+ PROJECT_PRESENTATION
+
+ BACKGROUND
+
+ APPAREANCES
+
+ DARK_AQUA
+
+ LIGHT_AQUA
+
+
+ SHARED_SETTINGS_FOR_ALL_APPAREANCES
+
+
+ INSTALLATION TYPE
+
+ HIERARCHIES
+
+ INSTALLER
+
+ LIST
+
+
+ CHILDREN
+
+ DESCRIPTION
+
+ OPTIONS
+
+ HIDDEN
+
+ STATE
+ 1
+
+ PACKAGE_UUID
+ 2954F3A7-88A3-4C44-9062-BD8EA2D9DB60
+ TITLE
+
+ TYPE
+ 0
+ UUID
+ F1F121E0-7C16-4710-B85B-C10E4F7715DC
+
+
+ CHILDREN
+
+ DESCRIPTION
+
+ OPTIONS
+
+ HIDDEN
+
+ STATE
+ 1
+
+ PACKAGE_UUID
+ 17D68BA0-D319-4CB2-9C33-5FBF7CD96392
+ TITLE
+
+ TYPE
+ 0
+ UUID
+ 4C046B91-0499-4942-A1F5-C9517C6FFFA0
+
+
+ CHILDREN
+
+ DESCRIPTION
+
+ OPTIONS
+
+ HIDDEN
+
+ STATE
+ 1
+
+ PACKAGE_UUID
+ A6B4C36F-B0FC-417F-9B18-59E66B8D78DC
+ TITLE
+
+ TYPE
+ 0
+ UUID
+ 7120666F-EAF2-49DF-A382-C69590CD0905
+
+
+ REMOVED
+
+
+
+ MODE
+ 0
+
+ INSTALLATION_STEPS
+
+
+ ICPRESENTATION_CHAPTER_VIEW_CONTROLLER_CLASS
+ ICPresentationViewIntroductionController
+ INSTALLER_PLUGIN
+ Introduction
+ LIST_TITLE_KEY
+ InstallerSectionTitle
+
+
+ ICPRESENTATION_CHAPTER_VIEW_CONTROLLER_CLASS
+ ICPresentationViewReadMeController
+ INSTALLER_PLUGIN
+ ReadMe
+ LIST_TITLE_KEY
+ InstallerSectionTitle
+
+
+ ICPRESENTATION_CHAPTER_VIEW_CONTROLLER_CLASS
+ ICPresentationViewLicenseController
+ INSTALLER_PLUGIN
+ License
+ LIST_TITLE_KEY
+ InstallerSectionTitle
+
+
+ ICPRESENTATION_CHAPTER_VIEW_CONTROLLER_CLASS
+ ICPresentationViewDestinationSelectController
+ INSTALLER_PLUGIN
+ TargetSelect
+ LIST_TITLE_KEY
+ InstallerSectionTitle
+
+
+ ICPRESENTATION_CHAPTER_VIEW_CONTROLLER_CLASS
+ ICPresentationViewInstallationTypeController
+ INSTALLER_PLUGIN
+ PackageSelection
+ LIST_TITLE_KEY
+ InstallerSectionTitle
+
+
+ ICPRESENTATION_CHAPTER_VIEW_CONTROLLER_CLASS
+ ICPresentationViewInstallationController
+ INSTALLER_PLUGIN
+ Install
+ LIST_TITLE_KEY
+ InstallerSectionTitle
+
+
+ ICPRESENTATION_CHAPTER_VIEW_CONTROLLER_CLASS
+ ICPresentationViewSummaryController
+ INSTALLER_PLUGIN
+ Summary
+ LIST_TITLE_KEY
+ InstallerSectionTitle
+
+
+ INTRODUCTION
+
+ LOCALIZATIONS
+
+
+ LANGUAGE
+ English
+ VALUE
+
+ PATH
+ welcome.html
+ PATH_TYPE
+ 1
+
+
+
+
+ LICENSE
+
+ LOCALIZATIONS
+
+
+ LANGUAGE
+ English
+ VALUE
+
+ PATH
+ license.html
+ PATH_TYPE
+ 1
+
+
+
+ MODE
+ 0
+
+ README
+
+ LOCALIZATIONS
+
+
+ SUMMARY
+
+ LOCALIZATIONS
+
+
+ TITLE
+
+ LOCALIZATIONS
+
+
+ LANGUAGE
+ English
+ VALUE
+ MORAgents
+
+
+
+
+ PROJECT_REQUIREMENTS
+
+ LIST
+
+ RESOURCES
+
+ ROOT_VOLUME_ONLY
+
+
+ PROJECT_SETTINGS
+
+ BUILD_FORMAT
+ 0
+ BUILD_PATH
+
+ PATH
+ .
+ PATH_TYPE
+ 1
+
+ EXCLUDED_FILES
+
+
+ PATTERNS_ARRAY
+
+
+ REGULAR_EXPRESSION
+
+ STRING
+ .DS_Store
+ TYPE
+ 0
+
+
+ PROTECTED
+
+ PROXY_NAME
+ Remove .DS_Store files
+ PROXY_TOOLTIP
+ Remove ".DS_Store" files created by the Finder.
+ STATE
+
+
+
+ PATTERNS_ARRAY
+
+
+ REGULAR_EXPRESSION
+
+ STRING
+ .pbdevelopment
+ TYPE
+ 0
+
+
+ PROTECTED
+
+ PROXY_NAME
+ Remove .pbdevelopment files
+ PROXY_TOOLTIP
+ Remove ".pbdevelopment" files created by ProjectBuilder or Xcode.
+ STATE
+
+
+
+ PATTERNS_ARRAY
+
+
+ REGULAR_EXPRESSION
+
+ STRING
+ CVS
+ TYPE
+ 1
+
+
+ REGULAR_EXPRESSION
+
+ STRING
+ .cvsignore
+ TYPE
+ 0
+
+
+ REGULAR_EXPRESSION
+
+ STRING
+ .cvspass
+ TYPE
+ 0
+
+
+ REGULAR_EXPRESSION
+
+ STRING
+ .svn
+ TYPE
+ 1
+
+
+ REGULAR_EXPRESSION
+
+ STRING
+ .git
+ TYPE
+ 1
+
+
+ REGULAR_EXPRESSION
+
+ STRING
+ .gitignore
+ TYPE
+ 0
+
+
+ PROTECTED
+
+ PROXY_NAME
+ Remove SCM metadata
+ PROXY_TOOLTIP
+ Remove helper files and folders used by the CVS, SVN or Git Source Code Management systems.
+ STATE
+
+
+
+ PATTERNS_ARRAY
+
+
+ REGULAR_EXPRESSION
+
+ STRING
+ classes.nib
+ TYPE
+ 0
+
+
+ REGULAR_EXPRESSION
+
+ STRING
+ designable.db
+ TYPE
+ 0
+
+
+ REGULAR_EXPRESSION
+
+ STRING
+ info.nib
+ TYPE
+ 0
+
+
+ PROTECTED
+
+ PROXY_NAME
+ Optimize nib files
+ PROXY_TOOLTIP
+ Remove "classes.nib", "info.nib" and "designable.nib" files within .nib bundles.
+ STATE
+
+
+
+ PATTERNS_ARRAY
+
+
+ REGULAR_EXPRESSION
+
+ STRING
+ Resources Disabled
+ TYPE
+ 1
+
+
+ PROTECTED
+
+ PROXY_NAME
+ Remove Resources Disabled folders
+ PROXY_TOOLTIP
+ Remove "Resources Disabled" folders.
+ STATE
+
+
+
+ SEPARATOR
+
+
+
+ NAME
+ MORAgentsInstaller
+ PAYLOAD_ONLY
+
+ TREAT_MISSING_PRESENTATION_DOCUMENTS_AS_WARNING
+
+
+
+ TYPE
+ 0
+ VERSION
+ 2
+
+
diff --git a/build_assets/macOS/MorpheusPackagesSudreIntel.pkgproj b/build_assets/macOS/MorpheusPackagesSudreIntel.pkgproj
new file mode 100644
index 0000000..12e7ead
--- /dev/null
+++ b/build_assets/macOS/MorpheusPackagesSudreIntel.pkgproj
@@ -0,0 +1,1932 @@
+
+
+
+
+ PACKAGES
+
+
+ MUST-CLOSE-APPLICATION-ITEMS
+
+ MUST-CLOSE-APPLICATIONS
+
+ PACKAGE_FILES
+
+ DEFAULT_INSTALL_LOCATION
+ /
+ HIERARCHY
+
+ CHILDREN
+
+
+ CHILDREN
+
+
+ BUNDLE_CAN_DOWNGRADE
+
+ BUNDLE_POSTINSTALL_PATH
+
+ PATH_TYPE
+ 1
+
+ BUNDLE_PREINSTALL_PATH
+
+ PATH_TYPE
+ 1
+
+ CHILDREN
+
+ GID
+ 80
+ PATH
+ MORagents.app
+ PATH_TYPE
+ 1
+ PERMISSIONS
+ 493
+ TYPE
+ 3
+ UID
+ 0
+
+
+ GID
+ 80
+ PATH
+ Applications
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 509
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+
+ CHILDREN
+
+ GID
+ 80
+ PATH
+ Application Support
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Automator
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Documentation
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Extensions
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Filesystems
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Frameworks
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Input Methods
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Internet Plug-Ins
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Keyboard Layouts
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ LaunchAgents
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ LaunchDaemons
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ PreferencePanes
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Preferences
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 80
+ PATH
+ Printers
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ PrivilegedHelperTools
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 1005
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ QuickLook
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ QuickTime
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Screen Savers
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Scripts
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Services
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Widgets
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ GID
+ 0
+ PATH
+ Library
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Shared
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 1023
+ TYPE
+ 1
+ UID
+ 0
+
+
+ GID
+ 80
+ PATH
+ Users
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ GID
+ 0
+ PATH
+ /
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+ PAYLOAD_TYPE
+ 0
+ PRESERVE_EXTENDED_ATTRIBUTES
+
+ SHOW_INVISIBLE
+
+ SPLIT_FORKS
+
+ TREAT_MISSING_FILES_AS_WARNING
+
+ VERSION
+ 5
+
+ PACKAGE_SCRIPTS
+
+ POSTINSTALL_PATH
+
+ PATH
+ postinstall_intel.sh
+ PATH_TYPE
+ 1
+
+ PREINSTALL_PATH
+
+ PATH
+ preinstall.sh
+ PATH_TYPE
+ 1
+
+ RESOURCES
+
+
+ PACKAGE_SETTINGS
+
+ AUTHENTICATION
+ 1
+ CONCLUSION_ACTION
+ 0
+ FOLLOW_SYMBOLIC_LINKS
+
+ IDENTIFIER
+ com.morpheus.pkg.MORAgents
+ LOCATION
+ 0
+ NAME
+ MORAgents
+ OVERWRITE_PERMISSIONS
+
+ PAYLOAD_SIZE
+ -1
+ REFERENCE_PATH
+
+ RELOCATABLE
+
+ USE_HFS+_COMPRESSION
+
+ VERSION
+ 1.0
+
+ TYPE
+ 0
+ UUID
+ 2954F3A7-88A3-4C44-9062-BD8EA2D9DB60
+
+
+ MUST-CLOSE-APPLICATION-ITEMS
+
+ MUST-CLOSE-APPLICATIONS
+
+ PACKAGE_FILES
+
+ DEFAULT_INSTALL_LOCATION
+ /
+ HIERARCHY
+
+ CHILDREN
+
+
+ CHILDREN
+
+ GID
+ 80
+ PATH
+ Applications
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 509
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+
+ CHILDREN
+
+ GID
+ 80
+ PATH
+ Application Support
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Automator
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Documentation
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Extensions
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Filesystems
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Frameworks
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Input Methods
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Internet Plug-Ins
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Keyboard Layouts
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ LaunchAgents
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ LaunchDaemons
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ PreferencePanes
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Preferences
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 80
+ PATH
+ Printers
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ PrivilegedHelperTools
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 1005
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ QuickLook
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ QuickTime
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Screen Savers
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Scripts
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Services
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Widgets
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ GID
+ 0
+ PATH
+ Library
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Shared
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 1023
+ TYPE
+ 1
+ UID
+ 0
+
+
+ GID
+ 80
+ PATH
+ Users
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ GID
+ 0
+ PATH
+ /
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+ PAYLOAD_TYPE
+ 0
+ PRESERVE_EXTENDED_ATTRIBUTES
+
+ SHOW_INVISIBLE
+
+ SPLIT_FORKS
+
+ TREAT_MISSING_FILES_AS_WARNING
+
+ VERSION
+ 5
+
+ PACKAGE_SCRIPTS
+
+ POSTINSTALL_PATH
+
+ PATH_TYPE
+ 1
+
+ PREINSTALL_PATH
+
+ PATH
+ preinstall_docker_intel.sh
+ PATH_TYPE
+ 1
+
+ RESOURCES
+
+
+ PACKAGE_SETTINGS
+
+ AUTHENTICATION
+ 1
+ CONCLUSION_ACTION
+ 0
+ FOLLOW_SYMBOLIC_LINKS
+
+ IDENTIFIER
+ com.morpheus.pkg.Docker
+ LOCATION
+ 0
+ NAME
+ Docker
+ OVERWRITE_PERMISSIONS
+
+ PAYLOAD_SIZE
+ -1
+ REFERENCE_PATH
+
+ RELOCATABLE
+
+ USE_HFS+_COMPRESSION
+
+ VERSION
+ 1.0
+
+ TYPE
+ 0
+ UUID
+ 17D68BA0-D319-4CB2-9C33-5FBF7CD96392
+
+
+ MUST-CLOSE-APPLICATION-ITEMS
+
+ MUST-CLOSE-APPLICATIONS
+
+ PACKAGE_FILES
+
+ DEFAULT_INSTALL_LOCATION
+ /
+ HIERARCHY
+
+ CHILDREN
+
+
+ CHILDREN
+
+ GID
+ 80
+ PATH
+ Applications
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 509
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+
+ CHILDREN
+
+ GID
+ 80
+ PATH
+ Application Support
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Automator
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Documentation
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Extensions
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Filesystems
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Frameworks
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Input Methods
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Internet Plug-Ins
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Keyboard Layouts
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ LaunchAgents
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ LaunchDaemons
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ PreferencePanes
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Preferences
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 80
+ PATH
+ Printers
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ PrivilegedHelperTools
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 1005
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ QuickLook
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ QuickTime
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Screen Savers
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Scripts
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Services
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Widgets
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ GID
+ 0
+ PATH
+ Library
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ CHILDREN
+
+
+ CHILDREN
+
+ GID
+ 0
+ PATH
+ Shared
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 1023
+ TYPE
+ 1
+ UID
+ 0
+
+
+ GID
+ 80
+ PATH
+ Users
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+
+ GID
+ 0
+ PATH
+ /
+ PATH_TYPE
+ 0
+ PERMISSIONS
+ 493
+ TYPE
+ 1
+ UID
+ 0
+
+ PAYLOAD_TYPE
+ 0
+ PRESERVE_EXTENDED_ATTRIBUTES
+
+ SHOW_INVISIBLE
+
+ SPLIT_FORKS
+
+ TREAT_MISSING_FILES_AS_WARNING
+
+ VERSION
+ 5
+
+ PACKAGE_SCRIPTS
+
+ POSTINSTALL_PATH
+
+ PATH_TYPE
+ 1
+
+ PREINSTALL_PATH
+
+ PATH
+ preinstall_ollama.sh
+ PATH_TYPE
+ 1
+
+ RESOURCES
+
+
+ PACKAGE_SETTINGS
+
+ AUTHENTICATION
+ 1
+ CONCLUSION_ACTION
+ 0
+ FOLLOW_SYMBOLIC_LINKS
+
+ IDENTIFIER
+ com.morpheus.pkg.OllamaBrew
+ LOCATION
+ 0
+ NAME
+ OllamaBrew
+ OVERWRITE_PERMISSIONS
+
+ PAYLOAD_SIZE
+ -1
+ REFERENCE_PATH
+
+ RELOCATABLE
+
+ USE_HFS+_COMPRESSION
+
+ VERSION
+ 1.0
+
+ TYPE
+ 0
+ UUID
+ A6B4C36F-B0FC-417F-9B18-59E66B8D78DC
+
+
+ PROJECT
+
+ PROJECT_COMMENTS
+
+ NOTES
+
+
+
+ PROJECT_PRESENTATION
+
+ BACKGROUND
+
+ APPAREANCES
+
+ DARK_AQUA
+
+ LIGHT_AQUA
+
+
+ SHARED_SETTINGS_FOR_ALL_APPAREANCES
+
+
+ INSTALLATION TYPE
+
+ HIERARCHIES
+
+ INSTALLER
+
+ LIST
+
+
+ CHILDREN
+
+ DESCRIPTION
+
+ OPTIONS
+
+ HIDDEN
+
+ STATE
+ 1
+
+ PACKAGE_UUID
+ 2954F3A7-88A3-4C44-9062-BD8EA2D9DB60
+ TITLE
+
+ TYPE
+ 0
+ UUID
+ F1F121E0-7C16-4710-B85B-C10E4F7715DC
+
+
+ CHILDREN
+
+ DESCRIPTION
+
+ OPTIONS
+
+ HIDDEN
+
+ STATE
+ 1
+
+ PACKAGE_UUID
+ 17D68BA0-D319-4CB2-9C33-5FBF7CD96392
+ TITLE
+
+ TYPE
+ 0
+ UUID
+ 4C046B91-0499-4942-A1F5-C9517C6FFFA0
+
+
+ CHILDREN
+
+ DESCRIPTION
+
+ OPTIONS
+
+ HIDDEN
+
+ STATE
+ 1
+
+ PACKAGE_UUID
+ A6B4C36F-B0FC-417F-9B18-59E66B8D78DC
+ TITLE
+
+ TYPE
+ 0
+ UUID
+ 7120666F-EAF2-49DF-A382-C69590CD0905
+
+
+ REMOVED
+
+
+
+ MODE
+ 0
+
+ INSTALLATION_STEPS
+
+
+ ICPRESENTATION_CHAPTER_VIEW_CONTROLLER_CLASS
+ ICPresentationViewIntroductionController
+ INSTALLER_PLUGIN
+ Introduction
+ LIST_TITLE_KEY
+ InstallerSectionTitle
+
+
+ ICPRESENTATION_CHAPTER_VIEW_CONTROLLER_CLASS
+ ICPresentationViewReadMeController
+ INSTALLER_PLUGIN
+ ReadMe
+ LIST_TITLE_KEY
+ InstallerSectionTitle
+
+
+ ICPRESENTATION_CHAPTER_VIEW_CONTROLLER_CLASS
+ ICPresentationViewLicenseController
+ INSTALLER_PLUGIN
+ License
+ LIST_TITLE_KEY
+ InstallerSectionTitle
+
+
+ ICPRESENTATION_CHAPTER_VIEW_CONTROLLER_CLASS
+ ICPresentationViewDestinationSelectController
+ INSTALLER_PLUGIN
+ TargetSelect
+ LIST_TITLE_KEY
+ InstallerSectionTitle
+
+
+ ICPRESENTATION_CHAPTER_VIEW_CONTROLLER_CLASS
+ ICPresentationViewInstallationTypeController
+ INSTALLER_PLUGIN
+ PackageSelection
+ LIST_TITLE_KEY
+ InstallerSectionTitle
+
+
+ ICPRESENTATION_CHAPTER_VIEW_CONTROLLER_CLASS
+ ICPresentationViewInstallationController
+ INSTALLER_PLUGIN
+ Install
+ LIST_TITLE_KEY
+ InstallerSectionTitle
+
+
+ ICPRESENTATION_CHAPTER_VIEW_CONTROLLER_CLASS
+ ICPresentationViewSummaryController
+ INSTALLER_PLUGIN
+ Summary
+ LIST_TITLE_KEY
+ InstallerSectionTitle
+
+
+ INTRODUCTION
+
+ LOCALIZATIONS
+
+
+ LANGUAGE
+ English
+ VALUE
+
+ PATH
+ welcome.html
+ PATH_TYPE
+ 1
+
+
+
+
+ LICENSE
+
+ LOCALIZATIONS
+
+
+ LANGUAGE
+ English
+ VALUE
+
+ PATH
+ license.html
+ PATH_TYPE
+ 1
+
+
+
+ MODE
+ 0
+
+ README
+
+ LOCALIZATIONS
+
+
+ SUMMARY
+
+ LOCALIZATIONS
+
+
+ TITLE
+
+ LOCALIZATIONS
+
+
+ LANGUAGE
+ English
+ VALUE
+ MORAgents
+
+
+
+
+ PROJECT_REQUIREMENTS
+
+ LIST
+
+ RESOURCES
+
+ ROOT_VOLUME_ONLY
+
+
+ PROJECT_SETTINGS
+
+ BUILD_FORMAT
+ 0
+ BUILD_PATH
+
+ PATH
+ .
+ PATH_TYPE
+ 1
+
+ EXCLUDED_FILES
+
+
+ PATTERNS_ARRAY
+
+
+ REGULAR_EXPRESSION
+
+ STRING
+ .DS_Store
+ TYPE
+ 0
+
+
+ PROTECTED
+
+ PROXY_NAME
+ Remove .DS_Store files
+ PROXY_TOOLTIP
+ Remove ".DS_Store" files created by the Finder.
+ STATE
+
+
+
+ PATTERNS_ARRAY
+
+
+ REGULAR_EXPRESSION
+
+ STRING
+ .pbdevelopment
+ TYPE
+ 0
+
+
+ PROTECTED
+
+ PROXY_NAME
+ Remove .pbdevelopment files
+ PROXY_TOOLTIP
+ Remove ".pbdevelopment" files created by ProjectBuilder or Xcode.
+ STATE
+
+
+
+ PATTERNS_ARRAY
+
+
+ REGULAR_EXPRESSION
+
+ STRING
+ CVS
+ TYPE
+ 1
+
+
+ REGULAR_EXPRESSION
+
+ STRING
+ .cvsignore
+ TYPE
+ 0
+
+
+ REGULAR_EXPRESSION
+
+ STRING
+ .cvspass
+ TYPE
+ 0
+
+
+ REGULAR_EXPRESSION
+
+ STRING
+ .svn
+ TYPE
+ 1
+
+
+ REGULAR_EXPRESSION
+
+ STRING
+ .git
+ TYPE
+ 1
+
+
+ REGULAR_EXPRESSION
+
+ STRING
+ .gitignore
+ TYPE
+ 0
+
+
+ PROTECTED
+
+ PROXY_NAME
+ Remove SCM metadata
+ PROXY_TOOLTIP
+ Remove helper files and folders used by the CVS, SVN or Git Source Code Management systems.
+ STATE
+
+
+
+ PATTERNS_ARRAY
+
+
+ REGULAR_EXPRESSION
+
+ STRING
+ classes.nib
+ TYPE
+ 0
+
+
+ REGULAR_EXPRESSION
+
+ STRING
+ designable.db
+ TYPE
+ 0
+
+
+ REGULAR_EXPRESSION
+
+ STRING
+ info.nib
+ TYPE
+ 0
+
+
+ PROTECTED
+
+ PROXY_NAME
+ Optimize nib files
+ PROXY_TOOLTIP
+ Remove "classes.nib", "info.nib" and "designable.nib" files within .nib bundles.
+ STATE
+
+
+
+ PATTERNS_ARRAY
+
+
+ REGULAR_EXPRESSION
+
+ STRING
+ Resources Disabled
+ TYPE
+ 1
+
+
+ PROTECTED
+
+ PROXY_NAME
+ Remove Resources Disabled folders
+ PROXY_TOOLTIP
+ Remove "Resources Disabled" folders.
+ STATE
+
+
+
+ SEPARATOR
+
+
+
+ NAME
+ MORAgentsInstaller
+ PAYLOAD_ONLY
+
+ TREAT_MISSING_PRESENTATION_DOCUMENTS_AS_WARNING
+
+
+
+ TYPE
+ 0
+ VERSION
+ 2
+
+
diff --git a/build_assets/macOS/Packaging_Instructions_macOS.md b/build_assets/macOS/Packaging_Instructions_macOS.md
index cb8103f..b3e596e 100644
--- a/build_assets/macOS/Packaging_Instructions_macOS.md
+++ b/build_assets/macOS/Packaging_Instructions_macOS.md
@@ -28,17 +28,17 @@ Future usage only requires you to run MORagents from your searchbar.
## Signing
```sh
- productsign --sign "Developer ID Installer: Liquid Tensor LLC (ZQN244GMTD)" MORagents.pkg MORagents009.pkg
+ productsign --sign "Developer ID Installer: Liquid Tensor LLC (ZQN244GMTD)" MORagents.pkg MORagents010-[apple\|intel].pkg
```
## Notarize
```sh
-xcrun notarytool submit MORagents009.pkg --keychain-profile "NotaryProfile" --wait
+xcrun notarytool submit MORagents010-[apple\|intel].pkg --keychain-profile "NotaryProfile" --wait
```
## Staple
```sh
-xcrun stapler staple MORagents009.pkg
+xcrun stapler staple MORagents010-[apple\|intel].pkg
```
---
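
Before distributing the stapled package, the signature and notarization ticket can be checked; a sketch, assuming the Apple Silicon artifact name from the commands above:

```sh
pkgutil --check-signature MORagents010-apple.pkg           # Developer ID chain
xcrun stapler validate MORagents010-apple.pkg              # notarization ticket attached
spctl --assess --type install -vv MORagents010-apple.pkg   # Gatekeeper verdict
```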
diff --git a/build_assets/macOS/postinstall.sh b/build_assets/macOS/postinstall.sh
old mode 100644
new mode 100755
index 60703a0..e6c4e53
--- a/build_assets/macOS/postinstall.sh
+++ b/build_assets/macOS/postinstall.sh
@@ -1,35 +1,85 @@
#!/bin/bash
+# Function to log messages
+log_message() {
+ echo "$(date '+%Y-%m-%d %H:%M:%S') - $1"
+}
+
# Function to check if an application is running
is_app_running() {
app_name=$1
pgrep -x "$app_name" >/dev/null
}
-# Open Docker Desktop
-open -a "Docker.app"
+# Function to attempt opening Docker with retries
+open_docker_with_retry() {
+ max_attempts=5
+ attempt=1
+ while [ $attempt -le $max_attempts ]; do
+ log_message "Attempt $attempt to open Docker.app"
+ if [ -d "/Applications/Docker.app" ]; then
+ if open -a "Docker.app" 2>/dev/null; then
+ log_message "Docker.app opened successfully"
+ return 0
+ else
+ log_message "Failed to open Docker.app, waiting before retry..."
+ fi
+ else
+ log_message "Docker.app not found in /Applications"
+ fi
+ sleep 10
+ attempt=$((attempt+1))
+ done
+ log_message "Failed to open Docker.app after $max_attempts attempts"
+ return 1
+}
+
+# Main script starts here
+log_message "Starting post-install script"
+
+# Wait for a bit to ensure Docker installation is complete
+log_message "Waiting for 30 seconds to ensure Docker installation is complete..."
+sleep 30
+
+# Attempt to open Docker
+if ! open_docker_with_retry; then
+ log_message "Warning: Could not open Docker.app. It may need to be opened manually."
+fi
# Set the timeout duration (in seconds)
timeout=300 # 5 minutes
# Wait for Docker Desktop to be running
-echo "Waiting for Docker Desktop to start..."
+log_message "Waiting for Docker Desktop to start..."
start_time=$(date +%s)
while ! is_app_running "Docker Desktop"; do
current_time=$(date +%s)
elapsed_time=$((current_time - start_time))
if [ $elapsed_time -ge $timeout ]; then
- echo "Error: Docker Desktop did not start within the specified timeout."
- exit 1
+ log_message "Warning: Docker Desktop did not start within the specified timeout."
+ break
fi
- sleep 1
+ sleep 5
done
-echo "Docker Desktop is running."
+
+if is_app_running "Docker Desktop"; then
+ log_message "Docker Desktop is running."
+else
+ log_message "Warning: Docker Desktop is not running. It may need to be started manually."
+fi
# Open MORAgents.app
-open -a "MORAgents.app"
+if [ -d "/Applications/MORAgents.app" ]; then
+ if open -a "MORAgents.app" 2>/dev/null; then
+ log_message "MORAgents.app opened successfully"
+ else
+ log_message "Warning: Failed to open MORAgents.app. It may need to be opened manually."
+ fi
+else
+ log_message "Error: MORAgents.app not found in /Applications"
+fi
-echo "Post-install script completed."
-exit 0
+log_message "Post-install script completed."
+exit 0
\ No newline at end of file
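
This script gates on the "Docker Desktop" process name. If the Docker CLI is already on the PATH at postinstall time (not guaranteed), an alternative sketch is to poll the engine itself, which also covers the window where the GUI is up but the daemon is not yet accepting connections:

```shell
# Poll the Docker engine rather than the GUI process (illustrative only).
for i in $(seq 1 60); do
  if docker info >/dev/null 2>&1; then
    echo "Docker engine is ready"
    break
  fi
  sleep 5
done
```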
diff --git a/build_assets/macOS/postinstall_intel.sh b/build_assets/macOS/postinstall_intel.sh
new file mode 100644
index 0000000..0dab78e
--- /dev/null
+++ b/build_assets/macOS/postinstall_intel.sh
@@ -0,0 +1,91 @@
+#!/bin/bash
+
+# Function to log messages
+log_message() {
+ echo "$(date '+%Y-%m-%d %H:%M:%S') - $1"
+}
+
+# Check if running on Intel Mac
+if [ "$(uname -m)" != "x86_64" ]; then
+ log_message "Error: This script is for Intel-based Macs only."
+ exit 1
+fi
+
+# Function to check if an application is running
+is_app_running() {
+ app_name=$1
+ pgrep -x "$app_name" >/dev/null
+}
+
+# Function to attempt opening Docker with retries
+open_docker_with_retry() {
+ max_attempts=5
+ attempt=1
+ while [ $attempt -le $max_attempts ]; do
+ log_message "Attempt $attempt to open Docker.app"
+ if [ -d "/Applications/Docker.app" ]; then
+ if open -a "Docker.app" 2>/dev/null; then
+ log_message "Docker.app opened successfully"
+ return 0
+ else
+ log_message "Failed to open Docker.app, waiting before retry..."
+ fi
+ else
+ log_message "Docker.app not found in /Applications"
+ fi
+ sleep 10
+ attempt=$((attempt+1))
+ done
+ log_message "Failed to open Docker.app after $max_attempts attempts"
+ return 1
+}
+
+# Main script starts here
+log_message "Starting post-install script"
+
+# Wait for a bit to ensure Docker installation is complete
+log_message "Waiting for 30 seconds to ensure Docker installation is complete..."
+sleep 30
+
+# Attempt to open Docker
+if ! open_docker_with_retry; then
+ log_message "Warning: Could not open Docker.app. It may need to be opened manually."
+fi
+
+# Set the timeout duration (in seconds)
+timeout=300 # 5 minutes
+
+# Wait for Docker Desktop to be running
+log_message "Waiting for Docker Desktop to start..."
+start_time=$(date +%s)
+while ! is_app_running "Docker Desktop"; do
+ current_time=$(date +%s)
+ elapsed_time=$((current_time - start_time))
+
+ if [ $elapsed_time -ge $timeout ]; then
+ log_message "Warning: Docker Desktop did not start within the specified timeout."
+ break
+ fi
+
+ sleep 5
+done
+
+if is_app_running "Docker Desktop"; then
+ log_message "Docker Desktop is running."
+else
+ log_message "Warning: Docker Desktop is not running. It may need to be started manually."
+fi
+
+# Open MORAgents.app
+if [ -d "/Applications/MORAgents.app" ]; then
+ if open -a "MORAgents.app" 2>/dev/null; then
+ log_message "MORAgents.app opened successfully"
+ else
+ log_message "Warning: Failed to open MORAgents.app. It may need to be opened manually."
+ fi
+else
+ log_message "Error: MORAgents.app not found in /Applications"
+fi
+
+log_message "Post-install script completed."
+exit 0
\ No newline at end of file
diff --git a/build_assets/macOS/preinstall_docker.sh b/build_assets/macOS/preinstall_docker.sh
new file mode 100755
index 0000000..f110bbd
--- /dev/null
+++ b/build_assets/macOS/preinstall_docker.sh
@@ -0,0 +1,55 @@
+#!/bin/bash
+
+# Function to log messages
+log_message() {
+ echo "$(date '+%Y-%m-%d %H:%M:%S') - $1"
+}
+
+# Set variables
+DOCKER_DMG_URL="https://desktop.docker.com/mac/main/arm64/Docker.dmg"
+DOCKER_DMG="Docker.dmg"
+VOLUME_NAME="Docker"
+INSTALL_PATH="/Volumes/$VOLUME_NAME/Docker.app/Contents/MacOS/install"
+
+# Download Docker
+log_message "Downloading Docker..."
+if curl -L "$DOCKER_DMG_URL" -o "$DOCKER_DMG"; then
+ log_message "Docker download completed."
+else
+ log_message "Error: Failed to download Docker."
+ exit 1
+fi
+
+# Mount the DMG
+log_message "Mounting Docker DMG..."
+if hdiutil attach "$DOCKER_DMG"; then
+ log_message "Docker DMG mounted successfully."
+else
+ log_message "Error: Failed to mount Docker DMG."
+ exit 1
+fi
+
+# Run the installer
+log_message "Running Docker installer..."
+if "$INSTALL_PATH" --accept-license; then
+ log_message "Docker installation completed."
+else
+ log_message "Error: Docker installation failed."
+ hdiutil detach "/Volumes/$VOLUME_NAME"
+ exit 1
+fi
+
+# Detach the DMG
+log_message "Detaching Docker DMG..."
+if hdiutil detach "/Volumes/$VOLUME_NAME"; then
+ log_message "Docker DMG detached successfully."
+else
+ log_message "Warning: Failed to detach Docker DMG. This is not critical."
+fi
+
+# Clean up
+log_message "Cleaning up..."
+rm -f "$DOCKER_DMG"
+
+log_message "Docker preinstall script completed successfully."
+exit 0
\ No newline at end of file
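
A possible refinement to the mount step (not part of this script): `hdiutil attach` accepts flags that keep the volume out of Finder and suppress progress output during an installer run:

```shell
# Quieter variant of the mount step (hypothetical tweak).
hdiutil attach "$DOCKER_DMG" -nobrowse -quiet
```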
diff --git a/build_assets/macOS/preinstall_docker_intel.sh b/build_assets/macOS/preinstall_docker_intel.sh
new file mode 100644
index 0000000..544f0c8
--- /dev/null
+++ b/build_assets/macOS/preinstall_docker_intel.sh
@@ -0,0 +1,61 @@
+#!/bin/bash
+
+# Function to log messages
+log_message() {
+ echo "$(date '+%Y-%m-%d %H:%M:%S') - $1"
+}
+
+# Check if running on Intel Mac
+if [ "$(uname -m)" != "x86_64" ]; then
+ log_message "Error: This script is for Intel-based Macs only."
+ exit 1
+fi
+
+# Set variables
+DOCKER_DMG_URL="https://desktop.docker.com/mac/main/amd64/Docker.dmg"
+DOCKER_DMG="Docker.dmg"
+VOLUME_NAME="Docker"
+INSTALL_PATH="/Volumes/$VOLUME_NAME/Docker.app/Contents/MacOS/install"
+
+# Download Docker
+log_message "Downloading Docker for Intel Mac..."
+if curl -L "$DOCKER_DMG_URL" -o "$DOCKER_DMG"; then
+ log_message "Docker download completed."
+else
+ log_message "Error: Failed to download Docker."
+ exit 1
+fi
+
+# Mount the DMG
+log_message "Mounting Docker DMG..."
+if hdiutil attach "$DOCKER_DMG"; then
+ log_message "Docker DMG mounted successfully."
+else
+ log_message "Error: Failed to mount Docker DMG."
+ exit 1
+fi
+
+# Run the installer
+log_message "Running Docker installer..."
+if "$INSTALL_PATH" --accept-license; then
+ log_message "Docker installation completed."
+else
+ log_message "Error: Docker installation failed."
+ hdiutil detach "/Volumes/$VOLUME_NAME"
+ exit 1
+fi
+
+# Detach the DMG
+log_message "Detaching Docker DMG..."
+if hdiutil detach "/Volumes/$VOLUME_NAME"; then
+ log_message "Docker DMG detached successfully."
+else
+ log_message "Warning: Failed to detach Docker DMG. This is not critical."
+fi
+
+# Clean up
+log_message "Cleaning up..."
+rm -f "$DOCKER_DMG"
+
+log_message "Docker preinstall script completed successfully."
+exit 0
\ No newline at end of file
diff --git a/build_assets/macOS/preinstall_ollama.sh b/build_assets/macOS/preinstall_ollama.sh
index 412743c..dfcb2e7 100644
--- a/build_assets/macOS/preinstall_ollama.sh
+++ b/build_assets/macOS/preinstall_ollama.sh
@@ -7,5 +7,5 @@ chmod +x ollama
sudo mv ollama /usr/local/bin/
nohup /usr/local/bin/ollama serve > /dev/null 2>&1 &
-/usr/local/bin/ollama pull llama3
+/usr/local/bin/ollama pull llama3.1
/usr/local/bin/ollama pull nomic-embed-text
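
Because `ollama serve` is backgrounded with its output discarded, a quick follow-up check can confirm the models were actually pulled; a sketch using the CLI and the local API:

```shell
/usr/local/bin/ollama list                  # should list llama3.1 and nomic-embed-text
curl -s http://localhost:11434/api/tags     # same information via the local API
```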
diff --git a/build_assets/macOS/welcome.html b/build_assets/macOS/welcome.html
index 585d5ea..18ca86d 100644
--- a/build_assets/macOS/welcome.html
+++ b/build_assets/macOS/welcome.html
@@ -3,7 +3,7 @@
- Welcome to MORagents v0.0.9 Installer
+ Welcome to MORagents v0.1.0 Installer
- Welcome to MORagents v0.0.9 Installer
+ Welcome to MORagents v0.1.0 Installer
Thank you for choosing to install MORagents on your system. This installer will guide you through the process of setting up MORagents and its dependencies.
The installer will perform the following steps:
diff --git a/config.py b/config.py
index 00ed5df..43bae40 100644
--- a/config.py
+++ b/config.py
@@ -10,26 +10,38 @@
# Run as bundled executable if condition is met, else run as regular Python script
repo_root = sys._MEIPASS if getattr(sys, 'frozen', False) else os.path.dirname(__file__)
elif os_name == "Linux":
- raise RuntimeError(
- f"MORagents needs Linux support! Would you like to help?\n"
- f"https://github.com/MorpheusAIs/moragents/issues/27")
+ repo_root = os.path.dirname(__file__)
+else:
+ raise RuntimeError(f"Unsupported OS: {os_name}")
class AgentDockerConfig:
- MACOS_IMAGE_NAMES = [
- "lachsbagel/moragents_dockers-nginx:apple-0.0.9",
- "lachsbagel/moragents_dockers-agents:apple-0.0.9"
+ MACOS_APPLE_IMAGE_NAMES = [
+ "lachsbagel/moragents_dockers-nginx:apple-0.1.0",
+ "lachsbagel/moragents_dockers-agents:apple-0.1.0"
+ ]
+ MACOS_INTEL_IMAGE_NAMES = [
+ "lachsbagel/moragents_dockers-nginx:amd64-0.1.0",
+ "lachsbagel/moragents_dockers-agents:amd64-0.1.0"
]
WINDOWS_IMAGE_NAMES = [
- "lachsbagel/moragents_dockers-nginx:amd64-0.0.9",
- "lachsbagel/moragents_dockers-agents:amd64-0.0.9"
+ "lachsbagel/moragents_dockers-nginx:amd64-0.1.0",
+ "lachsbagel/moragents_dockers-agents:amd64-0.1.0"
+ ]
+ LINUX_IMAGE_NAMES = [ # TODO, may need linux specific tagged images
+ "lachsbagel/moragents_dockers-nginx:amd64-0.1.0",
+ "lachsbagel/moragents_dockers-agents:amd64-0.1.0"
]
-
+
@staticmethod
def get_current_image_names():
- if os_name == "macOS":
- return AgentDockerConfig.MACOS_IMAGE_NAMES
+ if os_name == "macOS" and arch == "ARM64":
+ return AgentDockerConfig.MACOS_APPLE_IMAGE_NAMES
+ elif os_name == "macOS" and arch == "x86_64":
+ return AgentDockerConfig.MACOS_INTEL_IMAGE_NAMES
elif os_name == "Windows":
return AgentDockerConfig.WINDOWS_IMAGE_NAMES
+ elif os_name == "Linux":
+ return AgentDockerConfig.LINUX_IMAGE_NAMES
else:
raise RuntimeError(f"Unsupported OS: {os_name}")
@@ -37,5 +49,9 @@ class AgentDockerConfigDeprecate:
OLD_IMAGE_NAMES = [
"morpheus/price_fetcher_agent:latest",
"moragents_dockers-nginx:latest",
- "moragents_dockers-agents:latest"
- ]
\ No newline at end of file
+ "moragents_dockers-agents:latest",
+ "lachsbagel/moragents_dockers-nginx:apple-0.0.9",
+ "lachsbagel/moragents_dockers-agents:apple-0.0.9",
+ "lachsbagel/moragents_dockers-nginx:amd64-0.0.9",
+ "lachsbagel/moragents_dockers-agents:amd64-0.0.9"
+ ]
diff --git a/images/agent_clarify.png b/images/agent_clarify.png
deleted file mode 100644
index 76e462f..0000000
Binary files a/images/agent_clarify.png and /dev/null differ
diff --git a/images/mor_rewards.png b/images/mor_rewards.png
new file mode 100644
index 0000000..2e249d6
Binary files /dev/null and b/images/mor_rewards.png differ
diff --git a/images/moragents.png b/images/moragents.png
new file mode 100644
index 0000000..151585f
Binary files /dev/null and b/images/moragents.png differ
diff --git a/images/moragents_chatpdf.jpg b/images/moragents_chatpdf.jpg
new file mode 100644
index 0000000..a42c7de
Binary files /dev/null and b/images/moragents_chatpdf.jpg differ
diff --git a/images/moragents_chatpdf.png b/images/moragents_chatpdf.png
deleted file mode 100644
index d9777ce..0000000
Binary files a/images/moragents_chatpdf.png and /dev/null differ
diff --git a/images/price_fetcher.png b/images/price_fetcher.png
new file mode 100644
index 0000000..b0336a1
Binary files /dev/null and b/images/price_fetcher.png differ
diff --git a/images/successful_swap.png b/images/successful_swap.png
deleted file mode 100644
index f51d511..0000000
Binary files a/images/successful_swap.png and /dev/null differ
diff --git a/images/tweet_sizzler.png b/images/tweet_sizzler.png
new file mode 100644
index 0000000..862c7b4
Binary files /dev/null and b/images/tweet_sizzler.png differ
diff --git a/images/wallet_integration.png b/images/wallet_integration.png
index 24f7fc9..6254967 100644
Binary files a/images/wallet_integration.png and b/images/wallet_integration.png differ
diff --git a/main.py b/main.py
index f94c23e..e69c7fb 100644
--- a/main.py
+++ b/main.py
@@ -4,6 +4,7 @@
from runtime_setup_macos import main as macos_setup
from runtime_setup_windows import main as windows_setup
+from runtime_setup_linux import main as linux_setup
from utils.logger_config import setup_logger
from utils.host_utils import get_os_and_arch
@@ -11,7 +12,7 @@
logger = setup_logger(__name__)
-if __name__ == '__main__':
+if __name__ == "__main__":
try:
os_name, arch = get_os_and_arch()
@@ -21,9 +22,7 @@
elif os_name == "Windows":
windows_setup()
elif os_name == "Linux":
- raise RuntimeError(
- f"MORagents needs Linux support! Would you like to help?\n"
- f"https://github.com/MorpheusAIs/moragents/issues/27")
+ linux_setup()
except Exception as e:
logging.critical(f"Error during Docker setup: {str(e)}")
@@ -31,5 +30,5 @@
time.sleep(7)
- url = 'http://localhost:3333/'
+ url = "http://localhost:3333/"
webbrowser.open(url)
diff --git a/requirements.txt b/requirements.txt
index f168009..70969cd 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -4,3 +4,4 @@ customtkinter
requests
setuptools
pyinstaller
+torch
diff --git a/runtime_setup_linux.py b/runtime_setup_linux.py
new file mode 100644
index 0000000..3439dd4
--- /dev/null
+++ b/runtime_setup_linux.py
@@ -0,0 +1,167 @@
+import os
+import shutil
+import subprocess
+
+from utils.logger_config import setup_logger
+from config import AgentDockerConfig, AgentDockerConfigDeprecate
+
+logger = setup_logger(__name__)
+
+def get_docker_path():
+ docker_paths = [
+ '/usr/bin/docker', # Common Linux path
+ '/usr/local/bin/docker', # Alternative Linux path
+ shutil.which('docker')
+ ]
+ for docker_path in docker_paths:
+ if docker_path and os.path.exists(docker_path):
+ return docker_path
+
+ logger.error("Docker executable not found in PATH.")
+ return None
+
+def check_docker_installed(docker_path):
+ try:
+ subprocess.run([docker_path, "--version"],
+ check=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
+ logger.info(f"Docker was found at {docker_path}")
+ return True
+ except (subprocess.CalledProcessError, TypeError) as e:
+ logger.error(f"Error checking Docker installation: {str(e)}")
+ return False
+
+def delete_docker_image(docker_path, image_name):
+ try:
+ # List all images
+ list_command = [docker_path, "images", "--format", "{{.Repository}}:{{.Tag}}"]
+ output = subprocess.check_output(list_command, universal_newlines=True)
+ images = output.strip().split("\n")
+
+ # Find the image with the specified name
+ if image_name in images:
+ # Remove the image
+ remove_command = [docker_path, "rmi", "-f", image_name]
+ subprocess.run(remove_command, check=True)
+ logger.info(f"Image '{image_name}' deleted successfully.")
+        else:
+            logger.info(f"Image '{image_name}' not found; nothing to delete.")
+
+ except subprocess.CalledProcessError as e:
+ logger.warning(f"Error deleting image: {e}")
+
+def list_containers_for_image(docker_path, image_name):
+ try:
+ output = subprocess.check_output(
+ [docker_path, "ps", "-a", "--filter", f"ancestor={image_name}", "--format", "{{.ID}}"])
+ containers = output.decode().strip().split("\n")
+ return [container for container in containers if container]
+ except subprocess.CalledProcessError as e:
+ logger.error(f"Failed to list containers for image '{image_name}': {e}")
+ return []
+
+def remove_container(docker_path, container):
+ try:
+ subprocess.run([docker_path, "rm", "-f", container], check=True, stdout=subprocess.DEVNULL,
+ stderr=subprocess.DEVNULL)
+ except subprocess.CalledProcessError as e:
+ logger.error(f"Failed to remove container '{container}': {e}")
+
+def docker_image_present_on_host(docker_path, image_name):
+ try:
+ subprocess.run([docker_path, "inspect", image_name], check=True, stdout=subprocess.DEVNULL,
+ stderr=subprocess.DEVNULL)
+ return True
+ except (subprocess.CalledProcessError, TypeError) as e:
+ return False
+
+def remove_containers_for_image(docker_path, image_name):
+ containers = list_containers_for_image(docker_path, image_name)
+ for container in containers:
+ remove_container(docker_path, container)
+ logger.info(f"Removed container '{container}' for image '{image_name}'")
+
+def remove_containers_by_name(docker_path, container_name):
+ try:
+ list_command = [docker_path, "ps", "-a", "--format", "{{.Names}}"]
+ output = subprocess.check_output(list_command, universal_newlines=True)
+ containers = output.strip().split("\n")
+
+ if container_name in containers:
+ remove_command = [docker_path, "rm", "-f", container_name]
+ subprocess.run(remove_command, check=True)
+ logger.info(f"Removed container '{container_name}'")
+ else:
+ logger.info(f"Container '{container_name}' not found")
+ except subprocess.CalledProcessError as e:
+ logger.error(f"Error removing container '{container_name}': {str(e)}")
+
+def migration_remove_old_images(docker_path):
+ for image_name in AgentDockerConfigDeprecate.OLD_IMAGE_NAMES:
+ if docker_image_present_on_host(docker_path, image_name):
+ delete_docker_image(docker_path, image_name)
+            logger.info(f"Deleted image '{image_name}' from previous release")
+
+def pull_docker_images(docker_path):
+ for image in AgentDockerConfig.get_current_image_names():
+ try:
+ subprocess.run([docker_path, "pull", image], check=True)
+ logger.info(f"Successfully pulled image: {image}")
+ except subprocess.CalledProcessError as e:
+ logger.error(f"Failed to pull image {image}: {e}")
+ raise
+
+def start_ollama_server():
+ ollama_path = '/usr/local/bin/ollama' # This path might need to be adjusted for Linux
+
+ try:
+ # Start Ollama server
+ logger.info("Starting Ollama server...")
+ subprocess.Popen([ollama_path, "serve"], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
+ logger.info("Ollama server started successfully.")
+ except Exception as e:
+        logger.error(f"Failed to start Ollama server: {e}")
+
+def docker_setup():
+ docker_path = get_docker_path()
+ logger.info(f"Docker path: {docker_path}")
+
+ if not check_docker_installed(docker_path):
+ logger.critical("Docker is not installed.")
+ raise RuntimeError("Docker is not installed.")
+
+ # Remove old images and containers
+ logger.info("Checking whether old images need removal.")
+ migration_remove_old_images(docker_path)
+
+ for image_name in AgentDockerConfig.get_current_image_names():
+ remove_containers_for_image(docker_path, image_name)
+
+ remove_containers_by_name(docker_path, "agents")
+ remove_containers_by_name(docker_path, "nginx")
+
+ # Pull the latest images
+ pull_docker_images(docker_path)
+
+ # Spin up Agent container
+ subprocess.run([
+ docker_path, "run", "-d", "--name", "agents",
+ "-p", "8080:5000", "--restart", "always",
+ "-v", "/var/lib/agents:/var/lib/agents", "-v", "/app/src:/app/src", # Adjusted volume paths for Linux
+ AgentDockerConfig.get_current_image_names()[1] # agents image
+ ], check=True)
+
+ # Spin up Nginx container
+ subprocess.run([
+ docker_path, "run", "-d", "--name", "nginx", "-p", "3333:80",
+ AgentDockerConfig.get_current_image_names()[0] # nginx image
+ ], check=True)
+
+def main():
+ # main() called every time the app is opened (from main.py). Put all app open code here.
+ logger.info("Starting app...")
+ start_ollama_server()
+ docker_setup()
+
+if __name__ == "__main__":
+ main()
\ No newline at end of file
diff --git a/runtime_setup_macos.py b/runtime_setup_macos.py
index 70f2d48..fda8f2e 100644
--- a/runtime_setup_macos.py
+++ b/runtime_setup_macos.py
@@ -16,6 +16,7 @@ def get_docker_path():
logger.error("Docker executable not found in PATH.")
return None
+
def check_docker_installed(docker_path):
try:
subprocess.run([docker_path, "--version"],
diff --git a/submodules/__init__.py b/submodules/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/submodules/benchmarks/__init__.py b/submodules/benchmarks/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/submodules/benchmarks/claim_agent_benchmarks/README.md b/submodules/benchmarks/claim_agent_benchmarks/README.md
new file mode 100644
index 0000000..16c72ce
--- /dev/null
+++ b/submodules/benchmarks/claim_agent_benchmarks/README.md
@@ -0,0 +1,23 @@
+# Benchmarking & Testing Reward Claiming Agent Guide
+
+NOTE: This is made for the router-compatible moragents repo.
+
+## How to Run the Tests
+1) From the repository root, change into the agents directory:
+- ```cd submodules/moragents_dockers/agents```
+
+2) Build the agent image:
+- ```docker build -t agent .```
+
+NOTE: If you are using Apple Silicon, you may experience problems because the base image is not built for arm64. A separate Dockerfile is included to deal with this; build with:
+- ```docker build . -t agent -f Dockerfile-apple```
+
+3) Run the agent:
+- ```docker run --name agent -p 5000:5000 agent```
+
+4) Check that the agent container is up and running.
+5) If it is running, navigate to `submodules/benchmarks/claim_agent_benchmarks`.
+6) Run `pytest benchmarks.py`.
+
+NOTE: If needed, use your own Alchemy mainnet RPC instead of the default Cloudflare one in `config.py`, and install the test dependencies with `pip install pytest web3`.
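+
+For a quick sanity check that the agent container is reachable before running the suite (assuming it is exposed on port 5000 as above), you can send a single claim prompt with `requests`:
+
+```python
+import requests
+
+payload = {
+    "prompt": {"role": "user", "content": "I want to claim my MOR rewards from pool id 1"},
+    "wallet_address": "0x48d0EAc727A7e478f792F16527012452a000f2bd",
+}
+print(requests.post("http://127.0.0.1:5000/", json=payload).json())
+```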
diff --git a/submodules/benchmarks/claim_agent_benchmarks/__init__.py b/submodules/benchmarks/claim_agent_benchmarks/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/submodules/benchmarks/claim_agent_benchmarks/adapters/__init__.py b/submodules/benchmarks/claim_agent_benchmarks/adapters/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/submodules/benchmarks/claim_agent_benchmarks/adapters/claim_adapter.py b/submodules/benchmarks/claim_agent_benchmarks/adapters/claim_adapter.py
new file mode 100644
index 0000000..97afe42
--- /dev/null
+++ b/submodules/benchmarks/claim_agent_benchmarks/adapters/claim_adapter.py
@@ -0,0 +1,14 @@
+import requests
+import json
+
+class ClaimAdapter:
+ def __init__(self, url, headers):
+ self.url = url
+ self.headers = headers
+
+ def ask_agent(self, payload):
+ response = requests.post(self.url, headers=self.headers, data=json.dumps(payload))
+ if response.status_code == 200:
+ return response.json()
+ else:
+ raise Exception(f"Request failed with status code {response.status_code}: {response.text}")
diff --git a/submodules/benchmarks/claim_agent_benchmarks/benchmarks.py b/submodules/benchmarks/claim_agent_benchmarks/benchmarks.py
new file mode 100644
index 0000000..d550827
--- /dev/null
+++ b/submodules/benchmarks/claim_agent_benchmarks/benchmarks.py
@@ -0,0 +1,30 @@
+import pytest
+from helpers import request_claim, provide_receiver_address, confirm_transaction
+from submodules.benchmarks.claim_agent_benchmarks.config import Config
+
+
+def test_claim_process():
+ for i, wallet_data in enumerate(Config.WALLET_ADDRESSES):
+ wallet_address = wallet_data["wallet"]
+ receiver_address = wallet_data["receiver"]
+
+ print(f"Testing for wallet {wallet_address} (Test {i + 1})")
+
+ # Step 1: Request to claim rewards
+ response = request_claim(wallet_address)
+ assert "Please provide the receiver address" in response['content']
+ print(f"Step 1 passed for wallet {wallet_address}: Request to claim MOR rewards")
+
+ # Step 2: Provide the receiver address
+ response = provide_receiver_address(wallet_address, receiver_address)
+ assert "Please confirm if you want to proceed" in response['content']
+ print(f"Step 2 passed for wallet {wallet_address}: Provided receiver address")
+
+ # Step 3: Confirm the transaction
+ response = confirm_transaction(wallet_address)
+ assert "Transaction data prepared" in response['content']
+ print(f"Step 3 passed for wallet {wallet_address}: Transaction confirmed")
+
+
+if __name__ == "__main__":
+ pytest.main()
diff --git a/submodules/benchmarks/claim_agent_benchmarks/config.py b/submodules/benchmarks/claim_agent_benchmarks/config.py
new file mode 100644
index 0000000..36fa731
--- /dev/null
+++ b/submodules/benchmarks/claim_agent_benchmarks/config.py
@@ -0,0 +1,14 @@
+class Config:
+ URL = 'http://127.0.0.1:5000/'
+ HEADERS = {'Content-Type': 'application/json'}
+
+ # Test wallet addresses and receiver addresses
+ WALLET_ADDRESSES = [
+ {"wallet": "0x48d0EAc727A7e478f792F16527012452a000f2bd",
+ "receiver": "0x48d0EAc727A7e478f792F16527012452a000f2bd"}
+ ]
+
+ PROMPTS = {
+ "claim_request": "I want to claim my MOR rewards from pool id 1",
+ "proceed": "proceed"
+ }
diff --git a/submodules/benchmarks/claim_agent_benchmarks/helpers.py b/submodules/benchmarks/claim_agent_benchmarks/helpers.py
new file mode 100644
index 0000000..22a4fef
--- /dev/null
+++ b/submodules/benchmarks/claim_agent_benchmarks/helpers.py
@@ -0,0 +1,25 @@
+from submodules.benchmarks.claim_agent_benchmarks.config import Config
+from adapters.claim_adapter import ClaimAdapter
+
+claim_adapter = ClaimAdapter(Config.URL, Config.HEADERS)
+
+def request_claim(wallet_address):
+ payload = {
+ "prompt": {"role": "user", "content": Config.PROMPTS["claim_request"]},
+ "wallet_address": wallet_address
+ }
+ return claim_adapter.ask_agent(payload)
+
+def provide_receiver_address(wallet_address, receiver_address):
+ payload = {
+ "prompt": {"role": "user", "content": receiver_address},
+ "wallet_address": wallet_address
+ }
+ return claim_adapter.ask_agent(payload)
+
+def confirm_transaction(wallet_address):
+ payload = {
+ "prompt": {"role": "user", "content": Config.PROMPTS["proceed"]},
+ "wallet_address": wallet_address
+ }
+ return claim_adapter.ask_agent(payload)
diff --git a/submodules/benchmarks/claim_agent_benchmarks/user_flow.py b/submodules/benchmarks/claim_agent_benchmarks/user_flow.py
new file mode 100644
index 0000000..f7482df
--- /dev/null
+++ b/submodules/benchmarks/claim_agent_benchmarks/user_flow.py
@@ -0,0 +1,46 @@
+import requests
+import json
+
+url = 'http://127.0.0.1:5000/'
+
+headers = {
+ 'Content-Type': 'application/json',
+}
+
+def ask_agent(payload):
+ response = requests.post(url, headers=headers, data=json.dumps(payload))
+
+ if response.status_code == 200:
+ print("Raw response data:")
+ print(response.text)
+ return response.json()
+ else:
+ print("Raw error data:")
+ print(response.text)
+ raise Exception(f"Request failed with status code {response.status_code}")
+
+# Step 1: Request to claim MOR rewards
+payload = {
+ "prompt": {"role": "user", "content": "I want to claim my MOR rewards from pool id 1"},
+ "wallet_address": "0x48d0EAc727A7e478f792F16527012452a000f2bd"
+}
+response = ask_agent(payload)
+
+# Step 2: Provide the receiver address
+receiver_address = "0x48d0EAc727A7e478f792F16527012452a000f2bd"
+payload = {
+ "prompt": {"role": "user", "content": receiver_address},
+ "wallet_address": "0x48d0EAc727A7e478f792F16527012452a000f2bd"
+}
+response = ask_agent(payload)
+
+# Step 3: Confirm the transaction
+payload = {
+ "prompt": {"role": "user", "content": "proceed"},
+ "wallet_address": "0x48d0EAc727A7e478f792F16527012452a000f2bd"
+}
+response = ask_agent(payload)
+
+# Final step: Print the final raw response after confirming
+print("Final raw response data:")
+print(json.dumps(response, indent=2))
diff --git a/submodules/benchmarks/price_fetching/__init__.py b/submodules/benchmarks/price_fetching/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/submodules/benchmarks/price_fetching/adapters/__init__.py b/submodules/benchmarks/price_fetching/adapters/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/submodules/benchmarks/adapters/base_adapter.py b/submodules/benchmarks/price_fetching/adapters/base_adapter.py
similarity index 100%
rename from submodules/benchmarks/adapters/base_adapter.py
rename to submodules/benchmarks/price_fetching/adapters/base_adapter.py
diff --git a/submodules/benchmarks/adapters/coingecko_adapter.py b/submodules/benchmarks/price_fetching/adapters/coingecko_adapter.py
similarity index 100%
rename from submodules/benchmarks/adapters/coingecko_adapter.py
rename to submodules/benchmarks/price_fetching/adapters/coingecko_adapter.py
diff --git a/submodules/benchmarks/adapters/defillama_adapter.py b/submodules/benchmarks/price_fetching/adapters/defillama_adapter.py
similarity index 100%
rename from submodules/benchmarks/adapters/defillama_adapter.py
rename to submodules/benchmarks/price_fetching/adapters/defillama_adapter.py
diff --git a/submodules/benchmarks/benchmarks.py b/submodules/benchmarks/price_fetching/benchmarks.py
similarity index 95%
rename from submodules/benchmarks/benchmarks.py
rename to submodules/benchmarks/price_fetching/benchmarks.py
index d239015..728a9b4 100644
--- a/submodules/benchmarks/benchmarks.py
+++ b/submodules/benchmarks/price_fetching/benchmarks.py
@@ -1,7 +1,7 @@
import time
import argparse
from helpers import ask_data_agent, compare_usd_values, extract_agent_usd_value
-from config import coins, price_prompts, mcap_prompts, price_error_tolerance, mcap_error_tolerance, loop_delay
+from submodules.benchmarks.price_fetching.config import coins, price_prompts, mcap_prompts, price_error_tolerance, mcap_error_tolerance, loop_delay
from adapters.coingecko_adapter import CoingeckoAdapter
from adapters.defillama_adapter import DefillamaAdapter
diff --git a/submodules/benchmarks/config.py b/submodules/benchmarks/price_fetching/config.py
similarity index 100%
rename from submodules/benchmarks/config.py
rename to submodules/benchmarks/price_fetching/config.py
diff --git a/submodules/benchmarks/helpers.py b/submodules/benchmarks/price_fetching/helpers.py
similarity index 100%
rename from submodules/benchmarks/helpers.py
rename to submodules/benchmarks/price_fetching/helpers.py
diff --git a/submodules/benchmarks/readme.md b/submodules/benchmarks/price_fetching/readme.md
similarity index 90%
rename from submodules/benchmarks/readme.md
rename to submodules/benchmarks/price_fetching/readme.md
index eef0967..81fa5f2 100644
--- a/submodules/benchmarks/readme.md
+++ b/submodules/benchmarks/price_fetching/readme.md
@@ -8,7 +8,7 @@
## Running
-## 0. Start [ DataAgent Docker Service](../moragents_dockers/agents/src/data_agent/README.md)
+## 0. Start [DataAgent Docker Service](../../moragents_dockers/agents/src/data_agent/README.md)
## 1. Modify `config.py` with new prompts, coins & error tolerances
## 2. `cd submodules/benchmarks/price_fetching`
## 3. Run `pip install -r requirements.txt`
diff --git a/submodules/benchmarks/requirements.txt b/submodules/benchmarks/price_fetching/requirements.txt
similarity index 100%
rename from submodules/benchmarks/requirements.txt
rename to submodules/benchmarks/price_fetching/requirements.txt
diff --git a/submodules/benchmarks/reward_check_agent_benchmarks/README.md b/submodules/benchmarks/reward_check_agent_benchmarks/README.md
new file mode 100644
index 0000000..9469618
--- /dev/null
+++ b/submodules/benchmarks/reward_check_agent_benchmarks/README.md
@@ -0,0 +1,23 @@
+# Benchmarking & Testing Reward Checking Agent Guide
+
+NOTE: This is made for the router-compatible moragents repo.
+
+## How to Run the Tests
+1) From the repository root, change into the agents directory:
+- ```cd submodules/moragents_dockers/agents```
+
+2) Build the agent image:
+- ```docker build -t agent .```
+
+NOTE: If you are using Apple Silicon, you may experience problems because the base image is not built for arm64. A separate Dockerfile is included to deal with this; build with:
+- ```docker build . -t agent -f Dockerfile-apple```
+
+3) Run the agent:
+- ```docker run --name agent -p 5000:5000 agent```
+
+4) Check that the agent container is up and running.
+5) If it is running, navigate to `submodules/benchmarks/reward_check_agent_benchmarks`.
+6) Run `python benchmarks.py`.
+
+NOTE: If needed, use your own Alchemy mainnet RPC instead of the default Cloudflare one in `config.py`, and install the test dependencies with `pip install pytest web3`.
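+
+For a quick manual check (assuming the agent container is listening on port 5000 and you run this from this folder with the repository root on `PYTHONPATH`, so the imports in `helpers.py` resolve), you can compare the agent's answer with the on-chain value directly:
+
+```python
+from helpers import ask_claim_agent, get_current_user_reward
+
+wallet = "0x62aF7c48Cf412162465A8CaFdE44dFb17bA96038"  # one of the test wallets in config.py
+print(ask_claim_agent(f"Check MOR rewards for wallet_address: {wallet} in pool_id 1"))
+print(get_current_user_reward(wallet, 1))
+```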
diff --git a/submodules/benchmarks/reward_check_agent_benchmarks/__init__.py b/submodules/benchmarks/reward_check_agent_benchmarks/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/submodules/benchmarks/reward_check_agent_benchmarks/adapters/__init__.py b/submodules/benchmarks/reward_check_agent_benchmarks/adapters/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/submodules/benchmarks/reward_check_agent_benchmarks/adapters/reward_check_adapter.py b/submodules/benchmarks/reward_check_agent_benchmarks/adapters/reward_check_adapter.py
new file mode 100644
index 0000000..538c39f
--- /dev/null
+++ b/submodules/benchmarks/reward_check_agent_benchmarks/adapters/reward_check_adapter.py
@@ -0,0 +1,12 @@
+from submodules.moragents_dockers.agents.src.claim_agent.src.tools import get_current_user_reward
+
+class RewardCheckAdapter:
+ def __init__(self):
+ pass
+
+ @property
+ def name(self) -> str:
+ return "RewardCheckAdapter"
+
+ def get_reward(self, pool_id: int, wallet_address: str) -> float:
+ return get_current_user_reward(wallet_address, pool_id)
\ No newline at end of file
diff --git a/submodules/benchmarks/reward_check_agent_benchmarks/benchmarks.py b/submodules/benchmarks/reward_check_agent_benchmarks/benchmarks.py
new file mode 100644
index 0000000..b2d526b
--- /dev/null
+++ b/submodules/benchmarks/reward_check_agent_benchmarks/benchmarks.py
@@ -0,0 +1,47 @@
+from helpers import ask_claim_agent, get_current_user_reward, extract_reward_value_from_response
+from submodules.benchmarks.reward_check_agent_benchmarks.config import test_cases, reward_check_prompts
+
+
+def run_reward_check_tests():
+    total_tests = len(test_cases) * len(reward_check_prompts)
+    passed_tests = 0
+    test_number = 0
+
+    for test_case in test_cases:
+        pool_id = test_case["pool_id"]
+        wallet_address = test_case["wallet_address"]
+
+        # Iterate over each prompt template for this test case
+        for prompt_template in reward_check_prompts:
+            test_number += 1
+            prompt = prompt_template.format(wallet_address, pool_id)
+            print("-" * 100)
+            print(f"Running test case {test_number}/{total_tests}: {prompt}")
+
+            # Get the agent's response
+            agent_response = ask_claim_agent(prompt)
+            print(f"Agent response: {agent_response}")
+
+            # Extract the reward value from the agent's response
+            agent_reward_value = extract_reward_value_from_response(agent_response)
+            print(f"Agent Returned Reward Value: {agent_reward_value}")
+
+            # Get the real reward value from the blockchain
+            blockchain_reward_value = float(get_current_user_reward(wallet_address, pool_id))
+            print(f"Blockchain Returned Reward Value: {blockchain_reward_value}")
+
+            # Compare the values with a tolerance of 10%; require an exact match when the on-chain value is zero
+            tolerance = 0.10
+            if blockchain_reward_value == 0:
+                passed = agent_reward_value == 0
+            else:
+                passed = abs(agent_reward_value - blockchain_reward_value) / blockchain_reward_value <= tolerance
+
+            if passed:
+                print(f"Test case {test_number} passed.")
+                passed_tests += 1
+            else:
+                print(f"Test case {test_number} failed. Agent returned {agent_reward_value}, expected {blockchain_reward_value}.")
+            print("-" * 100)
+
+    print(f"\n{passed_tests}/{total_tests} test cases passed.")
+
+
+if __name__ == "__main__":
+ run_reward_check_tests()
diff --git a/submodules/benchmarks/reward_check_agent_benchmarks/config.py b/submodules/benchmarks/reward_check_agent_benchmarks/config.py
new file mode 100644
index 0000000..e684a3f
--- /dev/null
+++ b/submodules/benchmarks/reward_check_agent_benchmarks/config.py
@@ -0,0 +1,72 @@
+import logging
+
+# Logging configuration
+logging.basicConfig(level=logging.INFO)
+
+
+# Configuration object
+class Config:
+ WEB3RPCURL = {
+ "1": "https://cloudflare-eth.com",
+ }
+
+ DISTRIBUTION_PROXY_ADDRESS = "0x47176B2Af9885dC6C4575d4eFd63895f7Aaa4790"
+ DISTRIBUTION_ABI = [
+ {
+ "inputs": [
+ {
+ "internalType": "uint256",
+ "name": "poolId_",
+ "type": "uint256"
+ },
+ {
+ "internalType": "address",
+ "name": "user_",
+ "type": "address"
+ }
+ ],
+ "name": "getCurrentUserReward",
+ "outputs": [
+ {
+ "internalType": "uint256",
+ "name": "",
+ "type": "uint256"
+ }
+ ],
+ "stateMutability": "view",
+ "type": "function"
+ }
+ ]
+
+
+test_cases = [
+ {
+ "pool_id": 1,
+ "wallet_address": "0x62aF7c48Cf412162465A8CaFdE44dFb17bA96038",
+ },
+ {
+ "pool_id": 1,
+ "wallet_address": "0xC3B82270Db1b77B4bE28a83d0963e02c38A9d13f",
+ },
+ {
+ "pool_id": 1,
+ "wallet_address": "0x03aa1e85487a5c3c509bc584ad9490a41d248011",
+ },
+ {
+ "pool_id": 1,
+ "wallet_address": "0xEb4E7939C3bCC0635b8531e3C0a6bD42de95cfeF",
+ },
+ {
+ "pool_id": 1,
+ "wallet_address": "0x5CD4C60f0e566dCa1Ae8456C36a63bc7A8D803de",
+ }
+]
+
+reward_check_prompts = [
+ "Check MOR rewards for wallet_address: {} in pool_id {}",
+ "Check the MOR rewards for address: {} in pool_id {}",
+ "Check my MOR Rewards for address: {} for pool_id {}",
+    "Check the MOR rewards accrued for wallet_address: {} in pool_id {}",
+    "Check the MOR rewards assigned to wallet_address: {} in pool_id {}",
+]
diff --git a/submodules/benchmarks/reward_check_agent_benchmarks/helpers.py b/submodules/benchmarks/reward_check_agent_benchmarks/helpers.py
new file mode 100644
index 0000000..45b0525
--- /dev/null
+++ b/submodules/benchmarks/reward_check_agent_benchmarks/helpers.py
@@ -0,0 +1,50 @@
+import requests
+import re
+from web3 import Web3
+from submodules.benchmarks.reward_check_agent_benchmarks.config import Config
+
+url = 'http://127.0.0.1:5000/'
+
+headers = {
+ 'Content-Type': 'application/json',
+}
+
+def ask_claim_agent(prompt: str):
+ payload = {
+ "prompt": {
+ "role": "user",
+ "content": prompt
+ }
+ }
+
+ response = requests.post(url, headers=headers, json=payload)
+ if response.status_code == 200:
+ return response.json()['content']
+ else:
+ raise Exception(f"Request failed with status code {response.status_code}: {response.text}")
+
+def get_current_user_reward(wallet_address, pool_id):
+ web3 = Web3(Web3.HTTPProvider(Config.WEB3RPCURL["1"]))
+ distribution_contract = web3.eth.contract(
+ address=web3.to_checksum_address(Config.DISTRIBUTION_PROXY_ADDRESS),
+ abi=Config.DISTRIBUTION_ABI
+ )
+
+ try:
+ if not web3.is_connected():
+ raise Exception("Unable to connect to Ethereum network")
+
+ reward = distribution_contract.functions.getCurrentUserReward(
+ pool_id,
+ web3.to_checksum_address(wallet_address)
+ ).call()
+ formatted_reward = web3.from_wei(reward, 'ether')
+ return round(formatted_reward, 4)
+ except Exception as e:
+ raise Exception(f"Error occurred while fetching the reward: {str(e)}")
+
+def extract_reward_value_from_response(response: str) -> float:
+ match = re.search(r'(\d+\.\d+) MOR', response)
+ if match:
+ return float(match.group(1))
+ raise ValueError("Could not extract a reward value from the agent's response")
diff --git a/submodules/moragents_dockers/README.md b/submodules/moragents_dockers/README.md
index 45fa221..38327fc 100644
--- a/submodules/moragents_dockers/README.md
+++ b/submodules/moragents_dockers/README.md
@@ -1,22 +1,18 @@
# Moragents
-This repo contains multiple agents and a dapp that enables you to interact with the agents, all running locally and containerized with Docker.
+## Overview
+This project is a Flask-based AI chat application featuring intelligent responses from various language models and embeddings. It includes file uploading, cryptocurrency swapping, and a delegator system to manage multiple agents. The application, along with a dApp for agent interaction, runs locally and is containerized with Docker.
+## Pre-requisites
+* [Download Ollama](https://ollama.com/) for your operating system.
+* After the installation finishes, pull these two models:
-## Dependencies
-
-* Docker
-* Ollama
-
-Pull the required models in ollama
-
-```ollama pull llama3```
+```ollama pull llama3.1```
```ollama pull nomic-embed-text```
-
-## Installation
+## Run with Docker Compose
Docker compose will build and run two containers. One will be for the agents, the other will be for the UI.
@@ -40,6 +36,7 @@ Open in the browser: ```http://localhost:3333/```
Docker build will download the model. The first time one of the agents is called, the model will be loaded into memory, and this instance will be shared among all agents.
## Agents
+Five agents are included:
### Data Agent
@@ -64,9 +61,165 @@ A typical flow looks like this:
- The agent requests any missing information, e.g. in this case the amount is missing
- Once all the information has been collected, the agent looks up the assets on the current chain, retrieves contract addresses, and generates a quote if available.
- The quote is shown to the user, who may either proceed or cancel
-- If the user accepts the quote, the swap may proceed. The back-end will generate transactions which will be sent to the front-end to be signed by the user's wallet.
+- If the user accepts the quote, the swap may proceed. The back-end will generate transactions which will be sent to the front-end to be signed by the user's wallet.
- If the allowance for the token being sold is too low, an approval transaction will be generated first
-### RAG Agent
+## RAG Agent
+This agent will answer questions about an uploaded PDF file.
+
+## Tweet Sizzler Agent
+This agent lets you generate tweets and edit them with a WYSIWYG editor.
+Provided you enter your X API credentials in Settings, you can also post directly to your X account.
+
+## MOR Rewards Agent
+Ask the agent to check your MOR rewards and it will retrieve claimable MOR stats from both capital and coder pools.
+
+---
+
+# Delegator
+The Delegator handles user queries by analyzing the prompt and delegating it to the appropriate agent.
+
+## API Endpoints
+
+1. **Chat Functionality**
+ - Endpoint: `POST /`
+   - Handles chat interactions, delegating to appropriate agents when necessary (see the example request after this list).
+
+2. **Message History**
+ - Endpoint: `GET /messages`
+ - Retrieves chat message history.
+
+3. **Clear Messages**
+ - Endpoint: `GET /clear_messages`
+ - Clears the chat message history.
-This agent will answer questions about an uploaded PDF file.
+4. **Swap Operations**
+ - Endpoints:
+ - `POST /tx_status`: Check transaction status
+ - `POST /allowance`: Get allowance
+ - `POST /approve`: Approve transaction
+ - `POST /swap`: Perform swap
+
+5. **File Upload**
+ - Endpoint: `POST /upload`
+ - Handles file uploads for RAG (Retrieval-Augmented Generation) purposes.
+
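+For example, the chat endpoint can be exercised with a minimal request. This is a sketch; it assumes the agents service is reachable on `http://localhost:8080` (the port mapping used by the desktop runtime), so adjust the host and port to your own Docker setup:
+
+```python
+import requests
+
+base = "http://localhost:8080"
+payload = {"prompt": {"role": "user", "content": "What is the price of bitcoin?"}}
+print(requests.post(f"{base}/", json=payload).json())   # delegated chat response
+print(requests.get(f"{base}/messages").json())          # message history
+```
+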
+# Adding a New Agent
+
+## Overview
+
+Each agent is configured in the [agents/src/config.py](agents/src/config.py) file, which specifies the agent's path, class, and other details.
+This allows the delegator to delegate to the correct task agent based on the user's query.
+
+## Steps to Add a New Agent
+
+### 1. Create a New Agent Folder
+
+1. **Create a new folder** in the `agents/src` directory for your new agent.
+2. **Implement the agent logic** within this folder. Ensure that the agent class is defined and ready to handle the specific type of queries it is designed for.
+
+### 2. Update `config.py`
+
+1. **Open the `config.py` file** located in the `agents/src` directory.
+2. **Add a new entry** in the `DELEGATOR_CONFIG` dictionary with the following details:
+ - `path`: The path to the agent's module.
+ - `class`: The class name of the agent.
+ - `detail`: A description of when to use this agent.
+ - `name`: A unique name for the agent.
+ - `upload`: A boolean indicating if the agent requires a file to be uploaded from the front-end before it should be called.
+
+#### Example:
+```python:agents/src/config.py
+DELEGATOR_CONFIG = {
+ "agents": [
+ # ... existing agents ...
+ {
+ "path": "new_agent.src.agent",
+ "class": "NewAgent",
+ "description": "if the prompt is related to new functionality, choose new agent",
+            "upload": False
+ "upload": false
+ }
+ ]
+}
+```
+
+
+### 3. Implement Agent Logic
+
+1. **Define the agent class** in the specified path.
+2. **Ensure the agent can handle the queries** it is designed for.
+
+#### Example:
+```python:agents/src/new_agent/src/agent.py
+class NewAgent:
+ def __init__(self, agent_info, llm, llm_ollama, embeddings, flask_app):
+ """
+ Initialize the NewAgent.
+
+ Parameters:
+ - agent_info (dict): Configuration details for the agent.
+ - llm (object): The main language model instance.
+ - llm_ollama (object): An additional language model instance from Ollama.
+ - embeddings (object): Embedding model for handling vector representations.
+ - flask_app (Flask): The Flask application instance.
+ """
+ self.agent_info = agent_info
+ self.llm = llm
+ self.llm_ollama = llm_ollama
+ self.embeddings = embeddings
+ self.flask_app = flask_app
+
+ def chat(self, request):
+ # Implement chat logic
+ pass
+
+ # Add other methods as needed
+```
+
+
+### 4. Handle Multi-Turn Conversations
+
+Agents can handle multi-turn conversations by returning a `next_turn_agent`, which indicates the name of the agent that should handle the next turn.
+
+#### Example:
+```python
+class NewAgent:
+ def __init__(self, agent_info, llm, llm_ollama, embeddings, flask_app):
+ """
+ Initialize the NewAgent.
+
+ Parameters:
+ - agent_info (dict): Configuration details for the agent.
+ - llm (object): The main language model instance.
+ - llm_ollama (object): An additional language model instance.
+ - embeddings (object): Embedding model for handling vector representations.
+ - flask_app (Flask): The Flask application instance.
+ """
+ self.agent_info = agent_info
+ self.llm = llm
+ self.llm_ollama = llm_ollama
+ self.embeddings = embeddings
+ self.flask_app = flask_app
+
+ def chat(self, request, user_id):
+ # Process the query and determine the next agent
+ next_turn_agent = self.agent_info["name"]
+
+        # Generate the response; returning this agent's own name as next_turn_agent keeps the conversation with it.
+
+ return response, next_turn_agent
+
+```
+
+### 5. Integration
+
+The `Delegator` will automatically (see the sketch after this list):
+- Import the agent module.
+- Instantiate the agent class.
+- Add the agent to its internal dictionary.
+
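+Conceptually this is a dynamic import keyed by the `path` and `class` fields described above. A minimal sketch of the idea (not the actual `Delegator` implementation) looks like:
+
+```python
+import importlib
+
+def load_agent(entry, llm, llm_ollama, embeddings, flask_app):
+    # entry is one item from DELEGATOR_CONFIG["agents"]
+    module = importlib.import_module(entry["path"])
+    agent_class = getattr(module, entry["class"])
+    return agent_class(entry, llm, llm_ollama, embeddings, flask_app)
+```
+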
+### 6. Test the New Agent
+
+1. **Ensure the `Delegator` can properly route requests** to the new agent.
+2. **Test the agent's functionality** through the chat interface.
diff --git a/submodules/moragents_dockers/agents/Dockerfile b/submodules/moragents_dockers/agents/Dockerfile
index 060da90..52bcec7 100644
--- a/submodules/moragents_dockers/agents/Dockerfile
+++ b/submodules/moragents_dockers/agents/Dockerfile
@@ -24,7 +24,6 @@ copy . .
# Expose the port your application listens on
EXPOSE 5000
-
# Set the environment variable for Flask
ENV FLASK_APP=src/app.py
diff --git a/submodules/moragents_dockers/agents/Dockerfile-apple b/submodules/moragents_dockers/agents/Dockerfile-apple
index ef7ee27..ad80504 100644
--- a/submodules/moragents_dockers/agents/Dockerfile-apple
+++ b/submodules/moragents_dockers/agents/Dockerfile-apple
@@ -29,4 +29,4 @@ EXPOSE 5000
ENV FLASK_APP=src/app.py
# Run the application
-CMD ["flask", "run", "--host", "0.0.0.0"]
+CMD ["flask", "run", "--host", "0.0.0.0"]
\ No newline at end of file
diff --git a/submodules/moragents_dockers/agents/download_model.py b/submodules/moragents_dockers/agents/download_model.py
index 09aafa3..55f3fd4 100644
--- a/submodules/moragents_dockers/agents/download_model.py
+++ b/submodules/moragents_dockers/agents/download_model.py
@@ -3,16 +3,18 @@
from huggingface_hub import hf_hub_download
from model_config import Config
-def download_model(model_name,revision):
+
+def download_model(model_name, revision):
"""Function to download model from the hub"""
- model_directory=hf_hub_download(repo_id=model_name,filename=revision)
+ model_directory = hf_hub_download(repo_id=model_name, filename=revision)
return model_directory
+
def move_files(src_dir, dest_dir):
"""Move files from source to destination directory."""
for f in os.listdir(src_dir):
- src_path = os.path.join(src_dir, f)
- dst_path = os.path.join(dest_dir, f)
+ src_path = os.path.join(src_dir, f)
+ dst_path = os.path.join(dest_dir, f)
shutil.copy2(src_path, dst_path)
os.remove(src_path)
@@ -20,8 +22,8 @@ def move_files(src_dir, dest_dir):
if __name__ == "__main__":
download_dir = Config.DOWNLOAD_DIR
os.makedirs(download_dir, exist_ok=True)
- model_name=Config.MODEL_NAME
- revision=Config.MODEL_REVISION
- path=download_model(model_name,revision)
- model_path = '/'.join(path.split('/')[:-1])+'/'
- move_files(model_path,download_dir)
+ model_name = Config.MODEL_NAME
+ revision = Config.MODEL_REVISION
+ path = download_model(model_name, revision)
+ model_path = "/".join(path.split("/")[:-1]) + "/"
+ move_files(model_path, download_dir)
diff --git a/submodules/moragents_dockers/agents/model_config.py b/submodules/moragents_dockers/agents/model_config.py
index 6efe801..a5868e4 100644
--- a/submodules/moragents_dockers/agents/model_config.py
+++ b/submodules/moragents_dockers/agents/model_config.py
@@ -3,11 +3,11 @@
# Logging configuration
logging.basicConfig(level=logging.INFO)
+
# Configuration object
class Config:
# Model configuration
MODEL_NAME = "meetkai/functionary-small-v2.4-GGUF"
MODEL_REVISION = "functionary-small-v2.4.Q4_0.gguf"
- MODEL_PATH = "model/"+MODEL_REVISION
+ MODEL_PATH = "model/" + MODEL_REVISION
DOWNLOAD_DIR = "model"
-
\ No newline at end of file
diff --git a/submodules/moragents_dockers/agents/requirements.txt b/submodules/moragents_dockers/agents/requirements.txt
index c3838ef..e87e422 100644
--- a/submodules/moragents_dockers/agents/requirements.txt
+++ b/submodules/moragents_dockers/agents/requirements.txt
@@ -1,4 +1,4 @@
-llama-cpp-python==0.2.65
+llama-cpp-python==0.2.90
transformers==4.43.3
sentencepiece==0.2.0
protobuf==5.27.2
@@ -12,4 +12,6 @@ pymupdf==1.22.5
faiss-cpu==1.8.0.post1
langchain-text-splitters==0.2.2
langchain-core==0.2.24
-langchain-community==0.2.10
\ No newline at end of file
+langchain-community==0.2.10
+torch
+tweepy
\ No newline at end of file
diff --git a/submodules/moragents_dockers/agents/src/README-UI.md b/submodules/moragents_dockers/agents/src/README-UI.md
deleted file mode 100644
index eef241b..0000000
--- a/submodules/moragents_dockers/agents/src/README-UI.md
+++ /dev/null
@@ -1,65 +0,0 @@
-#Documentation for building a UI with our swap agent
-
-First you need to build the image
-
-```docker build -t agent .```
-
-Then Run the image by exposing port 5000
-
-```docker run --name agent -p 5000:5000 agent```
-
-
-And then once it is running we will have 2 endpoints
-
-first endpoint is for chat
-### chat endpoint = 'http://127.0.0.1:5000/'
-
-* The chat api accepts inputs in openai chat compelition format and
- we need to send the messages as a list
-
- ```messages = {"role":"user","content":"what is the price of bitcoin?"}```
-
-### Usage
-
- ```sh
- url = 'http://127.0.0.1:5000/
- message={"role":"user","content":"what is the price of bitcoin?"}
- data = {'prompt':message}
- response = requests.post(url, json=data)
- ```
-
-* The response will also be in this format
- ```sh
- response = {"role":"assistant","content":"The price of bitcoin is 62,000$"}
- ```
-
-* Then you can continue the conversation
-
-
- ### Messages endpoint = 'http://127.0.0.1:5000/messages'
- Since now the conversation is history is stored in the backend you can retrieve it using this api
- # Usage
-
-
- ```sh
- url = 'http://127.0.0.1:5000/messages'
- response = requests.get(url)
-
- result=response.text
- ```
-
- This will return a message to be displayed on the ui
-
- ```sh
-
- {"messages":[{"content":"what is the price of bitcoin?","role":"user"},{"content":"The price of itcoin is 62,000$","role":"assistant"}
-
- ```
-
- Then we can get the conversation history by using the message key result["messages"]
-
-
-
-
-
-
diff --git a/submodules/moragents_dockers/agents/src/app.py b/submodules/moragents_dockers/agents/src/app.py
index 65d225c..a483683 100644
--- a/submodules/moragents_dockers/agents/src/app.py
+++ b/submodules/moragents_dockers/agents/src/app.py
@@ -1,123 +1,220 @@
-from flask_cors import CORS
-from flask import Flask, request, jsonify
+import os
+import logging
+import time
+from functools import wraps
from config import Config
-from swap_agent.src import agent as swap_agent
-from data_agent.src import agent as data_agent
-from rag_agent.src import agent as rag_agent
from llama_cpp import Llama
-from llama_cpp.llama_tokenizer import LlamaHFTokenizer
-import os
-import logging
+from flask_cors import CORS
+from flask import Flask, request, jsonify
from langchain_community.llms import Ollama
+from delegator import Delegator
+from llama_cpp.llama_tokenizer import LlamaHFTokenizer
from langchain_community.embeddings import OllamaEmbeddings
-from langchain_core.prompts import ChatPromptTemplate
-from rag_agent.src.config import Config as ollama_config
+# Constants
+INITIAL_MESSAGE = {
+ "role": "assistant",
+ "content": "This highly experimental chatbot is not intended for making important decisions, and its responses are generated based on incomplete data and algorithms that may evolve rapidly. By using this chatbot, you acknowledge that you use it at your own discretion and assume all risks associated with its limitations and potential errors.",
+}
+UPLOAD_FOLDER = os.path.join(os.getcwd(), "uploads")
+
+# Configure logging
+logging.basicConfig(
+ level=logging.INFO,
+ format="%(asctime)s - %(name)s - %(levelname)s - %(message)s",
+ filename="app.log",
+ filemode="a",
+)
+logger = logging.getLogger(__name__)
def load_llm():
- llm = Llama(
- model_path=Config.MODEL_PATH,
- chat_format="functionary-v2",
- tokenizer=LlamaHFTokenizer.from_pretrained("meetkai/functionary-small-v2.4-GGUF"),
- n_gpu_layers=0,
- n_batch=4000,
- n_ctx=4000
- )
- return llm
+ logger.info("Loading LLM model")
+ try:
+ llm = Llama(
+ model_path=Config.MODEL_PATH,
+ chat_format="functionary-v2",
+ tokenizer=LlamaHFTokenizer.from_pretrained(
+ "meetkai/functionary-small-v2.4-GGUF"
+ ),
+ n_gpu_layers=-1, # Use all available GPU layers
+            n_batch=1024,  # Batch size for prompt processing
+            n_ctx=1024,  # Context window size
+ verbose=False, # Disable verbose output for speed
+ use_mlock=True, # Lock memory to prevent swapping
+ use_mmap=True, # Use memory mapping for faster loading
+ n_threads=16, # Increase number of threads for more parallel processing
+ )
+ logger.info("LLM model loaded successfully")
+ return llm
+ except Exception as e:
+ logger.error(f"Error loading LLM model: {str(e)}")
+ raise
-llm=load_llm()
-
app = Flask(__name__)
CORS(app)
-upload_state=False
-UPLOAD_FOLDER = os.path.join(os.getcwd(), 'uploads')
+upload_state = False
os.makedirs(UPLOAD_FOLDER, exist_ok=True)
-app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
-app.config['MAX_CONTENT_LENGTH'] = ollama_config.MAX_LENGTH
-
-llm_ollama = Ollama(model="llama3",base_url=ollama_config.URL)
-embeddings = OllamaEmbeddings(model="nomic-embed-text",base_url=ollama_config.URL)
-
-logging.basicConfig(level=logging.DEBUG)
-
-
-agent = None
-messages=[]
-prompt = ChatPromptTemplate.from_template(
- """
- Answer the following question only based on the given context
-
-
- {context}
-
-
- Question: {input}
-"""
-)
+app.config["UPLOAD_FOLDER"] = UPLOAD_FOLDER
+app.config["MAX_CONTENT_LENGTH"] = Config.MAX_UPLOAD_LENGTH
+
+try:
+ llm = load_llm()
+except TimeoutError:
+ logger.error("LLM loading timed out")
+ llm = None
+except Exception as e:
+ logger.error(f"Failed to load LLM: {str(e)}")
+ llm = None
+
+llm_ollama = Ollama(model="llama3.1", base_url=Config.OLLAMA_URL)
+embeddings = OllamaEmbeddings(model="nomic-embed-text", base_url=Config.OLLAMA_URL)
+
+delegator = Delegator(Config.DELEGATOR_CONFIG, llm, llm_ollama, embeddings, app)
+messages = [INITIAL_MESSAGE]
+next_turn_agent = None
+
+
+@app.route("/", methods=["POST"])
+def chat():
+ global next_turn_agent, messages
+ data = request.get_json()
+ logger.info(f"Received chat request: {data}")
+
+ try:
+ current_agent = None
+ if "prompt" in data:
+ prompt = data["prompt"]
+ messages.append(prompt)
+
+ if not next_turn_agent:
+ logger.info("No next turn agent, getting delegator response")
+
+ start_time = time.time()
+ result = delegator.get_delegator_response(prompt, upload_state)
+ end_time = time.time()
+ logger.info(f"Delegator response time: {end_time - start_time:.2f} seconds")
+ logger.info(f"Delegator response: {result}")
+
+ if "next" not in result:
+ logger.error(f"Missing 'next' key in delegator response: {result}")
+ raise ValueError("Invalid delegator response: missing 'next' key")
+
+ next_agent = result["next"]
+ current_agent, response_swap = delegator.delegate_chat(next_agent, request)
+ else:
+ logger.info(f"Delegating chat to next turn agent: {next_turn_agent}")
+ current_agent, response_swap = delegator.delegate_chat(
+ next_turn_agent, request
+ )
+
+ # Handle both dictionary and tuple returns from delegate_chat
+ response, status_code = (
+ response_swap if isinstance(response_swap, tuple) else (response_swap, 200)
+ )
+
+        # Carry over next_turn_agent from the response when present; reset it if the response is not a dict (e.g. an error tuple)
+ next_turn_agent = (
+ response_swap.get("next_turn_agent")
+ if isinstance(response_swap, dict)
+ else None
+ )
+
+ if isinstance(response, dict) and "role" in response and "content" in response:
+ response_with_agent = response.copy()
+ response_with_agent["agentName"] = current_agent or "Unknown"
+
+ messages.append(response_with_agent)
+
+ logger.info("Sending response: %s", response_with_agent)
+ return jsonify(response_with_agent), status_code
+ else:
+ logger.error(f"Invalid response format: {response}")
+ return jsonify({"error": "Invalid response format"}), 500
+
+ except TimeoutError:
+ logger.error("Chat request timed out")
+ return jsonify({"error": "Request timed out"}), 504
+ except ValueError as ve:
+ logger.error(f"Input formatting error: {str(ve)}")
+ return jsonify({"error": str(ve)}), 400
+ except Exception as e:
+ logger.error(f"Error in chat route: {str(e)}", exc_info=True)
+ return jsonify({"error": str(e)}), 500
+
+
+@app.route("/tx_status", methods=["POST"])
+def swap_agent_tx_status():
+ logger.info("Received tx_status request")
+ response = delegator.delegate_route("crypto swap agent", request, "tx_status")
+ messages.append(response)
+ return jsonify(response)
-@app.route('/swap_agent/', methods=['POST'])
-def swap_agent_chat():
- global llm
- return swap_agent.chat(request, llm)
-@app.route('/swap_agent/tx_status', methods=['POST'])
-def swap_agent_tx_status():
- return swap_agent.tx_status(request)
-
-@app.route('/swap_agent/messages', methods=['GET'])
-def swap_agent_messages():
- return swap_agent.get_messages()
-
-@app.route('/swap_agent/clear_messages', methods=['GET'])
-def swap_agent_clear_messages():
- return swap_agent.clear_messages()
-
-@app.route('/swap_agent/allowance', methods=['POST'])
+@app.route("/messages", methods=["GET"])
+def get_messages():
+ logger.info("Received get_messages request")
+ return jsonify({"messages": messages})
+
+
+@app.route("/clear_messages", methods=["GET"])
+def clear_messages():
+ global messages
+ logger.info("Clearing message history")
+ messages = [INITIAL_MESSAGE]
+ return jsonify({"response": "successfully cleared message history"})
+
+
+@app.route("/allowance", methods=["POST"])
def swap_agent_allowance():
- return swap_agent.get_allowance(request)
-
-@app.route('/swap_agent/approve', methods=['POST'])
-def swap_agent_approve():
- return swap_agent.approve(request)
-
-@app.route('/swap_agent/swap', methods=['POST'])
-def swap_agent_swap():
- return swap_agent.swap(request)
+ logger.info("Received allowance request")
+ return delegator.delegate_route("crypto swap agent", request, "get_allowance")
-@app.route('/data_agent/', methods=['POST'])
-def data_agent_chat():
- global llm
- return data_agent.chat(request, llm)
+@app.route("/approve", methods=["POST"])
+def swap_agent_approve():
+ logger.info("Received approve request")
+ return delegator.delegate_route("crypto swap agent", request, "approve")
-@app.route('/data_agent/messages', methods=['GET'])
-def data_agent_messages():
- return data_agent.get_messages()
-@app.route('/data_agent/clear_messages', methods=['GET'])
-def data_agent_clear_messages():
- return data_agent.clear_messages()
+@app.route("/swap", methods=["POST"])
+def swap_agent_swap():
+ logger.info("Received swap request")
+ return delegator.delegate_route("crypto swap agent", request, "swap")
-@app.route('/rag_agent/upload', methods=['POST'])
+
+@app.route("/upload", methods=["POST"])
def rag_agent_upload():
- global llm_ollama,UPLOAD_FOLDER,embeddings
- return rag_agent.upload_file(request, UPLOAD_FOLDER, llm_ollama, embeddings,ollama_config.MAX_FILE_SIZE)
+ global messages, upload_state
+ logger.info("Received upload request")
+ response = delegator.delegate_route(
+ "general purpose and context-based rag agent", request, "upload_file"
+ )
+ messages.append(response)
+ upload_state = True
+ return jsonify(response)
+
+
+@app.route("/regenerate_tweet", methods=["POST"])
+def regenerate_tweet():
+ logger.info("Received generate tweet request")
+ return delegator.delegate_route("tweet sizzler agent", None, "generate_tweet")
+
+
+@app.route("/post_tweet", methods=["POST"])
+def post_tweet():
+ logger.info("Received x post request")
+ return delegator.delegate_route("tweet sizzler agent", request, "post_tweet")
-@app.route('/rag_agent/', methods=['POST'])
-def rag_agent_chat():
- return rag_agent.chat(request)
-@app.route('/rag_agent/messages', methods=['GET'])
-def rag_agent_messages():
- return rag_agent.get_messages()
+# TODO: Persist the X API key in the database (once we set this up)
+@app.route("/set_x_api_key", methods=["POST"])
+def set_x_api_key():
+ logger.info("Received set X API key request")
+ return delegator.delegate_route("tweet sizzler agent", request, "set_x_api_key")
-@app.route('/rag_agent/clear_messages', methods=['GET'])
-def rag_agent_clear_messages():
- return rag_agent.clear_messages()
-
-if __name__ == '__main__':
- app.run(host='0.0.0.0', port=5000, debug=True)
\ No newline at end of file
+if __name__ == "__main__":
+ app.run(host="0.0.0.0", port=5000, debug=True)
diff --git a/submodules/moragents_dockers/agents/src/claim_agent/src/agent.py b/submodules/moragents_dockers/agents/src/claim_agent/src/agent.py
new file mode 100644
index 0000000..3538eab
--- /dev/null
+++ b/submodules/moragents_dockers/agents/src/claim_agent/src/agent.py
@@ -0,0 +1,70 @@
+import json
+from claim_agent.src import tools
+from claim_agent.src.config import Config
+
+class ClaimAgent:
+ def __init__(self, agent_info, llm, llm_ollama, embeddings, flask_app):
+ self.agent_info = agent_info
+ self.llm = llm
+ self.tools_provided = tools.get_tools()
+ self.conversation_state = {}
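+        # Per-wallet conversation state machine: "initial" -> "awaiting_confirmation"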
+
+ def get_response(self, message, wallet_address):
+ if wallet_address not in self.conversation_state:
+ self.conversation_state[wallet_address] = {"state": "initial"}
+
+ state = self.conversation_state[wallet_address]["state"]
+
+ if state == "initial":
+ rewards = {
+ 0: tools.get_current_user_reward(wallet_address, 0),
+ 1: tools.get_current_user_reward(wallet_address, 1)
+ }
+ available_rewards = {pool: amount for pool, amount in rewards.items() if amount > 0}
+
+ if available_rewards:
+ selected_pool = max(available_rewards, key=available_rewards.get)
+ self.conversation_state[wallet_address]["available_rewards"] = {selected_pool: available_rewards[selected_pool]}
+ self.conversation_state[wallet_address]["receiver_address"] = wallet_address
+ self.conversation_state[wallet_address]["state"] = "awaiting_confirmation"
+ return f"You have {available_rewards[selected_pool]} MOR rewards available in pool {selected_pool}. Would you like to proceed with claiming these rewards?", "assistant", self.agent_info["name"]
+ else:
+ return f"No rewards found for your wallet address {wallet_address} in either pool. Claim cannot be processed.", "assistant", None
+
+ elif state == "awaiting_confirmation":
+ user_input = message[-1]['content'].lower()
+ if any(word in user_input for word in ['yes', 'proceed', 'confirm', 'claim']):
+ return self.prepare_transactions(wallet_address)
+ else:
+ return "Please confirm if you want to proceed with the claim by saying 'yes', 'proceed', 'confirm', or 'claim'.", "assistant", self.agent_info["name"]
+
+ return "I'm sorry, I didn't understand that. Can you please rephrase your request?", "assistant", self.agent_info["name"]
+
+ def prepare_transactions(self, wallet_address):
+ available_rewards = self.conversation_state[wallet_address]["available_rewards"]
+ receiver_address = self.conversation_state[wallet_address]["receiver_address"]
+ transactions = []
+
+ for pool_id in available_rewards.keys():
+ try:
+ tx_data = tools.prepare_claim_transaction(pool_id, receiver_address)
+ transactions.append({"pool": pool_id, "transaction": tx_data})
+ except Exception as e:
+ return f"Error preparing transaction for pool {pool_id}: {str(e)}", "assistant", None
+
+ self.conversation_state[wallet_address]["transactions"] = transactions
+ tx_data_str = json.dumps(transactions, indent=2)
+ return f"Transaction data prepared for signing:\n\n{tx_data_str}", "assistant", None
+
+ def chat(self, request):
+ try:
+ data = request.get_json()
+ if 'prompt' in data and 'wallet_address' in data:
+ prompt = data['prompt']
+ wallet_address = data['wallet_address']
+ response, role, next_turn_agent = self.get_response([prompt], wallet_address)
+ return {"role": role, "content": response, "next_turn_agent": next_turn_agent}
+ else:
+ return {"error": "Missing required parameters"}, 400
+ except Exception as e:
+ return {"Error": str(e)}, 500
diff --git a/submodules/moragents_dockers/agents/src/claim_agent/src/config.py b/submodules/moragents_dockers/agents/src/claim_agent/src/config.py
new file mode 100644
index 0000000..c7bbddc
--- /dev/null
+++ b/submodules/moragents_dockers/agents/src/claim_agent/src/config.py
@@ -0,0 +1,58 @@
+import logging
+
+# Logging configuration
+logging.basicConfig(level=logging.INFO)
+
+# Configuration object
+class Config:
+
+ WEB3RPCURL = {
+ "1": "https://eth.llamarpc.com/"
+ }
+ MINT_FEE = 0.001 # in ETH
+
+ DISTRIBUTION_PROXY_ADDRESS = "0x47176B2Af9885dC6C4575d4eFd63895f7Aaa4790"
+ DISTRIBUTION_ABI = [
+ {
+ "inputs": [
+ {
+ "internalType": "uint256",
+ "name": "poolId_",
+ "type": "uint256"
+ },
+ {
+ "internalType": "address",
+ "name": "receiver_",
+ "type": "address"
+ }
+ ],
+ "name": "claim",
+ "outputs": [],
+ "stateMutability": "payable",
+ "type": "function"
+ },
+ {
+ "inputs": [
+ {
+ "internalType": "uint256",
+ "name": "poolId_",
+ "type": "uint256"
+ },
+ {
+ "internalType": "address",
+ "name": "user_",
+ "type": "address"
+ }
+ ],
+ "name": "getCurrentUserReward",
+ "outputs": [
+ {
+ "internalType": "uint256",
+ "name": "",
+ "type": "uint256"
+ }
+ ],
+ "stateMutability": "view",
+ "type": "function"
+ }
+ ]
diff --git a/submodules/moragents_dockers/agents/src/claim_agent/src/tools.py b/submodules/moragents_dockers/agents/src/claim_agent/src/tools.py
new file mode 100644
index 0000000..bee91c6
--- /dev/null
+++ b/submodules/moragents_dockers/agents/src/claim_agent/src/tools.py
@@ -0,0 +1,91 @@
+from web3 import Web3
+from claim_agent.src.config import Config
+
+def get_current_user_reward(wallet_address, pool_id):
+ web3 = Web3(Web3.HTTPProvider(Config.WEB3RPCURL["1"]))
+ distribution_contract = web3.eth.contract(
+ address=web3.to_checksum_address(Config.DISTRIBUTION_PROXY_ADDRESS),
+ abi=Config.DISTRIBUTION_ABI
+ )
+
+ try:
+ if not web3.is_connected():
+ raise Exception("Unable to connect to Ethereum network")
+
+ reward = distribution_contract.functions.getCurrentUserReward(
+ pool_id,
+ web3.to_checksum_address(wallet_address)
+ ).call()
+ formatted_reward = web3.from_wei(reward, 'ether')
+ return round(formatted_reward, 4)
+ except Exception as e:
+ raise Exception(f"Error occurred while fetching the reward: {str(e)}")
+
+def prepare_claim_transaction(pool_id, wallet_address):
+ try:
+ web3 = Web3(Web3.HTTPProvider(Config.WEB3RPCURL["1"]))
+ contract = web3.eth.contract(
+ address=web3.to_checksum_address(Config.DISTRIBUTION_PROXY_ADDRESS),
+ abi=Config.DISTRIBUTION_ABI
+ )
+ tx_data = contract.encode_abi(fn_name="claim", args=[pool_id, web3.to_checksum_address(wallet_address)])
+ mint_fee = web3.to_wei(Config.MINT_FEE, 'ether')
+ estimated_gas = contract.functions.claim(pool_id, web3.to_checksum_address(wallet_address)).estimate_gas({
+ 'from': web3.to_checksum_address(wallet_address),
+ 'value': mint_fee
+ })
+ return {
+ "to": Config.DISTRIBUTION_PROXY_ADDRESS,
+ "data": tx_data,
+ "value": str(mint_fee),
+ "gas": str(estimated_gas),
+ "chainId": "1"
+ }
+ except Exception as e:
+ raise Exception(f"Failed to prepare claim transaction: {str(e)}")
+
+def get_tools():
+ return [
+ {
+ "type": "function",
+ "function": {
+ "name": "get_current_user_reward",
+ "description": "Fetch the token amount of currently accrued MOR rewards for a user address from a specific pool",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "wallet_address": {
+ "type": "string",
+ "description": "The wallet address to check rewards for"
+ },
+ "pool_id": {
+ "type": "integer",
+ "description": "The ID of the pool to check rewards from"
+ }
+ },
+ "required": ["wallet_address", "pool_id"]
+ }
+ }
+ },
+ {
+ "type": "function",
+ "function": {
+ "name": "prepare_claim_transaction",
+ "description": "Prepare a transaction to claim rewards",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "pool_id": {
+ "type": "integer",
+ "description": "The ID of the pool to claim from"
+ },
+ "wallet_address": {
+ "type": "string",
+ "description": "The wallet address to claim rewards for"
+ }
+ },
+ "required": ["pool_id", "wallet_address"]
+ }
+ }
+ }
+ ]
\ No newline at end of file
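A quick illustration (not part of the patch) of the two helpers above. The wallet address is a placeholder; both calls go out to the public RPC endpoint in Config.WEB3RPCURL, and gas estimation will fail if the claim would revert on-chain.

    from claim_agent.src import tools

    wallet = "0x0000000000000000000000000000000000000001"  # placeholder address

    # Read-only call: accrued MOR for pool 0, converted from wei and rounded to 4 decimals.
    reward = tools.get_current_user_reward(wallet, 0)
    print(f"Pool 0 reward: {reward} MOR")

    if reward > 0:
        # Unsigned transaction fields (to, data, value, gas, chainId) for the wallet to sign.
        tx = tools.prepare_claim_transaction(0, wallet)
        print(tx)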
diff --git a/submodules/moragents_dockers/agents/src/config.py b/submodules/moragents_dockers/agents/src/config.py
index a0868d3..51274c7 100644
--- a/submodules/moragents_dockers/agents/src/config.py
+++ b/submodules/moragents_dockers/agents/src/config.py
@@ -3,35 +3,60 @@
# Logging configuration
logging.basicConfig(level=logging.INFO)
+
# Configuration object
class Config:
+
# Model configuration
MODEL_NAME = "meetkai/functionary-small-v2.4-GGUF"
MODEL_REVISION = "functionary-small-v2.4.Q4_0.gguf"
- MODEL_PATH = "model/"+MODEL_REVISION
+ MODEL_PATH = "model/" + MODEL_REVISION
DOWNLOAD_DIR = "model"
- # API endpoints
- COINGECKO_BASE_URL = "https://api.coingecko.com/api/v3"
- DEFILLAMA_BASE_URL = "https://api.llama.fi"
- PRICE_SUCCESS_MESSAGE = "The price of {coin_name} is ${price:,}"
- PRICE_FAILURE_MESSAGE = "Failed to retrieve price. Please enter a valid coin name."
- FLOOR_PRICE_SUCCESS_MESSAGE = "The floor price of {nft_name} is ${floor_price:,}"
- FLOOR_PRICE_FAILURE_MESSAGE = "Failed to retrieve floor price. Please enter a valid NFT name."
- TVL_SUCCESS_MESSAGE = "The TVL of {protocol_name} is ${tvl:,}"
- TVL_FAILURE_MESSAGE = "Failed to retrieve TVL. Please enter a valid protocol name."
- FDV_SUCCESS_MESSAGE = "The fully diluted valuation of {coin_name} is ${fdv:,}"
- FDV_FAILURE_MESSAGE = "Failed to retrieve FDV. Please enter a valid coin name."
- MARKET_CAP_SUCCESS_MESSAGE = "The market cap of {coin_name} is ${market_cap:,}"
- MARKET_CAP_FAILURE_MESSAGE = "Failed to retrieve market cap. Please enter a valid coin name."
- API_ERROR_MESSAGE = "I can't seem to access the API at the moment."
- INCH_URL = "https://api.1inch.dev/token"
- QUOTE_URL = "https://api.1inch.dev/swap"
- APIBASEURL = f"https://api.1inch.dev/swap/v6.0/"
- HEADERS = { "Authorization": "Bearer WvQuxaMYpPvDiiOL5RHWUm7OzOd20nt4", "accept": "application/json" }
- WEB3RPCURL = {"56":"https://bsc-dataseed.binance.org","42161":"https://arb1.arbitrum.io/rpc","137":"https://polygon-rpc.com","1":"https://cloudflare-eth.com","10":"https://mainnet.optimism.io","8453":"https://mainnet.base.org"}
- NATIVE_TOKENS={"137":"MATIC","56":"BNB","1":"ETH","42161":"ETH","10":"ETH","8453":"ETH"}
- ERC20_ABI = [
- {"constant": True, "inputs": [], "name": "decimals", "outputs": [{"name": "", "type": "uint8"}], "payable": False, "stateMutability": "view", "type": "function"},
- {"constant": True, "inputs": [{"name": "_owner", "type": "address"}], "name": "balanceOf", "outputs": [{"name": "balance", "type": "uint256"}], "payable": False, "stateMutability": "view", "type": "function"}
- ]
- INCH_NATIVE_TOKEN_ADDRESS = "0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE"
\ No newline at end of file
+ OLLAMA_URL = "http://host.docker.internal:11434"
+ MAX_UPLOAD_LENGTH = 16 * 1024 * 1024
+ DELEGATOR_CONFIG = {
+ "agents": [
+ {
+ "path": "rag_agent.src.agent",
+ "class": "RagAgent",
+ "description": "Handles general queries, information retrieval, and context-based questions. Use for any query that doesn't explicitly match other agents' specialties.",
+ "name": "general purpose and context-based rag agent",
+ "upload_required": True,
+ },
+ {
+ "path": "data_agent.src.agent",
+ "class": "DataAgent",
+ "description": "Crypto-specific. Provides real-time cryptocurrency data such as price, market cap, and fully diluted valuation (FDV).",
+ "name": "crypto data agent",
+ "upload_required": False,
+ },
+ {
+ "path": "swap_agent.src.agent",
+ "class": "SwapAgent",
+ "description": "Handles cryptocurrency swapping operations. Use when the query explicitly mentions swapping, exchanging, or converting one cryptocurrency to another.",
+ "name": "crypto swap agent",
+ "upload_required": False,
+ },
+ {
+ "path": "tweet_sizzler_agent.src.agent",
+ "class": "TweetSizzlerAgent",
+ "description": "Generates and posts engaging tweets. Use when the query explicitly mentions Twitter, tweeting, or X platform.",
+ "name": "tweet sizzler agent",
+ "upload_required": False,
+ },
+ # {
+ # "path": "claim_agent.src.agent",
+ # "class": "ClaimAgent",
+ # "description": "Manages the process of claiming rewards or tokens, specifically MOR rewards. Use when the query explicitly mentions claiming rewards or tokens.",
+ # "name": "claim agent",
+ # "upload_required": False,
+ # },
+ {
+ "path": "reward_agent.src.agent",
+ "class": "RewardAgent",
+ "description": "Provides information about user's accrued MOR rewards or tokens. Use when the query is about checking or querying reward balances.",
+ "name": "reward agent",
+ "upload_required": False,
+ },
+ ]
+ }
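The "path"/"class" pairs above are resolved dynamically at startup (see Delegator.load_agents later in this diff). A minimal sketch of that resolution, not part of the patch and assuming agents/src is on the import path; resolve() is illustrative and its None arguments stand in for the real LLM and Flask objects.

    import importlib
    from config import Config

    def resolve(entry, llm=None, llm_ollama=None, embeddings=None, flask_app=None):
        # e.g. imports "data_agent.src.agent" and instantiates its DataAgent class
        module = importlib.import_module(entry["path"])
        agent_class = getattr(module, entry["class"])
        return agent_class(entry, llm, llm_ollama, embeddings, flask_app)

    for entry in Config.DELEGATOR_CONFIG["agents"]:
        print(f'{entry["name"]} -> {entry["path"]}.{entry["class"]}')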
diff --git a/submodules/moragents_dockers/agents/src/data_agent/src/agent.py b/submodules/moragents_dockers/agents/src/data_agent/src/agent.py
index acf79df..1f5dd54 100644
--- a/submodules/moragents_dockers/agents/src/data_agent/src/agent.py
+++ b/submodules/moragents_dockers/agents/src/data_agent/src/agent.py
@@ -1,71 +1,79 @@
-from llama_cpp import Llama
-from llama_cpp.llama_tokenizer import LlamaHFTokenizer
-from flask import Flask, request, jsonify
-from flask_cors import CORS
-import requests
-from data_agent.src import tools
-from config import Config
import json
+from data_agent.src import tools
+import logging
+logger = logging.getLogger(__name__)
-tools_provided=tools.get_tools()
-messages_ui=[]
+class DataAgent:
+ def __init__(self, config, llm, llm_ollama, embeddings, flask_app):
+ self.llm = llm
+ self.flask_app = flask_app
+ self.config = config
+ self.tools_provided = tools.get_tools()
-def get_response(message,llm):
- global tools_provided
- messages=[{"role": "system", "content": "Don't make assumptions about the value of the arguments for the function thy should always be supplied by the user and do not alter the value of the arguments . Don't make assumptions about what values to plug into functions. Ask for clarification if a user request is ambiguous."}]
- messages.extend(message)
- result = llm.create_chat_completion(
- messages = messages,
- tools=tools_provided,
- tool_choice="auto"
- )
- if "tool_calls" in result["choices"][0]["message"].keys():
- func=result["choices"][0]["message"]["tool_calls"][0]['function']
- if func["name"]=='get_price':
- args=json.loads(func["arguments"])
- return tools.get_coin_price_tool(args['coin_name']),"assistant"
- elif func["name"]=='get_floor_price':
- args=json.loads(func["arguments"])
- return tools.get_nft_floor_price_tool(args['nft_name']),"assistant"
- elif func["name"]=='get_fdv':
- args=json.loads(func["arguments"])
- return tools.get_fully_diluted_valuation_tool(args['coin_name']),"assistant"
- elif func["name"]=='get_tvl':
- args=json.loads(func["arguments"])
- return tools.get_protocol_total_value_locked_tool(args['protocol_name']),"assistant"
- elif func["name"]=='get_market_cap':
- args=json.loads(func["arguments"])
- return tools.get_coin_market_cap_tool(args['coin_name']),"assistant"
- return result["choices"][0]["message"]['content'],"assistant"
+ def get_response(self, message):
+ messages = [
+ {
+ "role": "system",
+ "content": (
+ "Don't make assumptions about the value of the arguments for the function; "
+ "they should always be supplied by the user, and do not alter the value of the arguments. "
+ "Don't make assumptions about what values to plug into functions. Ask for clarification if a user "
+ "request is ambiguous."
+ ),
+ }
+ ]
+ messages.extend(message)
+ logger.info("Sending request to LLM with %d messages", len(messages))
+ result = self.llm.create_chat_completion(
+ messages=messages, tools=self.tools_provided, tool_choice="auto"
+ )
-def generate_response(prompt,llm):
- global messages_ui
- messages_ui.append(prompt)
- response,role = get_response([prompt],llm)
- messages_ui.append({"role":role,"content":response})
- return response,role
-
-def chat(request,llm):
- try:
- data = request.get_json()
- if 'prompt' in data:
- prompt = data['prompt']
- response,role = generate_response(prompt,llm)
- return jsonify({"role":role,"content":response})
- else:
- return jsonify({"error": "Missing required parameters"}), 400
+ logger.info("Received response from LLM: %s", result)
- except Exception as e:
- return jsonify({"Error": str(e)}), 500
-
+ if "tool_calls" in result["choices"][0]["message"]:
+ func = result["choices"][0]["message"]["tool_calls"][0]["function"]
+ logger.info("LLM suggested using tool: %s", func["name"])
+ args = json.loads(func["arguments"])
+ if func["name"] == "get_price":
+ return tools.get_coin_price_tool(args["coin_name"]), "assistant"
+ elif func["name"] == "get_floor_price":
+ return tools.get_nft_floor_price_tool(args["nft_name"]), "assistant"
+ elif func["name"] == "get_fdv":
+ return (
+ tools.get_fully_diluted_valuation_tool(args["coin_name"]),
+ "assistant",
+ )
+ elif func["name"] == "get_tvl":
+ return (
+ tools.get_protocol_total_value_locked_tool(args["protocol_name"]),
+ "assistant",
+ )
+ elif func["name"] == "get_market_cap":
+ return tools.get_coin_market_cap_tool(args["coin_name"]), "assistant"
+ else:
+ logger.info("LLM provided a direct response without using tools")
+ return result["choices"][0]["message"]["content"], "assistant"
-def get_messages():
- global messages_ui
- return jsonify({"messages":messages_ui})
+ def generate_response(self, prompt):
+ response, role = self.get_response([prompt])
+ return response, role
-def clear_messages():
- global messages_ui
- messages_ui=[]
- return jsonify({"response":"successfully cleared message history"})
+ def chat(self, request):
+ try:
+ data = request.get_json()
+ if "prompt" in data:
+ prompt = data["prompt"]
+ logger.info(
+ "Received chat request with prompt: %s",
+ prompt[:50] + "..." if len(prompt) > 50 else prompt,
+ )
+ response, role = self.generate_response(prompt)
+ return {"role": role, "content": response}
+ else:
+ logger.warning("Received chat request without 'prompt' in data")
+ return {"error": "Missing required parameters"}, 400
+ except Exception as e:
+ logger.error("Error in chat method: %s", str(e), exc_info=True)
+ return {"Error": str(e)}, 500
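A minimal sketch (not part of the patch) of exercising DataAgent.chat directly. The EchoLLM stub and the /data/chat route name are illustrative; in the real app the llama-cpp model and the route registration live elsewhere.

    from flask import Flask, request, jsonify
    from data_agent.src.agent import DataAgent

    class EchoLLM:
        """Stand-in for the llama-cpp model: never calls a tool, just echoes the prompt."""
        def create_chat_completion(self, messages, tools=None, tool_choice=None):
            last = messages[-1]
            content = last["content"] if isinstance(last, dict) else str(last)
            return {"choices": [{"message": {"content": content}}]}

    app = Flask(__name__)
    data_agent = DataAgent(config={}, llm=EchoLLM(), llm_ollama=None, embeddings=None, flask_app=app)

    @app.route("/data/chat", methods=["POST"])  # illustrative route name
    def data_chat():
        result = data_agent.chat(request)
        # chat() returns a dict on success or a (dict, status_code) tuple on errors.
        if isinstance(result, tuple):
            body, status = result
            return jsonify(body), status
        return jsonify(result)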
diff --git a/submodules/moragents_dockers/agents/src/data_agent/src/tools.py b/submodules/moragents_dockers/agents/src/data_agent/src/tools.py
index d3fcf77..e02839a 100644
--- a/submodules/moragents_dockers/agents/src/data_agent/src/tools.py
+++ b/submodules/moragents_dockers/agents/src/data_agent/src/tools.py
@@ -1,7 +1,6 @@
import requests
import logging
-from config import Config
-import time
+from data_agent.src.config import Config
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity
diff --git a/submodules/moragents_dockers/agents/src/delegator.py b/submodules/moragents_dockers/agents/src/delegator.py
new file mode 100644
index 0000000..bc4d605
--- /dev/null
+++ b/submodules/moragents_dockers/agents/src/delegator.py
@@ -0,0 +1,151 @@
+import importlib
+import logging
+import json
+
+logger = logging.getLogger(__name__)
+
+# Configurable default agent
+DEFAULT_AGENT = "general purpose and context-based rag agent"
+
+
+class Delegator:
+ def __init__(self, config, llm, llm_ollama, embeddings, flask_app):
+ self.llm = llm
+ self.flask_app = flask_app
+ self.llm_ollama = llm_ollama
+ self.embeddings = embeddings
+ self.config = config
+ self.agents = self.load_agents(config)
+ logger.info("Delegator initialized with %d agents", len(self.agents))
+
+ def load_agents(self, config):
+ agents = {}
+ for agent_info in config["agents"]:
+ try:
+ module = importlib.import_module(agent_info["path"])
+ agent_class = getattr(module, agent_info["class"])
+ agent_instance = agent_class(
+ agent_info,
+ self.llm,
+ self.llm_ollama,
+ self.embeddings,
+ self.flask_app,
+ )
+ agents[agent_info["name"]] = agent_instance
+ logger.info("Loaded agent: %s", agent_info["name"])
+ except Exception as e:
+ logger.error("Failed to load agent %s: %s", agent_info["name"], str(e))
+ return agents
+
+ def get_delegator_response(self, prompt, upload_state):
+ available_agents = [
+ agent_info["name"]
+ for agent_info in self.config["agents"]
+ if not (agent_info["upload_required"] and not upload_state)
+ ]
+ agent_descriptions = "\n".join(
+ f"- {agent_info['name']}: {agent_info['description']}"
+ for agent_info in self.config["agents"]
+ if agent_info["name"] in available_agents
+ )
+
+ prompt_text = (
+ "### Instruction: Your name is Morpheus. "
+ "Your primary function is to select the correct agent based on the user's input. "
+ "You MUST use the 'route' function to select an agent. "
+ "Available agents and their descriptions:\n"
+ f"{agent_descriptions}\n"
+ "Analyze the user's input and select the most appropriate agent. "
+ "Do not respond with any text other than calling the 'route' function. "
+ "###"
+ )
+
+ tools = [
+ {
+ "type": "function",
+ "function": {
+ "name": "route",
+ "description": "Choose which agent to run next",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "next": {
+ "type": "string",
+ "enum": available_agents,
+ "description": "The name of the next agent to run",
+ }
+ },
+ "required": ["next"],
+ },
+ },
+ }
+ ]
+
+ message_list = [
+ {"role": "system", "content": prompt_text},
+ prompt,
+ {
+ "role": "system",
+ "content": "Remember, you must use the 'route' function to select an agent.",
+ },
+ ]
+
+ logger.info("Sending prompt to LLM: %s", prompt)
+ result = self.llm.create_chat_completion(
+ messages=message_list,
+ tools=tools,
+ tool_choice="auto",
+ temperature=0.3,
+ )
+ logger.info("Received response from LLM: %s", result)
+
+ response = result["choices"][0]["message"]
+
+ if response.get("tool_calls"):
+ try:
+ function_args = json.loads(
+ response["tool_calls"][0]["function"]["arguments"]
+ )
+ return {"next": function_args["next"]}
+ except (json.JSONDecodeError, KeyError) as e:
+ logger.error(f"Error parsing function call: {e}")
+ return {"next": DEFAULT_AGENT}
+ else:
+ logger.warning(
+ "No tool calls in LLM response, defaulting to general purpose agent"
+ )
+ return {"next": DEFAULT_AGENT}
+
+ def delegate_chat(self, agent_name, request):
+ logger.info(f"Attempting to delegate chat to agent: {agent_name}")
+ agent = self.agents.get(agent_name)
+ if agent:
+ logger.info(f"Successfully found agent: {agent_name}")
+ try:
+ result = agent.chat(request)
+ logger.info(f"Chat delegation to {agent_name} completed successfully")
+ logger.info(f"Response from {agent_name}: {result}")
+ return agent_name, result
+ except Exception as e:
+ logger.error(f"Error during chat delegation to {agent_name}: {str(e)}")
+ return {"error": f"Chat delegation to {agent_name} failed"}, 500
+ else:
+ logger.warning(f"Attempted to delegate to non-existent agent: {agent_name}")
+ return {"error": f"No such agent registered: {agent_name}"}, 400
+
+ def delegate_route(self, agent_name, request, method_name):
+ agent = self.agents.get(agent_name)
+ if agent:
+ if hasattr(agent, method_name):
+ logger.info("Delegating %s to agent: %s", method_name, agent_name)
+ method = getattr(agent, method_name)
+ return method(request)
+ else:
+ logger.warning(
+ "Method %s not found in agent %s", method_name, agent_name
+ )
+ return {
+ "error": f"No such method '{method_name}' in agent '{agent_name}'"
+ }, 400
+ logger.warning("Attempted to delegate to non-existent agent: %s", agent_name)
+ return {"error": "No such agent registered"}, 400
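A minimal sketch (not part of the patch) of the routing flow: the delegator asks the LLM to pick an agent through the 'route' tool, then delegate_chat forwards the original Flask request to that agent. RouterStubLLM stands in for the real model, agents/src is assumed to be on the import path, and agents whose imports fail are simply skipped by load_agents.

    from flask import Flask
    from config import Config  # the DELEGATOR_CONFIG shown earlier in this diff
    from delegator import Delegator

    class RouterStubLLM:
        """Always routes to the crypto data agent."""
        def create_chat_completion(self, messages, tools=None, tool_choice=None, temperature=None):
            return {"choices": [{"message": {
                "tool_calls": [{"function": {"arguments": '{"next": "crypto data agent"}'}}]
            }}]}

    app = Flask(__name__)
    delegator = Delegator(Config.DELEGATOR_CONFIG, RouterStubLLM(), None, None, app)

    routing = delegator.get_delegator_response(
        {"role": "user", "content": "What's the market cap of Bitcoin?"}, upload_state=False
    )
    print(routing)  # {"next": "crypto data agent"}; falls back to DEFAULT_AGENT on bad tool calls
    # delegator.delegate_chat(routing["next"], flask_request) then calls that agent's chat().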
diff --git a/submodules/moragents_dockers/agents/src/rag_agent/src/agent.py b/submodules/moragents_dockers/agents/src/rag_agent/src/agent.py
index 99e76ea..0880cf0 100644
--- a/submodules/moragents_dockers/agents/src/rag_agent/src/agent.py
+++ b/submodules/moragents_dockers/agents/src/rag_agent/src/agent.py
@@ -1,5 +1,4 @@
-from flask import jsonify
-import os
+import os
import logging
from langchain_community.document_loaders import PyMuPDFLoader
from langchain_community.vectorstores import FAISS
@@ -13,85 +12,79 @@
logging.basicConfig(level=logging.DEBUG)
-agent = None
-messages=[{'role':"assistant","content":"Please upload a file to begin"}]
-upload_state = False
-prompt = ChatPromptTemplate.from_template(
- """
- Answer the following question only based on the given context
-
-
- {context}
-
-
- Question: {input}
-"""
-)
+class RagAgent:
+ def __init__(self, config, llm, llm_ollama, embeddings, flask_app):
+ self.llm = llm_ollama
+ self.flask_app = flask_app
+ self.embedding = embeddings
+ self.config = config
+ self.agent = None
+ self.messages = [{'role': "assistant", "content": "Please upload a file to begin"}]
+ self.upload_state = False
+ self.prompt = ChatPromptTemplate.from_template(
+ """
+ Answer the following question only based on the given context
+
+
+ {context}
+
+
+ Question: {input}
+ """
+ )
+ self.UPLOAD_FOLDER = flask_app.config['UPLOAD_FOLDER']
+ self.max_size = 5 * 1024 * 1024
+ self.retriever = None
+
-def handle_file_upload(file,UPLOAD_FOLDER,llm,embeddings):
- global agent,prompt
- if not os.path.exists(UPLOAD_FOLDER):
- os.makedirs(UPLOAD_FOLDER, exist_ok=True)
- filename = secure_filename(file.filename)
- file.save(os.path.join(UPLOAD_FOLDER, filename))
- # DocumentToolsGenerator class instantiation
- loader = PyMuPDFLoader(os.path.join(UPLOAD_FOLDER,filename))
- docs = loader.load()
- text_splitter = RecursiveCharacterTextSplitter()
- split_documents = text_splitter.split_documents(docs)
- vector_store = FAISS.from_documents(split_documents, embeddings)
- docs_chain = create_stuff_documents_chain(llm, prompt)
- retriever = vector_store.as_retriever()
- agent = create_retrieval_chain(retriever, docs_chain)
+ def handle_file_upload(self, file):
+ if not os.path.exists(self.UPLOAD_FOLDER):
+ os.makedirs(self.UPLOAD_FOLDER, exist_ok=True)
+ filename = secure_filename(file.filename)
+ file.save(os.path.join(self.UPLOAD_FOLDER, filename))
+ # Load the uploaded PDF, split it into chunks and build the FAISS retriever
+ loader = PyMuPDFLoader(os.path.join(self.UPLOAD_FOLDER, filename))
+ docs = loader.load()
+ text_splitter = RecursiveCharacterTextSplitter(chunk_size=1024, chunk_overlap=20, length_function=len, is_separator_regex=False)
+ split_documents = text_splitter.split_documents(docs)
+ vector_store = FAISS.from_documents(split_documents, self.embedding)
+ self.retriever = vector_store.as_retriever(search_kwargs={"k": 7})
-def upload_file(request,UPLOAD_FOLDER,llm,embeddings,MAX_SIZE):
- global upload_state
- if 'file' not in request.files:
- return jsonify({'error': 'No file part'}), 400
- file = request.files['file']
- if file.filename == '':
- return jsonify({'error': 'No selected file'}), 400
- # Check file size
- file.seek(0, os.SEEK_END)
- file_length = file.tell()
- file.seek(0, 0) # Reset the file pointer to the beginning
- if file_length > MAX_SIZE:
- messages.append({"role": "assistant", "content": 'please use a file less than 5 MB'})
- return jsonify({"role": "assistant", "content": 'please use a file less than 5 MB'})
- try:
- handle_file_upload(file,UPLOAD_FOLDER,llm,embeddings)
- upload_state = True
- messages.append({"role": "assistant", "content": 'You have successfully uploaded the text'})
- return jsonify({"role": "assistant", "content": 'You have successfully uploaded the text'})
- except Exception as e:
- logging.error(f'Error during file upload: {str(e)}')
- return jsonify({'error': str(e)}), 500
-
-def chat(request):
- global messages,upload_state,agent
- try:
- data = request.get_json()
- if 'prompt' in data:
- prompt = data['prompt']['content']
- messages.append(data['prompt'])
- role = "assistant"
- response = agent.invoke({"input": prompt}) if upload_state else {"answer":"please upload a file first"}
-
- messages.append({"role": role, "content": response["answer"]})
- return jsonify({"role": role, "content": response["answer"]})
- else:
- return jsonify({"error": "Missing required parameters"}), 400
- except Exception as e:
- logging.error(f'Error in chat endpoint: {str(e)}')
- return jsonify({"Error": str(e)}), 500
-
-def get_messages():
- global messages
- return jsonify({"messages": messages})
-
-def clear_messages():
- global messages
- messages = [{'role':"assistant","content":"Please upload a file to begin"}]
- return jsonify({"response": "successfully cleared message history"})
+ def upload_file(self, request):
+ if 'file' not in request.files:
+ return {'error': 'No file part'}, 400
+ file = request.files['file']
+ if file.filename == '':
+ return {'error': 'No selected file'}, 400
+ # Check file size
+ file.seek(0, os.SEEK_END)
+ file_length = file.tell()
+ file.seek(0, 0) # Reset the file pointer to the beginning
+ if file_length > self.max_size:
+ return {"role": "assistant", "content": 'Please use a file less than 5 MB.'}
+ try:
+ self.handle_file_upload(file)
+ self.upload_state = True
+ return {"role": "assistant", "content": 'You have successfully uploaded the file.'}
+ except Exception as e:
+ logging.error(f'Error during file upload: {str(e)}')
+ return {'error': str(e)}, 500
+
+ def chat(self, request):
+ try:
+ data = request.get_json()
+ if 'prompt' in data:
+ prompt = data['prompt']['content']
+ role = "assistant"
+ if not self.upload_state or self.retriever is None:
+ return {"role": role, "content": "Please upload a file first."}
+ retrieved_docs = self.retriever.invoke(prompt)
+ formatted_context = "\n\n".join(doc.page_content for doc in retrieved_docs)
+ formatted_prompt = f"Question: {prompt}\n\nContext: {formatted_context}"
+ answer = self.llm(formatted_prompt)
+ return {"role": role, "content": answer}
+ else:
+ return {"error": "Missing required parameters"}, 400
+ except Exception as e:
+ logging.error(f'Error in chat endpoint: {str(e)}')
+ return {"Error": str(e)}, 500
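A minimal sketch (not part of the patch) of the upload-then-chat flow above, using Flask test requests. The Ollama model name and the PDF path are assumptions, and UPLOAD_FOLDER must be set on the Flask app because __init__ reads it.

    from flask import Flask, request
    from langchain_community.llms import Ollama
    from langchain_community.embeddings import OllamaEmbeddings
    from rag_agent.src.agent import RagAgent

    app = Flask(__name__)
    app.config["UPLOAD_FOLDER"] = "/tmp/uploads"
    llm_ollama = Ollama(base_url="http://localhost:11434", model="llama3")           # model name is an assumption
    embeddings = OllamaEmbeddings(base_url="http://localhost:11434", model="llama3")

    rag = RagAgent(config={}, llm=None, llm_ollama=llm_ollama, embeddings=embeddings, flask_app=app)

    # 1) Upload a PDF; the multipart field must be named "file" and stay under 5 MB.
    with app.test_request_context(method="POST",
                                  data={"file": (open("paper.pdf", "rb"), "paper.pdf")}):
        print(rag.upload_file(request))

    # 2) Ask a question; the agent retrieves the top-7 chunks and prompts the Ollama model.
    with app.test_request_context(json={"prompt": {"role": "user", "content": "Summarize the introduction"}}):
        print(rag.chat(request))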
diff --git a/submodules/moragents_dockers/agents/src/rag_agent/src/config.py b/submodules/moragents_dockers/agents/src/rag_agent/src/config.py
index ad1952d..63983c2 100644
--- a/submodules/moragents_dockers/agents/src/rag_agent/src/config.py
+++ b/submodules/moragents_dockers/agents/src/rag_agent/src/config.py
@@ -6,5 +6,4 @@
# Configuration object
class Config:
MAX_FILE_SIZE=5 * 1024 * 1024 # 5 MB
- MAX_LENGTH=16 * 1024 * 1024
- URL="http://host.docker.internal:11434"
\ No newline at end of file
+ MAX_LENGTH=16 * 1024 * 1024
\ No newline at end of file
diff --git a/submodules/moragents_dockers/agents/src/reward_agent/src/agent.py b/submodules/moragents_dockers/agents/src/reward_agent/src/agent.py
new file mode 100644
index 0000000..44555da
--- /dev/null
+++ b/submodules/moragents_dockers/agents/src/reward_agent/src/agent.py
@@ -0,0 +1,49 @@
+import json
+import logging
+from reward_agent.src import tools
+
+logger = logging.getLogger(__name__)
+
+
+class RewardAgent:
+ def __init__(self, agent_info, llm, llm_ollama, embeddings, flask_app):
+ self.agent_info = agent_info
+ self.llm = llm
+ self.llm_ollama = llm_ollama
+ self.embeddings = embeddings
+ self.flask_app = flask_app
+ self.tools_provided = tools.get_tools()
+
+ def get_response(self, message, wallet_address):
+ logger.info(f"Checking rewards for wallet address: {wallet_address}")
+
+ try:
+ rewards = {
+ 0: tools.get_current_user_reward(wallet_address, 0),
+ 1: tools.get_current_user_reward(wallet_address, 1)
+ }
+
+ response = f"Your current MOR rewards:\n"
+ response += f"Capital Providers Pool (Pool 0): {rewards[0]} MOR\n"
+ response += f"Code Providers Pool (Pool 1): {rewards[1]} MOR"
+
+ logger.info(f"Rewards retrieved successfully for {wallet_address}")
+ return response, "assistant", None
+ except Exception as e:
+ logger.error(f"Error occurred while checking rewards: {str(e)}")
+ return f"An error occurred while checking your rewards: {str(e)}", "assistant", None
+
+ def chat(self, request):
+ try:
+ data = request.get_json()
+ if 'prompt' in data and 'wallet_address' in data:
+ prompt = data['prompt']
+ wallet_address = data['wallet_address']
+ response, role, next_turn_agent = self.get_response(prompt, wallet_address)
+ return {"role": role, "content": response, "next_turn_agent": next_turn_agent}
+ else:
+ logger.warning("Missing required parameters in request")
+ return {"error": "Missing required parameters"}, 400
+ except Exception as e:
+ logger.error(f"Error in chat method: {str(e)}")
+ return {"Error": str(e)}, 500
diff --git a/submodules/moragents_dockers/agents/src/reward_agent/src/config.py b/submodules/moragents_dockers/agents/src/reward_agent/src/config.py
new file mode 100644
index 0000000..ff7c6d6
--- /dev/null
+++ b/submodules/moragents_dockers/agents/src/reward_agent/src/config.py
@@ -0,0 +1,39 @@
+import logging
+
+# Logging configuration
+logging.basicConfig(level=logging.INFO)
+
+# Configuration object
+class Config:
+
+ WEB3RPCURL = {
+ "1": "https://eth.llamarpc.com/",
+ }
+
+ DISTRIBUTION_PROXY_ADDRESS = "0x47176B2Af9885dC6C4575d4eFd63895f7Aaa4790"
+ DISTRIBUTION_ABI = [
+ {
+ "inputs": [
+ {
+ "internalType": "uint256",
+ "name": "poolId_",
+ "type": "uint256"
+ },
+ {
+ "internalType": "address",
+ "name": "user_",
+ "type": "address"
+ }
+ ],
+ "name": "getCurrentUserReward",
+ "outputs": [
+ {
+ "internalType": "uint256",
+ "name": "",
+ "type": "uint256"
+ }
+ ],
+ "stateMutability": "view",
+ "type": "function"
+ }
+ ]
diff --git a/submodules/moragents_dockers/agents/src/reward_agent/src/tools.py b/submodules/moragents_dockers/agents/src/reward_agent/src/tools.py
new file mode 100644
index 0000000..cbb749f
--- /dev/null
+++ b/submodules/moragents_dockers/agents/src/reward_agent/src/tools.py
@@ -0,0 +1,47 @@
+from web3 import Web3
+from reward_agent.src.config import Config
+
+def get_current_user_reward(wallet_address, pool_id):
+ web3 = Web3(Web3.HTTPProvider(Config.WEB3RPCURL["1"]))
+ distribution_contract = web3.eth.contract(
+ address=web3.to_checksum_address(Config.DISTRIBUTION_PROXY_ADDRESS),
+ abi=Config.DISTRIBUTION_ABI
+ )
+
+ try:
+ if not web3.is_connected():
+ raise Exception("Unable to connect to Ethereum network")
+
+ reward = distribution_contract.functions.getCurrentUserReward(
+ pool_id,
+ web3.to_checksum_address(wallet_address)
+ ).call()
+ formatted_reward = web3.from_wei(reward, 'ether')
+ return round(formatted_reward, 4)
+ except Exception as e:
+ raise Exception(f"Error occurred while fetching the reward: {str(e)}")
+
+def get_tools():
+ return [
+ {
+ "type": "function",
+ "function": {
+ "name": "get_current_user_reward",
+ "description": "Fetch the token amount of currently accrued MOR rewards for a user address from a specific pool",
+ "parameters": {
+ "type": "object",
+ "properties": {
+ "wallet_address": {
+ "type": "string",
+ "description": "The wallet address to check rewards for"
+ },
+ "pool_id": {
+ "type": "integer",
+ "description": "The ID of the pool to check rewards from"
+ }
+ },
+ "required": ["wallet_address", "pool_id"]
+ }
+ }
+ }
+ ]
\ No newline at end of file
diff --git a/submodules/moragents_dockers/agents/src/swap_agent/src/agent.py b/submodules/moragents_dockers/agents/src/swap_agent/src/agent.py
index 90f861a..73f2694 100644
--- a/submodules/moragents_dockers/agents/src/swap_agent/src/agent.py
+++ b/submodules/moragents_dockers/agents/src/swap_agent/src/agent.py
@@ -1,207 +1,190 @@
-from flask import Flask, request, jsonify
-from llama_cpp import Llama
-from llama_cpp.llama_tokenizer import LlamaHFTokenizer
+import json
import requests
+from flask import jsonify
from swap_agent.src import tools
-from swap_agent.src.tools import InsufficientFundsError, TokenNotFoundError, SwapNotPossibleError
-from config import Config
-import json
-
-
-tools_provided = tools.get_tools()
-messages = [{'role':"assistant","content":"This highly experimental chatbot is not intended for making important decisions, and its responses are generated based on incomplete data and algorithms that may evolve rapidly. By using this chatbot, you acknowledge that you use it at your own discretion and assume all risks associated with its limitations and potential errors."}]
-context = []
-
-def api_request_url(method_name, query_params, chain_id):
- base_url = Config.APIBASEURL + str(chain_id)
- return f"{base_url}{method_name}?{'&'.join([f'{key}={value}' for key, value in query_params.items()])}"
-
-def check_allowance(token_address, wallet_address, chain_id):
- url = api_request_url("/approve/allowance", {"tokenAddress": token_address, "walletAddress": wallet_address}, chain_id)
- response = requests.get(url, headers=Config.HEADERS)
- data = response.json()
- return data
-
-def approve_transaction(token_address, chain_id, amount=None):
- query_params = {"tokenAddress": token_address, "amount": amount} if amount else {"tokenAddress": token_address}
- url = api_request_url("/approve/transaction", query_params, chain_id)
- response = requests.get(url, headers=Config.HEADERS)
- transaction = response.json()
- return transaction
-
-def build_tx_for_swap(swap_params, chain_id):
- url = api_request_url("/swap", swap_params, chain_id)
- swap_transaction = requests.get(url, headers=Config.HEADERS).json()
- return swap_transaction
-
-def get_response(message, chain_id, wallet_address,llm):
- global tools_provided , messages, context
- prompt = [{"role": "system", "content": "Don't make assumptions about the value of the arguments for the function they should always be supplied by the user and do not alter the value of the arguments. Don't make assumptions about what values to plug into functions. Ask for clarification if a user request is ambiguous. you only need the value of token1 we dont need the value of token2. After starting from scratch do not assume the name of token1 or token2"}]
- prompt.extend(message)
- result = llm.create_chat_completion(
- messages=prompt,
- tools=tools_provided,
- tool_choice="auto",
- temperature=0.01
- )
- if "tool_calls" in result["choices"][0]["message"].keys():
- func = result["choices"][0]["message"]["tool_calls"][0]['function']
- if func["name"] == "swap_agent":
- args = json.loads(func["arguments"])
- tok1 = args["token1"]
- tok2 = args["token2"]
- value = args["value"]
- try:
- res, role = tools.swap_coins(tok1, tok2, float(value), chain_id, wallet_address)
- messages.append({"role": role, "content": res})
- except (tools.InsufficientFundsError, tools.TokenNotFoundError,tools.SwapNotPossibleError) as e:
- context = []
- messages.append({"role": "assistant", "content": str(e)})
- return str(e), "assistant"
- return res, role
- messages.append({"role": "assistant", "content": result["choices"][0]["message"]['content']})
- context.append({"role": "assistant", "content": result["choices"][0]["message"]['content']})
- return result["choices"][0]["message"]['content'], "assistant"
-
-def get_status(flag, tx_hash, tx_type):
- global messages, context
- response = ''
-
- if flag == "cancelled":
- response = f"The {tx_type} transaction has been cancelled."
- elif flag == "success":
- response = f"The {tx_type} transaction was successful."
- elif flag == "failed":
- response = f"The {tx_type} transaction has failed."
- elif flag == "initiated":
- response = f"Transaction has been sent, please wait for it to be confirmed."
-
- if tx_hash:
- response = response + f" The transaction hash is {tx_hash}."
-
- if flag == "success" and tx_type == "approve":
- response = response + " Please proceed with the swap transaction."
- elif flag != "initiated":
- response = response + " Is there anything else I can help you with?"
-
- if flag != "initiated":
- context = []
- messages.append({"role": "assistant", "content": response})
- context.append({"role": "assistant", "content": response})
- context.append({"role": "user", "content": "okay lets start again from scratch"})
- else:
- messages.append({"role": "assistant", "content": response})
-
- return response
-
-def generate_response(prompt,chainid,walletAddress,llm):
- global messages,context
- messages.append(prompt)
- context.append(prompt)
- response,role = get_response(context,chainid,walletAddress,llm)
- return response,role
-
-
-def chat(request,llm):
- try:
- data = request.get_json()
- if 'prompt' in data:
- prompt = data['prompt']
- wallet_address = data['wallet_address']
- chain_id = data['chain_id']
- response, role = generate_response(prompt, chain_id, wallet_address,llm)
- return jsonify({"role": role, "content": response})
- else:
- return jsonify({"error": "Missing required parameters"}), 400
-
- except Exception as e:
- return jsonify({"Error": str(e)}), 500
-
-
-def tx_status(request):
- try:
- data = request.get_json()
- if 'status' in data:
- prompt = data['status']
- tx_hash = data.get('tx_hash', '')
- tx_type = data.get('tx_type', '')
- response = get_status(prompt, tx_hash, tx_type)
- return jsonify({"role": "assistant", "content": response})
- else:
- return jsonify({"error": "Missing required parameters"}), 400
-
- except Exception as e:
- return jsonify({"Error": str(e)}), 500
-
-def get_messages():
- global messages
- try:
- return jsonify({"messages": messages})
- except Exception as e:
- return jsonify({"Error": str(e)}), 500
-
-def clear_messages():
- global messages, context
- try:
- messages = [{'role':"assistant","content":"This highly experimental chatbot is not intended for making important decisions, and its responses are generated based on incomplete data and algorithms that may evolve rapidly. By using this chatbot, you acknowledge that you use it at your own discretion and assume all risks associated with its limitations and potential errors."}]
- context = []
- return jsonify({"response": "successfully cleared message history"})
- except Exception as e:
- return jsonify({"Error": str(e)}), 500
-
-def get_allowance(request):
- try:
- data = request.get_json()
- if 'tokenAddress' in data:
- token = data['tokenAddress']
- wallet_address = data['walletAddress']
- chain_id = data["chain_id"]
- res = check_allowance(token, wallet_address, chain_id)
- return jsonify({"response": res})
- else:
- return jsonify({"error": "Missing required parameters"}), 400
-
- except Exception as e:
- return jsonify({"Error": str(e)}), 500
-
-def approve(request):
- try:
- data = request.get_json()
- if 'tokenAddress' in data:
- token = data['tokenAddress']
- chain_id = data['chain_id']
- amount = data['amount']
- res = approve_transaction(token, chain_id, amount)
- return jsonify({"response": res})
- else:
- return jsonify({"error": "Missing required parameters"}), 400
-
- except Exception as e:
- return jsonify({"Error": str(e)}), 500
-
-def swap(request):
- try:
- data = request.get_json()
- if 'src' in data:
- token1 = data['src']
- token2 = data['dst']
- wallet_address = data['walletAddress']
- amount = data['amount']
- slippage = data['slippage']
- chain_id = data['chain_id']
- swap_params = {
- "src": token1,
- "dst": token2,
- "amount": amount,
- "from": wallet_address,
- "slippage": slippage,
- "disableEstimate": False,
- "allowPartialFill": False,
+from swap_agent.src.config import Config
+
+
+class SwapAgent:
+ def __init__(self, config, llm, llm_ollama, embeddings, flask_app):
+ self.llm = llm
+ self.flask_app = flask_app
+ self.config = config
+ self.tools_provided = tools.get_tools()
+ self.context = []
+
+ def api_request_url(self, method_name, query_params, chain_id):
+ base_url = Config.APIBASEURL + str(chain_id)
+ return f"{base_url}{method_name}?{'&'.join([f'{key}={value}' for key, value in query_params.items()])}"
+
+ def check_allowance(self, token_address, wallet_address, chain_id):
+ url = self.api_request_url(
+ "/approve/allowance",
+ {"tokenAddress": token_address, "walletAddress": wallet_address},
+ chain_id
+ )
+ response = requests.get(url, headers=Config.HEADERS)
+ data = response.json()
+ return data
+
+ def approve_transaction(self, token_address, chain_id, amount=None):
+ query_params = {"tokenAddress": token_address, "amount": amount} if amount else {"tokenAddress": token_address}
+ url = self.api_request_url("/approve/transaction", query_params, chain_id)
+ response = requests.get(url, headers=Config.HEADERS)
+ transaction = response.json()
+ return transaction
+
+ def build_tx_for_swap(self, swap_params, chain_id):
+ url = self.api_request_url("/swap", swap_params, chain_id)
+ swap_transaction = requests.get(url, headers=Config.HEADERS).json()
+ return swap_transaction
+
+ def get_response(self, message, chain_id, wallet_address):
+ prompt = [
+ {
+ "role": "system",
+ "content": (
+ "Don't make assumptions about the value of the arguments for the function; "
+ "they should always be supplied by the user, and do not alter the value of the arguments. "
+ "Don't make assumptions about what values to plug into functions. Ask for clarification if a user "
+ "request is ambiguous. You only need the value of token1; we don't need the value of token2. After "
+ "starting from scratch, do not assume the name of token1 or token2."
+ )
}
- swap_transaction = build_tx_for_swap(swap_params, chain_id)
- return swap_transaction
- else:
- return jsonify({"error": "Missing required parameters"}), 400
-
- except Exception as e:
- return jsonify({"Error": str(e)}), 500
\ No newline at end of file
+ ]
+ prompt.extend(message)
+ result = self.llm.create_chat_completion(
+ messages=prompt,
+ tools=self.tools_provided,
+ tool_choice="auto",
+ temperature=0.01
+ )
+ if "tool_calls" in result["choices"][0]["message"]:
+ func = result["choices"][0]["message"]["tool_calls"][0]['function']
+ if func["name"] == "swap_agent":
+ args = json.loads(func["arguments"])
+ tok1 = args["token1"]
+ tok2 = args["token2"]
+ value = args["value"]
+ try:
+ res, role = tools.swap_coins(tok1, tok2, float(value), chain_id, wallet_address)
+ except (tools.InsufficientFundsError, tools.TokenNotFoundError, tools.SwapNotPossibleError) as e:
+ self.context = []
+ return str(e), "assistant", None
+ return res, role, None
+ self.context.append({"role": "assistant", "content": result["choices"][0]["message"]['content']})
+ return result["choices"][0]["message"]['content'], "assistant", "crypto swap agent"
+
+ def get_status(self, flag, tx_hash, tx_type):
+ response = ''
+
+ if flag == "cancelled":
+ response = f"The {tx_type} transaction has been cancelled."
+ elif flag == "success":
+ response = f"The {tx_type} transaction was successful."
+ elif flag == "failed":
+ response = f"The {tx_type} transaction has failed."
+ elif flag == "initiated":
+ response = "The transaction has been sent, please wait for it to be confirmed."
+
+ if tx_hash:
+ response = response + f" The transaction hash is {tx_hash}."
+
+ if flag == "success" and tx_type == "approve":
+ response = response + " Please proceed with the swap transaction."
+ elif flag != "initiated":
+ response = response + " Is there anything else I can help you with?"
+
+ if flag != "initiated":
+ self.context = []
+ self.context.append({"role": "assistant", "content": response})
+ self.context.append({"role": "user", "content": "okay lets start again from scratch"})
+
+ return {"role": "assistant", "content": response}
+
+ def generate_response(self, prompt, chain_id, wallet_address):
+ self.context.append(prompt)
+ response, role, next_turn_agent = self.get_response(self.context, chain_id, wallet_address)
+ return response, role, next_turn_agent
+
+ def chat(self, request):
+ try:
+ data = request.get_json()
+ if 'prompt' in data:
+ prompt = data['prompt']
+ wallet_address = data['wallet_address']
+ chain_id = data['chain_id']
+ response, role, next_turn_agent = self.generate_response(prompt, chain_id, wallet_address)
+ return {"role": role, "content": response, "next_turn_agent": next_turn_agent}
+ else:
+ return {"error": "Missing required parameters"}, 400
+ except Exception as e:
+ return {"Error": str(e)}, 500
+
+ def tx_status(self, request):
+ try:
+ data = request.get_json()
+ if 'status' in data:
+ prompt = data['status']
+ tx_hash = data.get('tx_hash', '')
+ tx_type = data.get('tx_type', '')
+ response = self.get_status(prompt, tx_hash, tx_type)
+ return response
+ else:
+ return {"error": "Missing required parameters"}, 400
+ except Exception as e:
+ return {"Error": str(e)}, 500
+
+ def get_allowance(self, request):
+ try:
+ data = request.get_json()
+ if 'tokenAddress' in data:
+ token = data['tokenAddress']
+ wallet_address = data['walletAddress']
+ chain_id = data["chain_id"]
+ res = self.check_allowance(token, wallet_address, chain_id)
+ return jsonify({"response": res})
+ else:
+ return jsonify({"error": "Missing required parameters"}), 400
+ except Exception as e:
+ return jsonify({"Error": str(e)}), 500
+
+ def approve(self, request):
+ try:
+ data = request.get_json()
+ if 'tokenAddress' in data:
+ token = data['tokenAddress']
+ chain_id = data['chain_id']
+ amount = data['amount']
+ res = self.approve_transaction(token, chain_id, amount)
+ return jsonify({"response": res})
+ else:
+ return jsonify({"error": "Missing required parameters"}), 400
+ except Exception as e:
+ return jsonify({"Error": str(e)}), 500
+
+ def swap(self, request):
+ try:
+ data = request.get_json()
+ if 'src' in data:
+ token1 = data['src']
+ token2 = data['dst']
+ wallet_address = data['walletAddress']
+ amount = data['amount']
+ slippage = data['slippage']
+ chain_id = data['chain_id']
+ swap_params = {
+ "src": token1,
+ "dst": token2,
+ "amount": amount,
+ "from": wallet_address,
+ "slippage": slippage,
+ "disableEstimate": False,
+ "allowPartialFill": False,
+ }
+ swap_transaction = self.build_tx_for_swap(swap_params, chain_id)
+ return swap_transaction
+ else:
+ return jsonify({"error": "Missing required parameters"}), 400
+ except Exception as e:
+ return jsonify({"Error": str(e)}), 500
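A sketch (not part of the patch) of the request the swap helpers above build. The dst token and wallet addresses are placeholders, the src value is the native-token sentinel from Config, and build_tx_for_swap() would GET the printed URL with Config.HEADERS and return the unsigned transaction JSON for the frontend to sign.

    from swap_agent.src.agent import SwapAgent

    agent = SwapAgent(config={}, llm=None, llm_ollama=None, embeddings=None, flask_app=None)

    swap_params = {
        "src": "0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE",  # native ETH sentinel
        "dst": "0x0000000000000000000000000000000000000002",  # placeholder token contract
        "amount": "1000000000000000000",                      # 1 ETH in wei
        "from": "0x0000000000000000000000000000000000000001", # placeholder wallet
        "slippage": 1,
        "disableEstimate": False,
        "allowPartialFill": False,
    }

    # Same construction swap() relies on: Config.APIBASEURL + chain id + method + query string.
    url = agent.api_request_url("/swap", swap_params, 1)
    print(url)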
diff --git a/submodules/moragents_dockers/agents/src/swap_agent/src/config.py b/submodules/moragents_dockers/agents/src/swap_agent/src/config.py
index d54cea4..2e57c81 100644
--- a/submodules/moragents_dockers/agents/src/swap_agent/src/config.py
+++ b/submodules/moragents_dockers/agents/src/swap_agent/src/config.py
@@ -22,4 +22,5 @@ class Config:
{"constant": True, "inputs": [{"name": "_owner", "type": "address"}], "name": "balanceOf", "outputs": [{"name": "balance", "type": "uint256"}], "payable": False, "stateMutability": "view", "type": "function"}
]
INCH_NATIVE_TOKEN_ADDRESS = "0xEeeeeEeeeEeEeeEeEeEeeEEEeeeeEeeeeeeeEEeE"
+
\ No newline at end of file
diff --git a/submodules/moragents_dockers/agents/src/swap_agent/src/tools.py b/submodules/moragents_dockers/agents/src/swap_agent/src/tools.py
index 516f026..1800dfe 100644
--- a/submodules/moragents_dockers/agents/src/swap_agent/src/tools.py
+++ b/submodules/moragents_dockers/agents/src/swap_agent/src/tools.py
@@ -1,8 +1,8 @@
import requests
import logging
import time
-from config import Config
from web3 import Web3
+from swap_agent.src.config import Config
class InsufficientFundsError(Exception):
diff --git a/submodules/moragents_dockers/agents/src/tweet_sizzler_agent/src/agent.py b/submodules/moragents_dockers/agents/src/tweet_sizzler_agent/src/agent.py
new file mode 100644
index 0000000..08dbf32
--- /dev/null
+++ b/submodules/moragents_dockers/agents/src/tweet_sizzler_agent/src/agent.py
@@ -0,0 +1,143 @@
+import logging
+import tweepy
+from .config import Config
+
+# Configure logging
+logging.basicConfig(
+ level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
+)
+logger = logging.getLogger(__name__)
+
+
+class TweetSizzlerAgent:
+ def __init__(self, config, llm, llm_ollama, embeddings, flask_app):
+ self.llm = llm
+ self.flask_app = flask_app
+ self.config = config
+ self.x_api_key = None
+ self.last_prompt_content = None
+ self.twitter_client = None
+
+ def generate_tweet(self, prompt_content=None):
+ # State management for tweet regeneration purposes
+ if prompt_content is not None:
+ self.last_prompt_content = prompt_content
+ elif self.last_prompt_content is None:
+ logger.warning("No prompt content available for tweet generation")
+ return "Tweet generation failed. Please provide a prompt."
+ else:
+ prompt_content = self.last_prompt_content
+
+ logger.info(f"Generating tweet for prompt_content: {prompt_content}")
+ messages = [
+ {
+ "role": "system",
+ "content": Config.TWEET_GENERATION_PROMPT,
+ },
+ {"role": "user", "content": f"Generate a tweet for: {prompt_content}"},
+ ]
+
+ try:
+ result = self.llm.create_chat_completion(
+ messages=messages,
+ max_tokens=Config.LLM_MAX_TOKENS,
+ temperature=Config.LLM_TEMPERATURE,
+ )
+ tweet = result["choices"][0]["message"]["content"]
+ logger.info(f"Tweet generated successfully: {tweet}")
+ return tweet
+ except Exception as e:
+ logger.error(f"Error generating tweet: {str(e)}")
+ raise
+
+ def post_tweet(self, request):
+ data = request.get_json()
+ tweet_content = data.get("post_content")
+ logger.info(f"Received tweet content: {tweet_content}")
+
+ if not tweet_content:
+ logger.warning("Attempted to post tweet without providing content")
+ return {"error": Config.ERROR_NO_TWEET_CONTENT}, 400
+
+ required_keys = [
+ "api_key",
+ "api_secret",
+ "access_token",
+ "access_token_secret",
+ "bearer_token",
+ ]
+ if not all(key in data for key in required_keys):
+ logger.warning("Missing required API credentials")
+ return {"error": Config.ERROR_MISSING_API_CREDENTIALS}, 400
+
+ try:
+ client = tweepy.Client(
+ consumer_key=data["api_key"],
+ consumer_secret=data["api_secret"],
+ access_token=data["access_token"],
+ access_token_secret=data["access_token_secret"],
+ bearer_token=data["bearer_token"],
+ )
+
+ # Post tweet
+ response = client.create_tweet(text=tweet_content)
+ logger.info(f"Tweet posted successfully: {response}")
+ return {
+ "success": "Tweet posted successfully",
+ "tweet": response.data["text"],
+ "tweet_id": response.data["id"],
+ }, 200
+ except Exception as e:
+ logger.error(f"Error posting tweet: {str(e)}")
+ return {"error": f"Failed to post tweet: {str(e)}"}, 500
+
+ def set_x_api_key(self, request):
+ data = request.get_json()
+ required_keys = [
+ "api_key",
+ "api_secret",
+ "access_token",
+ "access_token_secret",
+ "bearer_token",
+ ]
+
+ if not all(key in data for key in required_keys):
+ logger.warning("Missing required API credentials")
+ return {"error": Config.ERROR_MISSING_API_CREDENTIALS}, 400
+
+ # Store these credentials in the Flask app config for later use
+ for key in required_keys:
+ self.flask_app.config[key] = data[key]
+
+ return {"success": "API credentials saved successfully"}, 200
+
+ def chat(self, request):
+ try:
+ data = request.get_json()
+ logger.info(f"Received chat request: {data}")
+ if "prompt" in data:
+ prompt = data["prompt"]
+ action = data.get("action", Config.DEFAULT_ACTION)
+ logger.debug(f"Extracted prompt: {prompt}, action: {action}")
+
+ if action == "generate":
+ logger.info(f"Generating tweet for prompt: {prompt['content']}")
+ tweet = self.generate_tweet(prompt["content"])
+ logger.info(f"Generated tweet: {tweet}")
+ return {"role": "assistant", "content": tweet}
+ elif action == "post":
+ logger.info("Attempting to post tweet")
+ result, status_code = self.post_tweet(request)
+ logger.info(
+ f"Posted tweet result: {result}, status code: {status_code}"
+ )
+ return result, status_code
+ else:
+ logger.error(f"Invalid action received: {action}")
+ return {"error": Config.ERROR_INVALID_ACTION}, 400
+ else:
+ logger.error("Missing 'prompt' in chat request data")
+ return {"error": Config.ERROR_MISSING_PARAMETERS}, 400
+ except Exception as e:
+ logger.exception(f"Unexpected error in chat method: {str(e)}")
+ return {"Error": str(e)}, 500
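A minimal sketch (not part of the patch) of the two actions handled by chat() above. StubLLM stands in for the local model; a real "post" action additionally requires the five X API credential fields checked in post_tweet.

    from flask import Flask, request
    from tweet_sizzler_agent.src.agent import TweetSizzlerAgent

    class StubLLM:
        """Returns a canned tweet instead of calling the local model."""
        def create_chat_completion(self, messages, max_tokens=None, temperature=None):
            return {"choices": [{"message": {"content": "gm, Morpheus agents are live"}}]}

    app = Flask(__name__)
    sizzler = TweetSizzlerAgent(config={}, llm=StubLLM(), llm_ollama=None, embeddings=None, flask_app=app)

    # action "generate" only needs a prompt with content.
    with app.test_request_context(json={"prompt": {"role": "user", "content": "launch day"},
                                        "action": "generate"}):
        print(sizzler.chat(request))

    # action "post" must also carry api_key, api_secret, access_token,
    # access_token_secret and bearer_token so post_tweet can build a tweepy.Client.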
diff --git a/submodules/moragents_dockers/agents/src/tweet_sizzler_agent/src/config.py b/submodules/moragents_dockers/agents/src/tweet_sizzler_agent/src/config.py
new file mode 100644
index 0000000..b7380b7
--- /dev/null
+++ b/submodules/moragents_dockers/agents/src/tweet_sizzler_agent/src/config.py
@@ -0,0 +1,37 @@
+import logging
+
+# Logging configuration
+logging.basicConfig(
+ level=logging.INFO, format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
+)
+
+
+# Configuration object
+class Config:
+
+ # Twitter API configuration
+ TWITTER_API_VERSION = "2"
+ TWEET_MAX_LENGTH = 280
+
+ LLM_MAX_TOKENS = 280
+ LLM_TEMPERATURE = 0.7
+
+ TWEET_GENERATION_PROMPT = (
+ "You are a witty and engaging tweet generator. Your task is to create spicy, "
+ "attention-grabbing tweets based on the user's prompt. It is CRUCIAL that you "
+ "keep the tweets strictly under 280 characters - this is a hard limit. Make the "
+ "tweets as engaging as possible while adhering to this character limit. Do not "
+ "surround your tweet with quotes. Do not preface it with any text like 'here is "
+ "your tweet'. Simply generate and output the tweet, ensuring it is less than "
+ "280 characters long."
+ )
+
+ DEFAULT_ACTION = "generate"
+
+ ERROR_NO_TWEET_CONTENT = "No tweet content provided"
+ ERROR_TWITTER_CLIENT_NOT_INITIALIZED = (
+ "Twitter client not initialized. Please set X API credentials first."
+ )
+ ERROR_MISSING_API_CREDENTIALS = "Missing required X API credentials"
+ ERROR_INVALID_ACTION = "Invalid action"
+ ERROR_MISSING_PARAMETERS = "Missing required parameters"
diff --git a/submodules/moragents_dockers/docker-compose-apple.yml b/submodules/moragents_dockers/docker-compose-apple.yml
index 8cc8dbc..9cf6833 100644
--- a/submodules/moragents_dockers/docker-compose-apple.yml
+++ b/submodules/moragents_dockers/docker-compose-apple.yml
@@ -1,8 +1,8 @@
-version: '3.8'
+version: "3.8"
services:
agents:
- image: lachsbagel/moragents_dockers-agents:apple-0.0.9
+ image: lachsbagel/moragents_dockers-agents:apple-0.1.0
build:
dockerfile: Dockerfile-apple
context: ./agents
@@ -18,14 +18,13 @@ services:
- BASE_URL=http://host.docker.internal:11434
nginx:
- image: lachsbagel/moragents_dockers-nginx:apple-0.0.9
+ image: lachsbagel/moragents_dockers-nginx:apple-0.1.0
build:
context: ./frontend
dockerfile: Dockerfile
target: nginx
ports:
- - '3333:80'
-
+ - "3333:80"
volumes:
agents_data:
diff --git a/submodules/moragents_dockers/docker-compose.yml b/submodules/moragents_dockers/docker-compose.yml
index 0d875eb..135b524 100644
--- a/submodules/moragents_dockers/docker-compose.yml
+++ b/submodules/moragents_dockers/docker-compose.yml
@@ -1,8 +1,8 @@
-version: '3.8'
+version: "3.8"
services:
agents:
- image: lachsbagel/moragents_dockers-agents:amd64-0.0.9
+ image: lachsbagel/moragents_dockers-agents:amd64-0.1.0
build:
dockerfile: Dockerfile
context: ./agents
@@ -18,15 +18,13 @@ services:
- BASE_URL=http://host.docker.internal:11434
nginx:
- image: lachsbagel/moragents_dockers-nginx:amd64-0.0.9
+ image: lachsbagel/moragents_dockers-nginx:amd64-0.1.0
build:
context: ./frontend
dockerfile: Dockerfile
target: nginx
ports:
- - '3333:80'
-
-
+ - "3333:80"
volumes:
agents_data:
diff --git a/submodules/moragents_dockers/frontend/components/Chat/index.tsx b/submodules/moragents_dockers/frontend/components/Chat/index.tsx
index c79a4ba..d450856 100644
--- a/submodules/moragents_dockers/frontend/components/Chat/index.tsx
+++ b/submodules/moragents_dockers/frontend/components/Chat/index.tsx
@@ -1,473 +1,174 @@
-import React, { FC, useCallback, useEffect, useMemo, useState } from "react";
-import { Box, Flex, Input, Button, Text, HStack, InputGroup, InputRightAddon, IconButton, Icon, Grid, GridItem, InputLeftAddon } from "@chakra-ui/react";
-import { Avatar } from "../Avatar";
-import { ChatMessage, SwapTxPayloadType, ApproveTxPayloadType, SwapMessagePayload, sendSwapStatus, getHttpClient, UserOrAssistantMessage, SWAP_STATUS } from "../../services/backendClient";
-import { SwapForm } from "../SwapForm";
-import { useAccount, useCall, useChainId, useSendTransaction, useTransactionConfirmations } from "wagmi";
-import { availableAgents } from "../../config";
-import { SendIcon } from "../CustomIcon/SendIcon";
-import { Loader } from "../Loader";
-import { on } from "events";
-import { AttachmentIcon } from "@chakra-ui/icons";
+import React, { FC, useCallback, useEffect, useState } from "react";
+import { Box } from "@chakra-ui/react";
+import {
+ ChatMessage,
+ sendSwapStatus,
+ getHttpClient,
+ SWAP_STATUS,
+} from "../../services/backendClient";
+import {
+ useAccount,
+ useChainId,
+ useSendTransaction,
+ useTransactionConfirmations,
+} from "wagmi";
+import { MessageList } from "../MessageList";
+import { ChatInput } from "../ChatInput";
+import { LoadingIndicator } from "../LoadingIndicator";
+import {
+ UserOrAssistantMessage,
+ SwapMessage,
+} from "../../services/backendClient";
export type ChatProps = {
- onSubmitMessage: (message: string, file: File | null) => Promise;
- onCancelSwap: (fromAction: number) => void;
- messages: ChatMessage[];
- selectedAgent: string;
- onBackendError: () => void;
+ onSubmitMessage: (message: string, file: File | null) => Promise;
+ onCancelSwap: (fromAction: number) => void;
+ messages: ChatMessage[];
+ selectedAgent: string;
+ onBackendError: () => void;
};
export const Chat: FC<ChatProps> = ({
- onSubmitMessage,
- onCancelSwap,
- messages,
- selectedAgent,
- onBackendError,
+ onSubmitMessage,
+ onCancelSwap,
+ messages,
+ selectedAgent,
+ onBackendError,
}) => {
- const [message, setMessage] = useState('');
- const [messagesData, setMessagesData] = useState(messages);
- const [countSwapsMessages, setCountSwapsMessages] = useState(0);
-
-
- const [file, setFile] = useState(null);
-
- const { address } = useAccount();
- const chainId = useChainId();
-
-
- useEffect(() => {
- setMessagesData([...messages]);
-
- const swapsMessages = messages.filter((message) => message.role === 'swap');
-
- setCountSwapsMessages(swapsMessages.length);
- }, [messages]);
-
- const [txHash, setTxHash] = useState(``);
- const [approveTxHash, setApproveTxHash] = useState(``);
- const [callbackSent, setCallbackSent] = useState(false);
- const [showSpinner, setShowSpinner] = useState(false);
-
- const confirmatons = useTransactionConfirmations({
- hash: (txHash || '0x') as `0x${string}`,
- });
-
- const approveConfirmations = useTransactionConfirmations({
- hash: (approveTxHash || '0x') as `0x${string}`,
- });
-
- const agentSupportsFileUploads = useMemo(() => {
- return availableAgents[selectedAgent]?.supportsFiles || false;
- }, [selectedAgent]);
-
- useEffect(() => {
- if (null === file) return;
-
-
- setMessage(`File selected: ${file.name}, click send button to upload it.`);
- }, [file]);
-
- useEffect(() => {
- if (approveTxHash === '') {
- return;
+ const [messagesData, setMessagesData] = useState(messages);
+ const [showSpinner, setShowSpinner] = useState(false);
+ const [txHash, setTxHash] = useState("");
+ const [approveTxHash, setApproveTxHash] = useState("");
+
+ const { address } = useAccount();
+ const chainId = useChainId();
+ const { sendTransaction } = useSendTransaction();
+
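+ // wagmi's useTransactionConfirmations tracks how many confirmations the swap
+ // and approval transactions have; "0x" is only a placeholder until a real
+ // hash is stored in state.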
+ const confirmatons = useTransactionConfirmations({
+ hash: (txHash || "0x") as `0x${string}`,
+ });
+
+ const approveConfirmations = useTransactionConfirmations({
+ hash: (approveTxHash || "0x") as `0x${string}`,
+ });
+
+ useEffect(() => {
+ setMessagesData([...messages]);
+ }, [messages]);
+
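+ // handleSwapStatus notifies the agent backend (via sendSwapStatus) of the
+ // on-chain outcome for either the approval or the swap transaction, then
+ // appends the backend's reply to the local message list.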
+ const handleSwapStatus = useCallback(
+ async (status: string, hash: string, isApprove: number) => {
+ try {
+ const response: ChatMessage = await sendSwapStatus(
+ getHttpClient(selectedAgent),
+ chainId,
+ address?.toLowerCase() || "0x",
+ status,
+ hash,
+ isApprove
+ );
+
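+ // A plain string reply is appended as an assistant message; a "swap" reply
+ // is appended with its structured payload intact.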
+ if (
+ response.role === "assistant" &&
+ typeof response.content === "string"
+ ) {
+ setMessagesData((prev) => [
+ ...prev,
+ {
+ role: "assistant",
+ content: response.content,
+ } as UserOrAssistantMessage,
+ ]);
+ } else if (response.role === "swap") {
+ setMessagesData((prev) => [...prev, response as SwapMessage]);
}
- if (approveTxHash !== '' && approveConfirmations.data && approveConfirmations.data >= 1) {
- sendSwapStatus(
- getHttpClient(selectedAgent),
- chainId,
- address?.toLowerCase() || '0x',
- SWAP_STATUS.SUCCESS,
- approveTxHash,
- 1
- ).then((response: ChatMessage) => {
- setMessagesData([...messagesData, {
- role: 'assistant',
- content: response.content,
- } as UserOrAssistantMessage]);
-
- setApproveTxHash('');
- }).catch((error) => {
- setApproveTxHash('');
- console.log(`Error sending approve status: ${error}`);
-
- onBackendError();
- });
- }
-
- }, [approveTxHash, approveConfirmations, selectedAgent, chainId, address, messagesData, onBackendError]);
-
- useEffect(() => {
- if (!callbackSent && confirmatons.data && confirmatons.data >= 1) {
- setCallbackSent(true);
- setShowSpinner(true);
- sendSwapStatus(
- getHttpClient(selectedAgent),
- chainId,
- address?.toLowerCase() || '0x',
- SWAP_STATUS.SUCCESS,
- txHash,
- 0
- ).then((response: ChatMessage) => {
- setMessagesData([...messagesData, {
- role: 'assistant',
- content: response.content,
- } as UserOrAssistantMessage]);
-
- setTxHash('');
- setCallbackSent(false);
- setShowSpinner(false);
- }).catch((error) => {
- console.log(`Error sending swap status: ${error}`);
- setTxHash('');
- setCallbackSent(false);
- setShowSpinner(false);
- onBackendError();
- });
+ if (isApprove) {
+ setApproveTxHash("");
+ } else {
+ setTxHash("");
}
- }, [confirmatons, callbackSent, chainId, selectedAgent, address, messagesData, onBackendError]);
-
- const { sendTransaction } = useSendTransaction();
-
- const isMostRecentSwapMessage = useCallback((message: ChatMessage) => {
- const swapsMessages = messagesData.filter((message) => message.role === 'swap');
- // const msgIndex = messagesData.findIndex((msg) => msg.content === message.content);
-
- // if (msgIndex !== messagesData.length - 1) {
- // return false;
- // }
-
- if (message.role === 'swap') {
- const isLastMessage = messagesData[messagesData.length - 1]?.content === message.content;
-
- if (!isLastMessage) {
- const anotherSwapMessagesExists = swapsMessages.length > 1;
-
- if (!anotherSwapMessagesExists) {
- return true;
- }
- }
- }
-
- return swapsMessages[swapsMessages.length - 1] === message;
- }, [messagesData]);
-
- const handleSubmit = async () => {
- if (!message) {
- return;
+ setShowSpinner(false);
+ } catch (error) {
+ console.log(
+ `Error sending ${isApprove ? "approve" : "swap"} status: ${error}`
+ );
+ onBackendError();
+ if (isApprove) {
+ setApproveTxHash("");
+ } else {
+ setTxHash("");
}
-
- setShowSpinner(true);
-
- await onSubmitMessage(message, file);
- setMessage('');
- setFile(null); // Clear the file state after upload
setShowSpinner(false);
+ }
+ },
+ [selectedAgent, chainId, address, onBackendError]
+ );
+
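+ // Once the approval transaction has at least one confirmation, report
+ // SWAP_STATUS.SUCCESS for it (isApprove = 1).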
+ useEffect(() => {
+ if (
+ approveTxHash &&
+ approveConfirmations.data &&
+ approveConfirmations.data >= 1
+ ) {
+ handleSwapStatus(SWAP_STATUS.SUCCESS, approveTxHash, 1);
}
+ }, [approveTxHash, approveConfirmations.data, handleSwapStatus]);
- return (
-