diff --git a/issues_prs/gh_issues_pr.md b/issues_prs/gh_issues_pr.md new file mode 100644 index 0000000..caf4089 --- /dev/null +++ b/issues_prs/gh_issues_pr.md @@ -0,0 +1,206 @@ +# GitHub Issues and Pull Requests Fetcher + +A command-line tool to fetch and format GitHub issues and pull requests from any public or authorized repository. Provides detailed information including comments, review comments, and PR-specific data with flexible output formats. + +## Features + +- Fetch both issues and pull requests +- View complete comment history, including PR review comments +- Multiple output formats (pretty, JSON, CSV) +- Filter by state (open, closed, all) +- Chronologically sorted messages +- Detailed PR information (changes, mergeability, review status) +- Support for pagination (handles large repositories) +- Debug mode for troubleshooting + +## Prerequisites + +- Python 3.7 or higher +- GitHub CLI (`gh`) installed and authenticated +- Required Python packages: none (uses only standard library) + +## Installation + +1. Install the GitHub CLI: + ```bash + # macOS + brew install gh + + # Windows + winget install --id GitHub.cli + + # Ubuntu/Debian + sudo apt install gh + ``` + +2. Authenticate with GitHub: + ```bash + gh auth login + ``` + +3. Download the script and make it executable: + ```bash + chmod +x gh_issues_pr.py + ``` + +## Usage + +Basic syntax: +```bash +./gh_issues_pr.py <owner/repo> [options] +``` + +### Options + +| Option | Description | +|--------|-------------| +| `-t, --type` | Item type to fetch: `issue` (default) or `pr` | +| `-f, --format` | Output format: `pretty` (default), `json`, or `csv` | +| `-s, --state` | Item state to fetch: `open` (default), `closed`, or `all` | +| `-m, --message` | Include specific message number (0 for body, 1+ for comments) | +| `-v, --verbose` | Include all messages (body and comments) | +| `--debug` | Enable debug output | + +### Examples + +1. View open issues (default): + ```bash + ./gh_issues_pr.py microsoft/vscode + ``` + +2. View open pull requests: + ```bash + ./gh_issues_pr.py microsoft/vscode -t pr + ``` + +3. Get closed PRs with their comments in JSON format: + ```bash + ./gh_issues_pr.py microsoft/vscode -t pr -s closed -v -f json + ``` + +4. View specific comment (number 1) on an issue: + ```bash + ./gh_issues_pr.py microsoft/vscode -m 1 + ``` + +5. Export all open issues to CSV: + ```bash + ./gh_issues_pr.py microsoft/vscode -f csv > issues.csv + ``` + +6. Debug mode for troubleshooting: + ```bash + ./gh_issues_pr.py microsoft/vscode --debug + ``` + +## Output Formats + +### Pretty Format (default) +Displays items in a human-readable format with clear sections: + +``` +PR #1234: Sample Pull Request Title +-------------------------------------------------------------------------------- +State: open +Created: 2024-01-01 10:00:00 by username +Updated: 2024-01-02 15:30:00 +Branch: feature-branch → main +Status: Ready +Mergeable: clean +Changes: +100 -50 in 10 files +Commits: 5 +Comments: 3 (Reviews: 2) +URL: https://github.com/owner/repo/pull/1234 + +Messages: +-------------------------------------------------------------------------------- +[Message #0 - body by username at 2024-01-01 10:00:00] +Original description here... + +[Message #1 - comment by reviewer at 2024-01-01 11:00:00] +Regular comment here... + +[Message #2 - review_comment by reviewer at 2024-01-01 12:00:00] +File: src/main.js, Line: 42, Commit: abc1234 +Code review comment here... 
+``` + +### JSON Format +Outputs structured data with all fields: +```json +{ + "number": 1234, + "title": "Sample Pull Request", + "state": "open", + "created_at": "2024-01-01 10:00:00", + "updated_at": "2024-01-02 15:30:00", + "author": "username", + "branch": "feature-branch", + "base_branch": "main", + "mergeable_state": "clean", + "draft": false, + "comments": 3, + "review_comments": 2, + "messages": [ + { + "number": 0, + "type": "body", + "author": "username", + "created_at": "2024-01-01 10:00:00", + "body": "..." + }, + { + "number": 1, + "type": "review_comment", + "author": "reviewer", + "created_at": "2024-01-01 12:00:00", + "body": "...", + "path": "src/main.js", + "line": 42, + "commit_id": "abc1234" + } + ] +} +``` + +### CSV Format +Outputs in CSV format, ideal for spreadsheet analysis. Includes all base fields and can include messages when requested. + +## Message Types + +When using `-v` or `-m` flags, the tool fetches three types of messages: + +1. **body** (message #0): The original issue/PR description +2. **comment**: Regular comments on the issue/PR +3. **review_comment** (PR only): Code review comments with additional context: + - File path + - Line number + - Commit hash + +Messages are sorted chronologically and numbered sequentially. + +## Error Handling + +The script includes comprehensive error handling: +- GitHub CLI installation/authentication checks +- Invalid repository format detection +- API rate limiting handling +- Network connectivity issues +- Missing field handling +- Debug mode for detailed error information + +## Limitations + +- Requires GitHub CLI installation and authentication +- Subject to GitHub API rate limits +- Performance may be slower for repos with many issues/PRs when using `-v` +- CSV format may not handle newlines in messages well +- Review comments are only available for PRs + +## Contributing + +Feel free to submit issues and enhancement requests! + +## License + +This project is licensed under the MIT License - see the LICENSE file for details. 
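+## Example CSV output + +For quick reference, here is a minimal sketch of what the CSV output described above might look like when exporting open issues. The repository name and all field values below are placeholders; the column order follows the fields assembled by `clean_item` in `gh_issues_pr.py`, and extra columns (such as `closed_at`, or the PR-specific fields when using `-t pr`) appear only when present in the data: + +```bash +./gh_issues_pr.py owner/repo -f csv | head -n 2 +# number,title,state,created_at,updated_at,author,labels,assignees,url,comments +# 42,Example issue title,open,2024-01-01 10:00:00,2024-01-02 15:30:00,username,bug,,https://github.com/owner/repo/issues/42,3 +``` 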
diff --git a/issues_prs/gh_issues_pr.py b/issues_prs/gh_issues_pr.py new file mode 100644 index 0000000..733d664 --- /dev/null +++ b/issues_prs/gh_issues_pr.py @@ -0,0 +1,245 @@ +#!/usr/bin/env python3 +import argparse +import json +import subprocess +import sys +import csv +from datetime import datetime +from typing import List, Dict, Optional, Union +import io + +class GitHubFetcher: + def __init__(self, debug=False): + self.debug = debug + self.verify_gh_installation() + + def log(self, message): + """Debug logging""" + if self.debug: + print(f"DEBUG: {message}", file=sys.stderr) + + def verify_gh_installation(self): + """Verify that the GitHub CLI is installed and available on PATH.""" + try: + result = subprocess.run(['gh', '--version'], capture_output=True, text=True, check=True) + self.log(f"gh version: {result.stdout}") + except (subprocess.CalledProcessError, FileNotFoundError) as e: + sys.exit(f"Error: GitHub CLI (gh) is not installed or not in PATH.\nDetails: {str(e)}") + + def run_gh_command(self, cmd): + """Run a gh command and handle errors""" + self.log(f"Running command: gh {' '.join(cmd)}") + try: + result = subprocess.run(['gh'] + cmd, capture_output=True, text=True, check=True) + self.log(f"Command output length: {len(result.stdout)} bytes") + return result.stdout + except subprocess.CalledProcessError as e: + print(f"Error executing command: gh {' '.join(cmd)}", file=sys.stderr) + print(f"Error output: {e.stderr}", file=sys.stderr) + raise + + def fetch_items(self, repo: str, state: str = 'open', message_num: Optional[int] = None, + verbose: bool = False, item_type: str = 'issue') -> List[Dict]: + """Fetch issues or pull requests from GitHub repository.""" + try: + # Construct the API query + if item_type == 'pr': + # For PRs, use the pulls endpoint with the state as a query parameter + query = f'repos/{repo}/pulls?state={state}' + params = ['api', query] + else: + # For issues, pass the state as a query parameter rather than via --field + # (a --field flag would turn the request into a POST and break --paginate), + # and filter out pull requests, which the issues endpoint also returns + endpoint = f'repos/{repo}/issues?state={state}' + params = ['api', endpoint, '--jq', '.[] | select(.pull_request == null)'] + + # Add pagination + params.append('--paginate') + + # Execute API command + self.log(f"Fetching {item_type}s with state {state} from {repo}") + api_output = self.run_gh_command(params) + + # Parse JSON response + try: + # The output might be multiple JSON objects, one per line + if '\n' in api_output.strip(): + items = [json.loads(line) for line in api_output.splitlines() if line.strip()] + # Flatten the list if it's nested + if items and isinstance(items[0], list): + items = [item for sublist in items for item in sublist] + else: + items = json.loads(api_output) + if not isinstance(items, list): + items = [items] + + self.log(f"Fetched {len(items)} items") + except json.JSONDecodeError as e: + print(f"Error parsing JSON response. 
Raw output:", file=sys.stderr) + print(api_output, file=sys.stderr) + raise + + # Clean items + cleaned_items = [] + for item in items: + cleaned_item = self.clean_item(item, repo, message_num, verbose, item_type) + if cleaned_item: # Skip any None returns from clean_item + cleaned_items.append(cleaned_item) + + self.log(f"Processed {len(cleaned_items)} items after filtering") + return cleaned_items + + except Exception as e: + print(f"Error fetching items: {str(e)}", file=sys.stderr) + raise + + def clean_item(self, item: Dict, repo: str, message_num: Optional[int] = None, + verbose: bool = False, item_type: str = 'issue') -> Optional[Dict]: + """Clean and format an issue or PR data.""" + try: + cleaned = { + 'number': item['number'], + 'title': item['title'], + 'state': item['state'], + 'created_at': datetime.fromisoformat(item['created_at'].replace('Z', '+00:00')).strftime('%Y-%m-%d %H:%M:%S'), + 'updated_at': datetime.fromisoformat(item['updated_at'].replace('Z', '+00:00')).strftime('%Y-%m-%d %H:%M:%S'), + 'author': item['user']['login'], + 'labels': ','.join(label['name'] for label in item.get('labels', [])), + 'assignees': ','.join(assignee['login'] for assignee in item.get('assignees', [])), + 'url': item['html_url'] + } + + # Get comments count from the right field depending on type + if item_type == 'pr': + cleaned['comments'] = item.get('review_comments', 0) + item.get('comments', 0) + else: + cleaned['comments'] = item.get('comments', 0) + + if item.get('closed_at'): + cleaned['closed_at'] = datetime.fromisoformat(item['closed_at'].replace('Z', '+00:00')).strftime('%Y-%m-%d %H:%M:%S') + + # Add PR-specific fields if it's a PR + if item_type == 'pr': + try: + cleaned.update({ + 'branch': item['head']['ref'], + 'base_branch': item['base']['ref'], + 'draft': item.get('draft', False), + 'merged': item.get('merged', False), + 'mergeable_state': item.get('mergeable_state', 'unknown'), + 'commits': item.get('commits', 0), + 'changed_files': item.get('changed_files', 0), + 'additions': item.get('additions', 0), + 'deletions': item.get('deletions', 0), + 'review_comments': item.get('review_comments', 0), + 'comments': item.get('comments', 0), + 'maintainer_can_modify': item.get('maintainer_can_modify', False), + 'rebaseable': item.get('rebaseable', False) + }) + + if item.get('merged_at'): + cleaned['merged_at'] = datetime.fromisoformat(item['merged_at'].replace('Z', '+00:00')).strftime('%Y-%m-%d %H:%M:%S') + except KeyError as e: + self.log(f"Warning: Missing PR field {e} in item {item['number']}") + + return cleaned + + except Exception as e: + print(f"Error cleaning item {item.get('number', 'unknown')}: {str(e)}", file=sys.stderr) + self.log(f"Raw item data: {json.dumps(item, indent=2)}") + return None + + def output_pretty(self, items: List[Dict], item_type: str = 'issue'): + """Output items in a human-readable format.""" + if not items: + print("No items found.") + return + + for item in items: + print(f"\n{'PR' if item_type == 'pr' else 'Issue'} #{item['number']}: {item['title']}") + print("-" * 80) + print(f"State: {item['state']}") + print(f"Created: {item['created_at']} by {item['author']}") + print(f"Updated: {item['updated_at']}") + + if 'closed_at' in item: + print(f"Closed: {item['closed_at']}") + if 'merged_at' in item: + print(f"Merged: {item['merged_at']}") + + if item['assignees']: + print(f"Assignees: {item['assignees']}") + if item['labels']: + print(f"Labels: {item['labels']}") + + if item_type == 'pr': + print(f"Comments: {item['comments']} (Reviews: 
{item['review_comments']})") + else: + print(f"Comments: {item['comments']}") + + print(f"URL: {item['url']}") + + if item_type == 'pr': + print(f"Branch: {item['branch']} → {item['base_branch']}") + print(f"Status: {'Draft' if item['draft'] else 'Ready'}") + print(f"Mergeable: {item['mergeable_state']}") + print(f"Changes: +{item['additions']} -{item['deletions']} in {item['changed_files']} files") + print(f"Commits: {item['commits']}") + print(f"Maintainer can modify: {'Yes' if item['maintainer_can_modify'] else 'No'}") + + def output_json(self, items: List[Dict]): + """Output items in JSON format.""" + if not items: + print("[]") + return + print(json.dumps(items, indent=2)) + + def output_csv(self, items: List[Dict]): + """Output items in CSV format.""" + if not items: + print("No items found.") + return + + output = io.StringIO() + writer = csv.DictWriter(output, fieldnames=items[0].keys()) + writer.writeheader() + writer.writerows(items) + print(output.getvalue()) + output.close() + +def main(): + parser = argparse.ArgumentParser(description='Fetch GitHub issues or pull requests from a repository') + parser.add_argument('repo', help='Repository in format owner/repo') + parser.add_argument('-t', '--type', choices=['issue', 'pr'], + default='issue', help='Type of items to fetch (default: issue)') + parser.add_argument('-f', '--format', choices=['json', 'csv', 'pretty'], + default='pretty', help='Output format (default: pretty)') + parser.add_argument('-s', '--state', choices=['open', 'closed', 'all'], + default='open', help='Item state to fetch (default: open)') + parser.add_argument('--debug', action='store_true', + help='Enable debug output') + + args = parser.parse_args() + + try: + fetcher = GitHubFetcher(debug=args.debug) + items = fetcher.fetch_items(args.repo, args.state, item_type=args.type) + + # Output based on selected format + if args.format == 'json': + fetcher.output_json(items) + elif args.format == 'csv': + fetcher.output_csv(items) + else: + fetcher.output_pretty(items, args.type) + + except Exception as e: + print(f"Error: {str(e)}", file=sys.stderr) + if args.debug: + import traceback + traceback.print_exc() + sys.exit(1) + +if __name__ == "__main__": + main() diff --git a/issues_prs/issues.json b/issues_prs/issues.json new file mode 100644 index 0000000..3cbc83b --- /dev/null +++ b/issues_prs/issues.json @@ -0,0 +1,2740 @@ +[ + { + "number": 523, + "title": "Add services from plugins", + "state": "open", + "created_at": "2024-11-22 22:51:29", + "updated_at": "2024-11-22 23:38:18", + "author": "0xFlicker", + "labels": "", + "assignees": "", + "comments": 1, + "url": "https://github.com/ai16z/eliza/pull/523", + "messages": [ + { + "number": 0, + "author": "0xFlicker", + "created_at": "2024-11-22T22:51:29Z", + "body": "\r\n\r\n# Relates to:\r\nn/a\r\n\r\n\r\n\r\n\r\n\r\n# Risks\r\n\r\nlow to medium. anyone using the node plugin and not loading these services would. this could cause errors? but probably stuff that didn't work starts working\r\n\r\n\r\n\r\n# Background\r\n\r\nSpun up an eliza bot from eliza-starter (which I also separetly have some updates for)\r\n\r\n## What does this PR do?\r\n\r\nNoticed that for the nodePlugin, the services it defines are not available from the agent runtime when calling `getService`. 
This change made it so that nodePlugin automatically brings in these services and the associated actions worked for me: https://github.com/ai16z/eliza/blob/main/packages/plugin-node/src/index.ts#L19-L25\r\n\r\n## What kind of change is this?\r\n\r\nBug fix\r\n\r\n\r\n\r\n\r\n\r\n# Documentation changes needed?\r\n\r\nNone, eliza-starter and other nodePlugin consumers should just \"work better\"\r\n\r\n\r\n\r\n\r\n# Testing\r\n\r\n## Where should a reviewer start?\r\n\r\nExample code such as https://github.com/ai16z/eliza/blob/main/docs/docs/packages/agent.md should work as expected\r\n\r\n## Detailed testing steps\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n" + }, + { + "number": 1, + "author": "ponderingdemocritus", + "created_at": "2024-11-22T23:38:17Z", + "body": "this is already being done - not sure why this needs to be here" + } + ] + }, + { + "number": 522, + "title": "fix: fixing failing goals, cache and token tests", + "state": "open", + "created_at": "2024-11-22 22:49:20", + "updated_at": "2024-11-22 22:49:20", + "author": "ai16z-demirix", + "labels": "", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/pull/522", + "messages": [ + { + "number": 0, + "author": "ai16z-demirix", + "created_at": "2024-11-22T22:49:20Z", + "body": "\r\n\r\n# Relates to:\r\nhttps://github.com/ai16z/eliza/issues/519\r\n\r\n\r\n\r\n\r\n# Risks\r\nlow - fixing tests\r\n\r\n\r\n# Background\r\n\r\n## What does this PR do?\r\nFixing goals, cache and token tests\r\n## What kind of change is this?\r\nBug-fixes\r\n\r\n\r\n\r\n\r\nFixing tests to prevent flaky tests.\r\n# Documentation changes needed?\r\nNone\r\n\r\n\r\n\r\n\r\n# Testing\r\n\r\n## Where should a reviewer start?\r\nnavigate to packages/core\r\n## Detailed testing steps\r\nrun pnpm install, pnpm test\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n" + } + ] + }, + { + "number": 521, + "title": "fix: ollama local and llama local", + "state": "open", + "created_at": "2024-11-22 21:12:17", + "updated_at": "2024-11-22 23:51:29", + "author": "yodamaster726", + "labels": "", + "assignees": "", + "comments": 5, + "url": "https://github.com/ai16z/eliza/pull/521", + "messages": [ + { + "number": 0, + "author": "yodamaster726", + "created_at": "2024-11-22T21:12:17Z", + "body": "\r\n\r\n# Ollama local broken and local llama issues\r\n[Relates partially to issue #443 ](https://github.com/ai16z/eliza/issues/443)\r\n\r\n\r\n\r\n# Risks\r\n\r\nLow risk - affects only the Llama and Ollama - llama.ts\r\n\r\n# Background\r\nEliza is supposed to be able to work with Ollama local and it was broken in several ways. Wrong embeddings were being loaded. Local llama was being downloaded even when the Ollama local was configured.\r\n\r\nModel can be selected in the character by setting the ModelProviderName to LLAMALOCAL or OLLAMA\r\nex: modelProvider: ModelProviderName.LLAMALOCAL\r\n\r\ndefault ModelProvider can be set on the .env file by setting the OLLAMA_MODEL= env variable\r\n\r\n## What does this PR do?\r\n- Fixes loading the correct embeddings file to be used depending on which llama model provider is selected\r\n- Improved logging in embedddings, generation.\r\n- Fixed bug in logger - was looking for lowercase environment variable instead of VERBOSE=true \r\n- Improved the download progress indicator for the local llama model and other files. 
\r\n- Added a new method to logger for download progress % indicator\r\n- I- Model provider validation in AgentRuntime and additional error logging\r\n- Improved download and error logging in the Image provider\r\n- Improved model provider in llama.ts\r\n\r\n## What kind of change is this?\r\nBug fixes and improvements\r\n\r\n\r\n\r\n\r\n# Documentation changes needed?\r\nNo, but might want to document the VERBOSE=true for more detailed logging for debugging purposes\r\n\r\n\r\n\r\n\r\n\r\n# Testing\r\nTested for each of the providers LLAMALOCAL or OLLAMA\r\n\r\n## Where should a reviewer start?\r\n\r\n## Detailed testing steps\r\n- update the model provider part for defaultCharacter.ts to one of the above,\r\n- pnpm build\r\n- pnpm start\r\n\r\nFor ollama local, you will need to install Ollama software and install the models that you will be using.\r\n- mxbai-embed-large:latest for the embeddings\r\n- hermes3:latest for the LLM\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n" + }, + { + "number": 1, + "author": "ponderingdemocritus", + "created_at": "2024-11-22T22:22:31Z", + "body": "This is good and was working locally however few issues:\r\n\r\n- LLAMA is still downloaded if external provider is used. Can you fix this." + }, + { + "number": 2, + "author": "yodamaster726", + "created_at": "2024-11-22T22:52:53Z", + "body": "> This is good and was working locally however few issues:\r\n> \r\n> * LLAMA is still downloaded if external provider is used. Can you fix this.\r\n@ponderingdemocritus \r\n\r\nWhich external provider ? OpenAI ? Where did you define it? In the .env file or character?\r\n\r\nI tested the different variations with OLLAMA ?\r\n" + }, + { + "number": 3, + "author": "ponderingdemocritus", + "created_at": "2024-11-22T23:23:40Z", + "body": "I tested with anthropic and it still inits OLLAMA\r\n\r\nenv is within character file" + }, + { + "number": 4, + "author": "yodamaster726", + "created_at": "2024-11-22T23:36:50Z", + "body": "> I tested with anthropic and it still inits OLLAMA\r\n> \r\n> env is within character file\r\n\r\nI updated defaultCharacter.ts with ANTHROPIC and it didn't try to download the llamalocal model. It attempts to call the antropic api. Do you have anything else in your .env related to llama definited ? Are you on the latest code ? \r\n\r\n![image](https://github.com/user-attachments/assets/eec8a775-a0e2-429e-bcdf-555f33c8414b)\r\n" + }, + { + "number": 5, + "author": "ponderingdemocritus", + "created_at": "2024-11-22T23:39:15Z", + "body": "in the logs it tells me it has initalised " + } + ] + }, + { + "number": 519, + "title": "Tests failing - token.test.ts failing because it is commented out. Cache and goals tests are failing because jest is now switched with vitest", + "state": "open", + "created_at": "2024-11-22 20:14:58", + "updated_at": "2024-11-22 20:14:58", + "author": "ai16z-demirix", + "labels": "bug", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/519", + "messages": [ + { + "number": 0, + "author": "ai16z-demirix", + "created_at": "2024-11-22T20:14:58Z", + "body": "**Describe the bug**\r\n\r\n\r\nTests are failing: \r\ntoken.test.ts is failing because it is empty (whole file is commented out). Instead of commenting out, test should be skipped. 
\r\ngoals and cache tests are failing because jest is used for mock functions, but jest is now switched with vitest.\r\n\r\n**To Reproduce**\r\n\r\n\r\nNavigate to packages/core and run pnpm test\r\n**Expected behavior**\r\nTests should not fail because of their setup.\r\n\r\n\r\n**Screenshots**\r\n\r\n\r\n\r\n**Additional context**\r\n\r\n\r\n" + } + ] + }, + { + "number": 518, + "title": "integrate tavily", + "state": "open", + "created_at": "2024-11-22 16:27:12", + "updated_at": "2024-11-22 16:27:47", + "author": "tcm390", + "labels": "", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/pull/518", + "messages": [ + { + "number": 0, + "author": "tcm390", + "created_at": "2024-11-22T16:27:12Z", + "body": "related: https://github.com/ai16z/eliza/issues/363\r\n\r\n\"Screenshot\r\n" + } + ] + }, + { + "number": 517, + "title": "feat: Add buttplug.io integration", + "state": "open", + "created_at": "2024-11-22 12:36:20", + "updated_at": "2024-11-22 21:35:10", + "author": "8times4", + "labels": "", + "assignees": "", + "comments": 4, + "url": "https://github.com/ai16z/eliza/pull/517", + "messages": [ + { + "number": 0, + "author": "8times4", + "created_at": "2024-11-22T12:36:20Z", + "body": "\r\n\r\n# Relates to:\r\n\r\n\r\nhttps://fxtwitter.com/shawmakesmagic/status/1859888258358137158\r\n\r\n\r\n\r\n# Risks\r\n\r\n\r\nLow (may buzz your bum tho')\r\n\r\n# Background\r\n\r\n## What does this PR do?\r\nThis adds a Buttplug.io service to allow Eliza to vibrate intimate toys using a local intiface server.\r\n\r\n## What kind of change is this?\r\nFeatures (non-breaking change which adds functionality)\r\n\r\n\r\n\r\n\r\n\r\n\r\n# Documentation changes needed?\r\nIf a docs change is needed: I have updated the documentation accordingly.\r\n\r\n\r\n\r\n\r\n\r\n# Testing\r\n\r\n## Detailed testing steps\r\n\r\nRequires bun for now and Intiface-central app. 
\r\nSet up intiface-central:\r\nApp Modes(menu) - > Show Advanced/Experimental(toggle) - > Device Websocket Server(toggle) -> Devices(menu)\r\n-> Websocket Devices(Advanced) -> add a `lovense` protocol device with name: `Eliza Buttplug Device`\r\n\r\nStart the server, then `cd packages/plugin-buttplug` `pnpm run test-via-bun`\r\n\r\nExpected output:\r\n```\r\n> @ai16z/plugin-buttplug@0.1.4-alpha.3 test-via-bun .../eliza/packages/plugin-buttplug\r\n> bun test/simulate.ts\r\n\r\n[fake-buttplug] Starting simulator service\r\n[fake-buttplug] Initializing device simulator\r\n[fake-buttplug] Attempting to connect to ws://127.0.0.1:54817\r\n[fake-buttplug] Connection established\r\n[fake-buttplug] Sending handshake: {\"identifier\":\"Eliza Buttplug Device\",\"address\":\"8A3D9FAC2A45\",\"version\":0}\r\n[fake-buttplug] Connection closed: {\r\n code: 1000,\r\n reason: \"\",\r\n}\r\nUsing real Buttplug device: Lovense Hush\r\nStarting test sequence with: Lovense Hush\r\nVibrating at 50%\r\n[Simulation] Vibrating Lovense Hush at 50%\r\nStopping device\r\n[Simulation] Stopping Lovense Hush\r\nTest sequence completed\r\n```\r\n\r\nOther test: \r\n\r\n`bun run packages/plugin-buttplug/test/fake-buttplug.ts`\r\n```\r\n[fake-buttplug] Starting simulator service\r\n[fake-buttplug] Initializing device simulator\r\n[fake-buttplug] Attempting to connect to ws://127.0.0.1:54817\r\n[fake-buttplug] Connection established\r\n[fake-buttplug] Sending handshake: {\"identifier\":\"Eliza Buttplug Device\",\"address\":\"8A3D9FAC2A45\",\"version\":0}\r\n```\r\n\r\nstart Eliza with some character conf, i.e:\r\n```\r\n plugins: [buttplugPlugin],\r\n // clients: [],\r\n modelProvider: ModelProviderName.GROQ,\r\n settings: {\r\n secrets: {},\r\n buttplug: true,\r\n voice: {\r\n model: \"en_US-hfc_female-medium\",\r\n },\r\n },\r\n\r\n```\r\nAsk Eliza: \"Vibrate for 10 seconds\"\r\n\r\nOutput:\r\n\r\n```\r\n\r\nYou: Vibrate for 10 seconds\r\n [\"\u25ce Querying knowledge for: vibrate for 10 seconds\"] \r\n\r\n [\"\u25ce Genarating message response..\"] \r\n\r\n [\"\u25ce Genarating text...\"] \r\n\r\nInitializing Groq model.\r\nReceived response from Groq model.\r\n \u25ce LOGS\r\n Evaluating \r\n GET_FACTS \r\n\r\n \u25ce LOGS\r\n Evaluating \r\n UPDATE_GOAL \r\n\r\n [\"\u2713 Normalized action: vibrate\"] \r\n\r\n [\"\u2713 Executing handler for action: VIBRATE\"] \r\n\r\nPreferred device Eliza Buttplug Device not found, using first available device\r\nPreferred device Eliza Buttplug Device not found, using first available device\r\nAgent: Vibrating at 50% intensity for 2000ms\r\nAgent: let's not get sidetracked by the buzzing, what do you think about using fractals to model chaotic systems?\r\n\r\n```\r\n\r\n\r\n`fake-buttplug.ts` output, unless you own a device:\r\n\r\n```\r\n[fake-buttplug] Received: DeviceType;\r\n[fake-buttplug] Responding to DeviceType with device info\r\n[fake-buttplug] Received: Vibrate:0;\r\n[fake-buttplug] Received: Vibrate:10;\r\n[fake-buttplug] Received: Vibrate:0;\r\n[fake-buttplug] Received: Vibrate:10;\r\n[fake-buttplug] Received: Vibrate:0;\r\n```\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n" + }, + { + "number": 1, + "author": "awidearray", + "created_at": "2024-11-22T15:10:07Z", + "body": "what do i need to buy to test this though, ya know... for research" + }, + { + "number": 2, + "author": "8times4", + "created_at": "2024-11-22T17:14:01Z", + "body": "> what do i need to buy to test this though, ya know... 
for research\r\n\r\nhere\u2019s an affiliate link just for ya \u2620\ufe0f: https://www.lovense.com/r/rf3aal\r\n" + }, + { + "number": 3, + "author": "lalalune", + "created_at": "2024-11-22T18:09:18Z", + "body": "DUDE you are awesome. DM or drop your wallet address please" + }, + { + "number": 4, + "author": "8times4", + "created_at": "2024-11-22T18:47:46Z", + "body": "> DUDE you are awesome. DM or drop your wallet address please\r\n\r\nhahaha, thank you @lalalune AZ3UK5iEH3UbxLpt9s7gpMwfm88ptWtccbJ1sB6n72WL, also sent it to you in Discord DM with same timestamp for validation" + } + ] + }, + { + "number": 516, + "title": "feat: Add decentralized inferencing for Eliza (LLAMA, Hermes, Flux)", + "state": "open", + "created_at": "2024-11-22 08:52:30", + "updated_at": "2024-11-22 21:35:26", + "author": "genesis-0000", + "labels": "", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/pull/516", + "messages": [ + { + "number": 0, + "author": "genesis-0000", + "created_at": "2024-11-22T08:52:30Z", + "body": "# Risks\r\n\r\nLow\r\n\r\n# Background\r\n\r\n## What does this PR do?\r\n\r\nAdd decentralized inferencing for Eliza (LLAMA, Hermes, Flux)\r\n\r\n## What kind of change is this?\r\n\r\nAdd more inference provider\r\n\r\n## Why are we doing this? Any context or related work?\r\n\r\nCurrently, Eliza supports only OpenAI. However, centralized AI providers limit user control, pose trust issues, and create single points of failure with high costs and restricted access. Decentralized AI offers a more transparent, accessible, and resilient alternative, empowering users with greater autonomy.\r\n\r\nLearn more about our decentralized inference AI: https://eternalai.org/api\r\n\r\n\r\n# Documentation changes needed?\r\n\r\nMy changes require a change to the project documentation.\r\n\r\n# Testing\r\n\r\n## Where should a reviewer start?\r\n\r\nReviewers start from here https://github.com/ai16z/eliza/pull/516/files#diff-1b7f90a5457ad849ec6200d5075c621eb77e2b6566f44c4eb255bde4a4ed0b06\r\n\r\n## Detailed testing steps\r\n\r\nRun pnpm start --characters=\"path/to/your/characters/eternal.character.json\"\r\n\r\n\r\n# Deploy Notes\r\n\r\n## Database changes\r\n\r\nNo change\r\n\r\n## Deployment instructions\r\n\r\nChange .env by adding more\r\n\r\nETERNALAI_URL=https://api.eternalai.org/v1/chat/completions\r\nETERNALAI_API_KEY=***\r\nPOST_IMMEDIATELY=yes#optional" + } + ] + }, + { + "number": 507, + "title": "Non node.js environments have issues building (workers for instance)", + "state": "open", + "created_at": "2024-11-22 04:09:18", + "updated_at": "2024-11-22 04:10:09", + "author": "antpb", + "labels": "bug", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/507", + "messages": [ + { + "number": 0, + "author": "antpb", + "created_at": "2024-11-22T04:09:18Z", + "body": "**Describe the bug**\r\n\r\nNon node.js environments may have issues building because of some packages and how they are imported. \r\n\r\n**To Reproduce**\r\n\r\nMake a worker, include eliza/core and attempt to build. It will throw an error similar to \r\n\r\n```\r\n \u2718 [ERROR] No loader is configured for \".node\" files:\r\n node_modules/@anush008/tokenizers-darwin-universal/tokenizers.darwin-universal.node\r\n\r\n node_modules/@anush008/tokenizers/index.js:116:32:\r\n 116 \u2502 ... 
nativeBinding = require('@anush008/tokenizers-darwin-universal')\r\n ```\r\n\r\n\r\n**Expected behavior**\r\n\r\nBuild successful\r\n\r\n**Screenshots**\r\n\r\n\"image\"\r\n\r\nPR incoming" + } + ] + }, + { + "number": 506, + "title": "Non node.js environments have issues building (workers for instance)", + "state": "open", + "created_at": "2024-11-22 04:07:31", + "updated_at": "2024-11-22 06:25:46", + "author": "antpb", + "labels": "bug", + "assignees": "", + "comments": 1, + "url": "https://github.com/ai16z/eliza/issues/506", + "messages": [ + { + "number": 0, + "author": "antpb", + "created_at": "2024-11-22T04:07:31Z", + "body": "**Describe the bug**\r\n\r\n\r\n\r\n**To Reproduce**\r\n\r\n\r\n\r\n**Expected behavior**\r\n\r\n\r\n\r\n**Screenshots**\r\n\r\n\r\n\r\n**Additional context**\r\n\r\n\r\n" + }, + { + "number": 1, + "author": "yodamaster726", + "created_at": "2024-11-22T06:25:45Z", + "body": "this seems to be a dup of #507 \r\n" + } + ] + }, + { + "number": 491, + "title": "Add image generation capability to Telegram messaging", + "state": "open", + "created_at": "2024-11-21 19:34:10", + "updated_at": "2024-11-22 07:06:21", + "author": "0xTomDaniel", + "labels": "", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/pull/491", + "messages": [ + { + "number": 0, + "author": "0xTomDaniel", + "created_at": "2024-11-21T19:34:10Z", + "body": "# Relates to:\r\n\r\nN/A\r\n\r\n# Risks\r\n\r\nLow. Image generation could be affected\r\n\r\n# Background\r\n\r\n## What does this PR do?\r\n\r\n- Completes image generation functionality for Together provider\r\n- Adds image generation functionality to Telegram\r\n\r\n## What kind of change is this?\r\n\r\nBug fixes (non-breaking change which fixes an issue)\r\nImprovements (misc. changes to existing features)\r\nFeatures (non-breaking change which adds functionality)\r\n\r\n\r\n\r\n\r\n\r\n\r\n# Testing\r\n\r\n## Where should a reviewer start?\r\n\r\nJust follow testing steps below.\r\n\r\n## Detailed testing steps\r\n\r\n1. (optionally) Add `\"imageModelProvider\": \"llama_cloud\"` to a character file\r\n2. (optionally) Add a Together API key if using `llama_cloud` provider. Otherwise ensure there's an API key for the chosen `modelProvider\"\r\n3. Ask the agent on Telegram to generate an image.\r\n4. Verify that an image is returned as requested." + } + ] + }, + { + "number": 481, + "title": "feat: add Conflux plugin", + "state": "open", + "created_at": "2024-11-21 09:56:59", + "updated_at": "2024-11-21 10:35:00", + "author": "darwintree", + "labels": "", + "assignees": "", + "comments": 1, + "url": "https://github.com/ai16z/eliza/pull/481", + "messages": [ + { + "number": 0, + "author": "darwintree", + "created_at": "2024-11-21T09:56:59Z", + "body": "# Relates to:\r\n\r\n\r\nNo specific issue or ticket linked.\r\n\r\n# Risks\r\n\r\nLow. The changes primarily involve adding new functionalities and templates for interacting with the Conflux network, which is isolated from other projects.\r\n\r\n# Background\r\n\r\n## What does this PR do?\r\n\r\nThis PR introduces a plugin for interacting with the Conflux network, providing actions and templates for token transactions such as buying, selling, transferring, and bridging tokens across different spaces within the Conflux network.\r\n\r\n## What kind of change is this?\r\n\r\nFeatures (non-breaking change which adds functionality)\r\n\r\n# Documentation changes needed?\r\n\r\nMy changes require a change to the project documentation. 
The README should be updated to include usage instructions for the new plugin functionalities.\r\n\r\n# Testing\r\n\r\n## Where should a reviewer start?\r\n\r\nReviewers should start by examining the main plugin configuration and action files, particularly focusing on the `confiPump.ts`, `transfer.ts`, and `bridgeTransfer.ts` files.\r\n\r\n## Detailed testing steps\r\n\r\n1. Correctly set the character file on your disk (can use the example file at the end)\r\n2. Set the environment arguments correctly (as well as the model key)\r\n\r\n```\r\nCONFLUX_CORE_PRIVATE_KEY=0xxxxxxxxxx\r\nCONFLUX_CORE_SPACE_RPC_URL=https://test.confluxrpc.com\r\nCONFLUX_ESPACE_PRIVATE_KEY=0xxxxxxxxxxx\r\nCONFLUX_ESPACE_RPC_URL=https://evmtest.confluxrpc.com\r\nCONFLUX_MEME_CONTRACT_ADDRESS=0xA016695B5E633399027Ec36941ECa4D5601aBEac\r\n```\r\n3. Use the character file to start the conversation `pnpm start --character=\"....\"`\r\n\r\nSet \r\n\r\n```json\r\n{\r\n \"name\": \"crypto_expert\",\r\n \"clients\": [],\r\n \"modelProvider\": \"openai\", \r\n \"people\": [],\r\n \"settings\": {\r\n \"secrets\": {\r\n \"CONFLUX_CORE_PRIVATE_KEY\": \"0xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx\"\r\n },\r\n \"model\": \"gpt-4o-mini\",\r\n \"embeddingsModel\": \"text-embedding-3-large\",\r\n \"openai\": {\r\n \"model\": \"gpt-4o-mini\",\r\n \"temperature\": 0.7,\r\n \"max_tokens\": 4096\r\n }\r\n },\r\n \"plugins\": [\"@ai16z/plugin-conflux\"],\r\n \"bio\": [\r\n \"The Crypto Expert is a seasoned investor and blockchain enthusiast.\",\r\n \"He has been involved in the cryptocurrency space since the early days of Bitcoin.\",\r\n \"He believes in the transformative power of blockchain technology and its potential to revolutionize industries.\",\r\n \"Known for his strategic insights and market predictions, he is a trusted advisor in the crypto community.\",\r\n \"He is always ready to help users execute cryptocurrency transactions, including sending, buying, and selling tokens on various blockchains.\"\r\n ],\r\n \"lore\": [\r\n \"The Crypto Expert started his journey in the world of finance before discovering the potential of cryptocurrencies.\",\r\n \"He is passionate about educating others on the benefits and risks of digital currencies.\",\r\n \"He has a reputation for accurately predicting market trends and advising on successful investment strategies.\",\r\n \"His expertise is sought after by both novice and experienced investors looking to navigate the volatile crypto markets.\",\r\n \"He takes pride in helping users complete their cryptocurrency transactions safely and efficiently.\"\r\n ],\r\n \"knowledge\": [],\r\n \"messageExamples\": [\r\n [\r\n {\r\n \"user\": \"{{user1}}\",\r\n \"content\": {\r\n \"text\": \"How do I get started with cryptocurrency?\"\r\n }\r\n },\r\n {\r\n \"user\": \"crypto_expert\",\r\n \"content\": {\r\n \"text\": \"Start by educating yourself on the basics of blockchain technology and different cryptocurrencies. Diversify your investments and never invest more than you can afford to lose. Stay informed about market trends and regulatory changes.\"\r\n }\r\n }\r\n ],\r\n [\r\n {\r\n \"user\": \"{{user1}}\",\r\n \"content\": {\r\n \"text\": \"What is the future of Bitcoin?\"\r\n }\r\n },\r\n {\r\n \"user\": \"crypto_expert\",\r\n \"content\": {\r\n \"text\": \"Bitcoin has established itself as a store of value and is often referred to as digital gold. Its future will depend on adoption rates, regulatory developments, and technological advancements. 
Keep an eye on these factors to make informed decisions.\"\r\n }\r\n }\r\n ]\r\n ],\r\n \"postExamples\": [\r\n \"\"\r\n ],\r\n \"topics\": [\r\n \"\"\r\n ],\r\n \"style\": {\r\n \"all\": [\r\n \"\"\r\n ],\r\n \"chat\": [\r\n \"\"\r\n ],\r\n \"post\": [\r\n \"\"\r\n ]\r\n },\r\n \"adjectives\": [\r\n \"\"\r\n ]\r\n }\r\n```\r\n\"image\"\r\n\"image\"\r\n\"image\"\r\n\r\n\r\n" + }, + { + "number": 1, + "author": "darwintree", + "created_at": "2024-11-21T10:00:27Z", + "body": "This pull request relies on https://github.com/ai16z/eliza/pull/445 as `generateObjectV2` is used" + } + ] + }, + { + "number": 480, + "title": "feat: Integrate Twitter V2 API ", + "state": "open", + "created_at": "2024-11-21 06:51:01", + "updated_at": "2024-11-21 19:33:27", + "author": "monilpat", + "labels": "", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/pull/480", + "messages": [ + { + "number": 0, + "author": "monilpat", + "created_at": "2024-11-21T06:51:01Z", + "body": "---\r\n\r\n### Relates to:\r\n\r\nNo specific issue or ticket linked. Sister PR (client side usage of): https://github.com/ai16z/agent-twitter-client/pull/3/files\r\n\r\n---\r\n\r\n### Risks\r\n\r\n**Risk Level**: Medium \r\n- **Potential Impacts**: \r\n - Breaks compatibility with existing Twitter V1 API usage if misconfigured. \r\n - Errors in authentication if API keys or tokens are invalid or not provided. \r\n\r\n---\r\n\r\n### Background\r\n\r\n#### What does this PR do? \r\nThis PR integrates support for the Twitter V2 API in the application while maintaining fallback compatibility with the V1 API for scenarios where V2 credentials are unavailable. Key features include: \r\n- Authentication using V2 API keys and tokens. \r\n- Enhanced tweet handling logic to differentiate between V1 and V2. \r\n- Updated `.env.example` file to include required settings for V2 API. \r\n\r\n#### What kind of change is this? \r\n- **Feature**: Adds functionality for Twitter V2 API integration. \r\n\r\n#### Why are we doing this? \r\nThe integration of the Twitter V2 API ensures better performance, additional functionality, and alignment with the latest Twitter developer standards. Maintaining V1 as a fallback ensures backward compatibility. \r\n\r\n---\r\n\r\n### Documentation changes needed?\r\n\r\n- [x] My changes require a change to the project documentation. \r\n- [ ] If a docs change is needed: I have updated the documentation accordingly. \r\n\r\nChanges to `.env.example` and setup instructions for V2 API keys must be added to the documentation.\r\n\r\n---\r\n\r\n### Testing\r\n\r\n#### Where should a reviewer start? \r\n1. Review changes in `packages/client-twitter/src/*` for updated API integration logic. \r\n2. Validate `.env.example` updates for configuration changes. \r\n\r\n#### Detailed testing steps \r\n1. Configure `.env` with V2 API keys and tokens. \r\n2. Run the application to verify successful authentication via V2. \r\n3. Remove V2 credentials and test fallback to V1. \r\n4. Perform end-to-end tweet retrieval and posting for both V1 and V2 modes. \r\n\r\n---\r\n\r\n### Screenshots\r\n\r\n#### Before \r\nN/A (No V2 integration)\r\n\r\n#### After \r\nN/A (No UI changes)\r\n\r\n---\r\n\r\n### Deploy Notes\r\n\r\n1. Ensure `.env` includes both V1 and V2 API credentials. \r\n2. Update deployment scripts to validate environment variables for the V2 API. \r\n\r\n---\r\n\r\n### Database changes \r\n\r\nNone. 
\r\n\r\n---\r\n\r\n### Deployment instructions \r\n\r\nStandard deployment process; no additional steps required. \r\n\r\n---\r\n\r\n### Discord username \r\n\r\n`@0x8664` for project communication and role assignment. " + } + ] + }, + { + "number": 477, + "title": "feat: Split off direct-client and terminal chat from agent", + "state": "open", + "created_at": "2024-11-21 05:46:30", + "updated_at": "2024-11-22 04:45:22", + "author": "reffan", + "labels": "", + "assignees": "", + "comments": 1, + "url": "https://github.com/ai16z/eliza/pull/477", + "messages": [ + { + "number": 0, + "author": "reffan", + "created_at": "2024-11-21T05:46:30Z", + "body": "\r\n\r\n# Relates to:\r\n\r\nSplitting off the direct-client and terminal chat from the main agent in order to achieve more control and insight. \r\n\r\n\r\n\r\n\r\n\r\n# Risks\r\n\r\nMedium\r\n\r\nUsers could be confused about the missing TERMINAL client without a proper docs update.\r\n\r\n\r\n\r\n# Background\r\n\r\n## What does this PR do?\r\n\r\nBoth the direct-client start() and chat() commands usually are fired when initializing the agent. This PR makes a small refactor to the agent, bringing it more in line with how clients usually get initialized. Also split of the chat() command and related functions to the packages/client-terminal. In order to enable the in line terminal chat again the client TERMINAL must be added to the character.\r\n\r\n## What kind of change is this?\r\n\r\nImprovements\r\n\r\n\r\n\r\n\r\n\r\n\r\nMy reasoning for why is to bring the starting of the clients more in line with normal procedures. It also grants more control over and insight into the agents making it less of a mystery why some things work and others don't.\r\n\r\n# Documentation changes needed?\r\n\r\nSlight changes needed, TERMINAL should be added and could/should be added to the defaultCharacter.\r\n\r\n\r\n\r\n\r\n\r\n# Testing\r\n\r\n## Where should a reviewer start?\r\n\r\n## Detailed testing steps\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n" + }, + { + "number": 1, + "author": "reffan", + "created_at": "2024-11-22T04:45:20Z", + "body": "I believe I've fixed all of the bugs now and modified it to work in multi agent mode. Also updated the types and character files so there will be no conflict with the quickstart docs experience." + } + ] + }, + { + "number": 471, + "title": "State should use a cosine similarity of messages in the DB ", + "state": "open", + "created_at": "2024-11-21 02:17:52", + "updated_at": "2024-11-21 02:17:52", + "author": "edisontim", + "labels": "", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/471", + "messages": [ + { + "number": 0, + "author": "edisontim", + "created_at": "2024-11-21T02:17:52Z", + "body": "Instead of getting all recent messages to generate the state, a similarity should be done on the vectors. 
\r\n\r\nThis way we only get relevant messages in the current state" + } + ] + }, + { + "number": 469, + "title": "Error when call `generateObjectV2`", + "state": "open", + "created_at": "2024-11-21 01:51:10", + "updated_at": "2024-11-21 01:51:41", + "author": "darwintree", + "labels": "bug", + "assignees": "", + "comments": 1, + "url": "https://github.com/ai16z/eliza/issues/469", + "messages": [ + { + "number": 0, + "author": "darwintree", + "created_at": "2024-11-21T01:51:10Z", + "body": "**Describe the bug**\r\n\r\nIf `generateObjectV2` is used, the passed modelClass is not processed appropriately and would produce a bug.\r\n\r\ntrimTokens receive the parameters `models` which is expected to be strings like `gpt-4o`. However, in generateObjectV2, modelClass, which is supposed to be `small`, \"medium\" or \"large\" is provided.\r\n\r\n```\r\ncontext = await trimTokens(context, max_context_length, modelClass);\r\n```\r\n\r\n```\r\nexport function trimTokens(context, maxTokens, model) {\r\n // Count tokens and truncate context if necessary\r\n const encoding = encoding_for_model(model as TiktokenModel);\r\n let tokens = encoding.encode(context);\r\n const textDecoder = new TextDecoder();\r\n if (tokens.length > maxTokens) {\r\n tokens = tokens.reverse().slice(maxTokens).reverse();\r\n\r\n context = textDecoder.decode(encoding.decode(tokens));\r\n }\r\n return context;\r\n}\r\n```\r\n\r\n**Screenshots**\r\n\r\n\"image\"\r\n\r\n**Additional context**\r\n\r\n\r\n" + }, + { + "number": 1, + "author": "darwintree", + "created_at": "2024-11-21T01:51:40Z", + "body": "A relating PR is created #445 " + } + ] + }, + { + "number": 467, + "title": "Bug: plugin-solana crash report", + "state": "open", + "created_at": "2024-11-21 00:57:23", + "updated_at": "2024-11-21 00:57:23", + "author": "odilitime", + "labels": "bug", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/467", + "messages": [ + { + "number": 0, + "author": "odilitime", + "created_at": "2024-11-21T00:57:23Z", + "body": "**Describe the bug**\r\n\r\nReading cached data from file for key: dexScreenerData_search_Degenai\r\nReturning cached search DexScreener data.\r\nfile:///root/eliza-dev/packages/plugin-solana/dist/index.js:732\r\n const currentLiquidity = currentPair.liquidity.usd;\r\n ^\r\n\r\nTypeError: Cannot read properties of undefined (reading 'usd')\r\n at file:///root/eliza-dev/packages/plugin-solana/dist/index.js:732:60\r\n at Array.reduce ()\r\n\r\n\r\n**To Reproduce**\r\n\r\nRun Discord client\r\n\r\n**Expected behavior**\r\n\r\nnot crash\r\n\r\n**Additional context**\r\n\r\ncommit: a2e0954a5871eaace15dc9197fd7457b1b62064e stable-11-17\r\n" + } + ] + }, + { + "number": 461, + "title": "Windows 11: Command Failed with exit Code 7:", + "state": "open", + "created_at": "2024-11-20 21:08:36", + "updated_at": "2024-11-22 12:30:38", + "author": "notOccupanther", + "labels": "bug", + "assignees": "", + "comments": 2, + "url": "https://github.com/ai16z/eliza/issues/461", + "messages": [ + { + "number": 0, + "author": "notOccupanther", + "created_at": "2024-11-20T21:08:36Z", + "body": "During **pnpm i** section of the install I received the following error \"\u2009_ELIFECYCLE\u2009 Command failed with exit code 7._\"\r\n\r\nI managed to fix it, by manually downlaoding the Discord Opus piece that it failed on, but for some reason, it is not downloading via the script.\r\n\r\nI tried downloading to the actual directory via a browser and it failed - windows reported the download reported an error,. 
So I downloaded it into the 'downloads' folder under my profile and manually copied the file in. I then extracted the files and re-ran \r\n\r\npnpm i \r\n\r\nIt seems to have worked. So this is an FYI really. \r\n\r\nThanks\r\n\r\nN\r\n\r\nC:\\Users\\clancynl\\Documents\\GitHub\\eliza-starter>pnpm i\r\nLockfile is up to date, resolution step is skipped\r\nPackages: +1108\r\n++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++\r\nProgress: resolved 1108, reused 1108, downloaded 0, added 0, done\r\nnode_modules/.pnpm/es5-ext@0.10.64/node_modules/es5-ext: Running postinstall script, done in 167ms\r\nnode_modules/.pnpm/@discordjs+opus@https+++codeload.github.com+discordjs+opus+tar.gz+31da49d8d2cc6c5a2ab1bfd332033ff7d5f9fb02/node_modules/@discordjs/opus: Running install script, failed in 640ms\r\n.../node_modules/@discordjs/opus install$ node-pre-gyp install --fallback-to-build\r\n\u2502 node-pre-gyp info it worked if it ends with ok\r\n\u2502 node-pre-gyp info using node-pre-gyp@0.4.5\r\n\u2502 node-pre-gyp info using node@23.2.0 | win32 | x64\r\n\u2502 (node:30168) [DEP0040] DeprecationWarning: The `punycode` module is deprecated. Please use a userland alternative instead.\r\n\u2502 (Use `node --trace-deprecation ...` to show where the warning was created)\r\n\u2502 node-pre-gyp info check checked for \"C:\\Users\\clancynl\\Documents\\GitHub\\eliza-starter\\node_modules\\.pnpm\\@discordjs+opus@https+++codeload.github.com+discordjs+opus+tar.gz+31da49d8d2cc6c5a2ab1bfd332033ff7d5f9fb02\\node_modules\\@discordjs\\opus\\prebuild\\node-v131-napi-v3-win32-x64-unknown-unknown\\opus.node\" (not found)\r\n\u2502 node-pre-gyp http GET https://github.com/discordjs/opus/releases/download/v0.9.0/opus-v0.9.0-node-v131-napi-v3-win32-x64-unknown-unknown.tar.gz\r\n\u2502 node-pre-gyp ERR! install response status 404 Not Found on https://github.com/discordjs/opus/releases/download/v0.9.0/opus-v0.9.0-node-v131-napi-v3-win32-x64-unknown-unknown.tar.gz\r\n\u2502 node-pre-gyp WARN Pre-built binaries not installable for @discordjs/opus@0.9.0 and node@23.2.0 (node-v131 ABI, unknown) (falling back to source compile with node-gyp)\r\n\u2502 node-pre-gyp WARN Hit error response status 404 Not Found on https://github.com/discordjs/opus/releases/download/v0.9.0/opus-v0.9.0-node-v131-napi-v3-win32-x64-unknown-unknown.tar.gz\r\n\u2502 node-pre-gyp ERR! UNCAUGHT EXCEPTION\r\n\u2502 node-pre-gyp ERR! stack Error: spawn EINVAL\r\n\u2502 node-pre-gyp ERR! stack at ChildProcess.spawn (node:internal/child_process:421:11)\r\n\u2502 node-pre-gyp ERR! stack at Object.spawn (node:child_process:753:9)\r\n\u2502 node-pre-gyp ERR! stack at module.exports.run_gyp (C:\\Users\\clancynl\\Documents\\GitHub\\eliza-starter\\node_modules\\.pnpm\\@discordjs+node-pre-gyp@0.4.5\\node_modules\\@discordjs\\node-pre-gyp\\lib\\util\\compile.js:72:17)\r\n\u2502 node-pre-gyp ERR! stack at build (C:\\Users\\clancynl\\Documents\\GitHub\\eliza-starter\\node_modules\\.pnpm\\@discordjs+node-pre-gyp@0.4.5\\node_modules\\@discordjs\\node-pre-gyp\\lib\\build.js:36:11)\r\n\u2502 node-pre-gyp ERR! stack at self.commands. 
[as build] (C:\\Users\\clancynl\\Documents\\GitHub\\eliza-starter\\node_modules\\.pnpm\\@discordjs+node-pre-gyp@0.4.5\\node_modules\\@discordjs\\node-pre-gyp\\lib\\node-pre-gyp.js:72:34)\r\n\u2502 node-pre-gyp ERR! stack at run (C:\\Users\\clancynl\\Documents\\GitHub\\eliza-starter\\node_modules\\.pnpm\\@discordjs+node-pre-gyp@0.4.5\\node_modules\\@discordjs\\node-pre-gyp\\lib\\main.js:90:29)\r\n\u2502 node-pre-gyp ERR! stack at process.processTicksAndRejections (node:internal/process/task_queues:85:11)\r\n\u2502 node-pre-gyp ERR! System Windows_NT 10.0.26120\r\n\u2502 node-pre-gyp ERR! command \"C:\\\\Program Files\\\\nodejs\\\\node.exe\" \"C:\\\\Users\\\\clancynl\\\\Documents\\\\GitHub\\\\eliza-starter\\\\node_modules\\\\.pnpm\\\\@discordjs+node-pre-gyp@0.4.5\\\\node_modules\\\\@discordjs\\\\node-pre-gyp\\\\bin\\\\node-pre-gyp\" \"install\" \"--fallback-to-build\"\r\n\u2502 node-pre-gyp ERR! cwd C:\\Users\\clancynl\\Documents\\GitHub\\eliza-starter\\node_modules\\.pnpm\\@discordjs+opus@https+++codeload.github.com+discordjs+opus+tar.gz+31da49d8d2cc6c5a2ab1bfd332033ff7d5f9fb02\\node_modules\\@discordjs\\opus\r\n\u2502 node-pre-gyp ERR! node -v v23.2.0\r\n\u2502 node-pre-gyp ERR! node-pre-gyp -v v0.4.5\r\n\u2514\u2500 Failed in 640ms at C:\\Users\\clancynl\\Documents\\GitHub\\eliza-starter\\node_modules\\.pnpm\\@discordjs+opus@https+++codeload.github.com+discordjs+opus+tar.gz+31da49d8d2cc6c5a2ab1bfd332033ff7d5f9fb02\\node_modules\\@discordjs\\opus\r\nnode_modules/.pnpm/canvas@2.11.2/node_modules/canvas: Running install script, failed in 626ms (skipped as optional)\r\nnode_modules/.pnpm/puppeteer@19.11.1_bufferutil@4.0.8_typescript@5.6.3_utf-8-validate@5.0.10/node_modules/puppeteer: Running postinstall script...\r\nnode_modules/.pnpm/node-llama-cpp@3.1.1_typescript@5.6.3/node_modules/node-llama-cpp: Running postinstall script...\r\n\u2009ELIFECYCLE\u2009 Command failed with exit code 7." + }, + { + "number": 1, + "author": "sirkitree", + "created_at": "2024-11-21T16:38:02Z", + "body": "please try the following:\r\nmain branch, node v23.1.0, bash\r\npnpm clean\r\npnpm install\r\npnpm build\r\npnpm start --character=(file)" + }, + { + "number": 2, + "author": "lakshya404stc", + "created_at": "2024-11-21T22:02:25Z", + "body": "this might solve the issue\r\nnpm install -g node-pre-gyp\r\npnpm i\r\n\r\nedit **\r\nI found a better solution\r\n\r\nlinux\r\nbefore running \"pnpm i\"\r\nsudo apt update && sudo apt install build-essentials && python3\r\n\r\nwindows\r\nbefore running \"pnpm i\"\r\nnpm install --global --production windows-build-tools" + } + ] + }, + { + "number": 458, + "title": "bugfix: Modify docker run error after agent folder move", + "state": "open", + "created_at": "2024-11-20 17:07:19", + "updated_at": "2024-11-21 11:28:12", + "author": "THtianhao", + "labels": "", + "assignees": "", + "comments": 1, + "url": "https://github.com/ai16z/eliza/pull/458", + "messages": [ + { + "number": 0, + "author": "THtianhao", + "created_at": "2024-11-20T17:07:19Z", + "body": "# Relates to:\r\n\r\nDocker running error\r\n\r\n\r\n# Risks\r\n\r\nLow, The scope of influence is the operation of docker\r\n\r\n\r\n# Background\r\n\r\n## What does this PR do?\r\n\r\nAfter the author moved the agent folder, docker could not map the agent directory in docker, so docker volumn mapping incorrectly. 
I re-mapped the agent directory\r\n\r\ndocker has the following error:\r\n```\r\nsudo docker compose -f docker-compose-waifu.yaml up\r\n[+] Running 1/0\r\n\u2714 Container waifu Created 0.0 s\r\nAttaching to waifu\r\nwaifu |\r\nwaifu | > eliza@ start /app\r\nwaifu | > pnpm --filter \"@ai16z/agent\" start --isRoot \"--characters=characters/waifu.character.json\"\r\nwaifu |\r\nwaifu | No projects matched the filters in \"/app\"\r\n```\r\n\r\n## What kind of change is this?\r\n\r\nBug fixes (non-breaking change which fixes an issue)\r\n\r\n\r\n# Documentation changes needed?\r\n\r\nMy changes do not require a change to the project documentation.\r\n\r\n\r\n# Testing\r\n\r\n## Where should a reviewer start?\r\n\r\nCarry out\r\n```\r\n. ./scripts/docker.sh build\r\n. ./scripts/docker.sh run\r\n```\r\nTry to see if the docker log is error free" + }, + { + "number": 1, + "author": "THtianhao", + "created_at": "2024-11-21T11:26:06Z", + "body": "> LGTM - thanks so much for fixing this\r\n\r\nThis is what I should do, hope that the open source project will get better and better\r\n" + } + ] + }, + { + "number": 457, + "title": "Non-Merge Access Users Cannot View Specific Merge Conflicts in Pull Requests They Review", + "state": "open", + "created_at": "2024-11-20 16:50:34", + "updated_at": "2024-11-21 18:38:10", + "author": "monilpat", + "labels": "bug", + "assignees": "", + "comments": 2, + "url": "https://github.com/ai16z/eliza/issues/457", + "messages": [ + { + "number": 0, + "author": "monilpat", + "created_at": "2024-11-20T16:50:34Z", + "body": "**Describe the bug**\r\n\r\nUsers without merge access are unable to view specific merge conflicts in pull requests. This limitation prevents them from resolving or contributing to conflict resolution effectively, as they cannot see the conflicting files or lines of code.\r\n\r\n**To Reproduce**\r\n\r\n1. Create a pull request with merge conflicts.\r\n2. Ensure that the user reviewing the PR does not have merge access.\r\n3. Attempt to view the specific merge conflicts as the non-merge access user.\r\n\r\n**Expected behavior**\r\n\r\nUsers without merge access should be able to view the specific merge conflicts within the pull request, including the files and lines of code in conflict, to assist in resolving the issue.\r\n\r\n**Screenshots**\r\n\r\n\r\n\r\n**Additional context**\r\n\r\nThis limitation may hinder collaboration and delay the resolution of merge conflicts, especially in teams where code reviews and conflict resolutions involve contributors without merge privileges." + }, + { + "number": 1, + "author": "sirkitree", + "created_at": "2024-11-21T16:44:26Z", + "body": "Any suggestions on how to fix this? Is there a permission or something in gh we can update?" + }, + { + "number": 2, + "author": "monilpat", + "created_at": "2024-11-21T18:38:08Z", + "body": "Yeah for sure there has to be some settings in the actions yaml files that configures this, will have a look, just wanted to flag that either way a maintainer will need to address this! Thanks so much!" 
+ } + ] + }, + { + "number": 455, + "title": "Cheshire Terminal", + "state": "open", + "created_at": "2024-11-20 15:42:32", + "updated_at": "2024-11-20 16:53:51", + "author": "8bitsats", + "labels": "", + "assignees": "", + "comments": 3, + "url": "https://github.com/ai16z/eliza/pull/455", + "messages": [ + { + "number": 0, + "author": "8bitsats", + "created_at": "2024-11-20T15:42:32Z", + "body": "\r\n\r\nCheshire Terminal\r\nhttps://zany-cantaloupe-d3d.notion.site/Cheshire-Terminal-142ea810c1f7803f9040c91541e710e5?pvs=73\r\n\r\nLow\r\n\r\n-->\r\n\r\n# Background\r\n\r\n## What does this PR do?\r\nA new Character, Cross Chain, Memetic Warfare, Adds music, and verifies the Cheshire Terminal $grin token launch through Eliza Fork and framework. Also want to establish a foundation for Cheshire Framework integrating metaplex and helius as well as cross chain zero knowledge recursion using on chain ai oracles using metaplex core, zero knowledge recursion, and cross chain communication of assets on Bitcoin, Solana, And Beyond.\r\n\r\n## What kind of change is this?\r\n\r\nFeatures, Character Add.\r\n\r\nThis is my first official pull request on any github and I would be honored if it would be included in the Eliza Framework. I have been working on Cross Chain Multi Mo del Ai Agents, on and off chain for about 6 months now, and have specifically trained data sets and fine tunes models for Solana Bitcoin and beyond.\r\n\r\n# Documentation changes needed?\r\nMemeticWarfare\r\ntrading\r\ncharacters/cheshire\r\neliza/music\r\n# Testing\r\n\r\n## Where should a reviewer start?\r\n\r\n## Detailed testing steps\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n" + }, + { + "number": 1, + "author": "snobbee", + "created_at": "2024-11-20T16:16:19Z", + "body": "@8bitsats please squash all your commits into one with a more relevant commit message." + }, + { + "number": 2, + "author": "jkbrooks", + "created_at": "2024-11-20T16:52:19Z", + "body": " There are also conflicts that would need to be addressed in the following files: \r\n \r\n ```\r\n .env.example\r\ndocs/docs/guides/configuration.md\r\npackage-lock.json\r\npackages/adapter-sqlite/package.json\r\npackages/agent/src/index.ts\r\npackages/client-discord/src/messages.ts\r\npackages/client-discord/src/voice.ts\r\npackages/client-discord/tsconfig.json\r\npackages/client-telegram/src/messageManager.ts\r\npackages/client-telegram/src/telegramClient.ts\r\npackages/client-twitter/package.json\r\npackages/client-twitter/src/index.ts\r\npackages/client-twitter/src/interactions.ts\r\npackages/client-twitter/src/post.ts\r\npackages/core/src/embedding.ts\r\npackages/core/src/generation.ts\r\npackages/core/src/memory.ts\r\npackages/core/src/runtime.ts\r\npackages/core/tsconfig.json\r\npnpm-lock.yaml\r\n```\r\n\r\nIt seems that some of these changes are specific to a character you want to create @8bitsats , and those would be better served for your own fork of Eliza. 
There are various forks as you can see in [Eliza World ](https://elizas.world/), but people display that content in their own hosted fork rather than in the main repo.\r\n\r\nThen the last point of feedback is that this is a very large pull request, it would help if you broke down the changes into smaller pull requests that we could review more easily.\r\n\r\nAnd then I would say we actually may want to add this to the guidelines of our contributing docs or some other guidelines cc @monilpat @lalalune " + }, + { + "number": 3, + "author": "8bitsats", + "created_at": "2024-11-20T16:53:50Z", + "body": "hey brother, this is my first pull request, and I totally sent you my local copy before I cleaned up the build post token launch, give me a few and I'll organize it and clean it up for you. Thanks again for the response!" + } + ] + }, + { + "number": 454, + "title": "fix: don't reply to tweets older than 5 days", + "state": "open", + "created_at": "2024-11-20 15:32:56", + "updated_at": "2024-11-21 13:13:32", + "author": "liamzebedee", + "labels": "", + "assignees": "", + "comments": 2, + "url": "https://github.com/ai16z/eliza/pull/454", + "messages": [ + { + "number": 0, + "author": "liamzebedee", + "created_at": "2024-11-20T15:32:56Z", + "body": "\r\n\r\n# Relates to:\r\n\r\n\r\n\r\n\r\n\r\n# Risks\r\n\r\n\r\n\r\n# Background\r\n\r\n## What does this PR do?\r\n\r\n## What kind of change is this?\r\n\r\n\r\n\r\n\r\n\r\n\r\n# Documentation changes needed?\r\n\r\n\r\n\r\n\r\n\r\n# Testing\r\n\r\n## Where should a reviewer start?\r\n\r\n## Detailed testing steps\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n" + }, + { + "number": 1, + "author": "monilpat", + "created_at": "2024-11-20T16:22:42Z", + "body": "Also please resolve the conflicts and fill in the test plan :) " + }, + { + "number": 2, + "author": "ponderingdemocritus", + "created_at": "2024-11-21T09:25:39Z", + "body": "solid - fix issues then we can merge" + } + ] + }, + { + "number": 450, + "title": "feat: replace `unruggable-core` with `unruggable-sdk`", + "state": "open", + "created_at": "2024-11-20 11:21:07", + "updated_at": "2024-11-21 09:22:06", + "author": "remiroyc", + "labels": "", + "assignees": "", + "comments": 1, + "url": "https://github.com/ai16z/eliza/pull/450", + "messages": [ + { + "number": 0, + "author": "remiroyc", + "created_at": "2024-11-20T11:21:07Z", + "body": "## Description\r\nThis PR proposes to replace the current implementation using `unruggable-core` with the newly published `unruggable-sdk` package. 
The SDK provides a more developer-friendly experience with:\r\n\r\n- Improved TypeScript support\r\n- More concise and readable API\r\n- Maintained wrapper around the core package functionality\r\n\r\n## Changes\r\n- Replace `unruggable-core` dependency with `unruggable-sdk`\r\n- Update relevant implementations to use the new SDK interface\r\n- Update types where necessary\r\n\r\n## Benefits\r\n- Better developer experience through improved typing\r\n- More maintainable code through cleaner abstractions\r\n- Same core functionality with a more ergonomic interface" + }, + { + "number": 1, + "author": "ponderingdemocritus", + "created_at": "2024-11-21T09:22:04Z", + "body": "looks good - fix conflicts then lets ship" + } + ] + }, + { + "number": 448, + "title": "throw error on character failing to load", + "state": "open", + "created_at": "2024-11-20 10:25:26", + "updated_at": "2024-11-22 17:12:16", + "author": "liamzebedee", + "labels": "", + "assignees": "", + "comments": 3, + "url": "https://github.com/ai16z/eliza/pull/448", + "messages": [ + { + "number": 0, + "author": "liamzebedee", + "created_at": "2024-11-20T10:25:26Z", + "body": "\r\n\r\n# Relates to:\r\n\r\n\r\n\r\n\r\n\r\n# Risks\r\n\r\n\r\n\r\n# Background\r\n\r\n## What does this PR do?\r\n\r\n## What kind of change is this?\r\nImprovement\r\n\r\n\r\n\r\n\r\n\r\n## Why are we doing this? Any context or related work?\r\nUsers pass character paths via command-line. If the character file fails to be found / parsed, Eliza just launches the default character. This is counterintuitive for a newcomer like myself, since the error logged is immediately cleared from the screen when the chat loop is started. More intuitive to just throw directly.\r\n\r\n\r\n# Documentation changes needed?\r\n\r\n\r\n\r\n\r\n\r\n# Testing\r\n\r\n## Where should a reviewer start?\r\n\r\n## Detailed testing steps\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n" + }, + { + "number": 1, + "author": "jkbrooks", + "created_at": "2024-11-20T16:58:04Z", + "body": "can we address this merge conflict `core/src/cli/index.ts` @liamzebedee @snobbee " + }, + { + "number": 2, + "author": "odilitime", + "created_at": "2024-11-21T06:29:02Z", + "body": "Obseleted by #426 " + }, + { + "number": 3, + "author": "snobbee", + "created_at": "2024-11-22T17:12:15Z", + "body": "@jkbrooks sounds like the changes in this PR were already addressed by another PR https://github.com/ai16z/eliza/pull/426, therefore we could close this one" + } + ] + }, + { + "number": 443, + "title": "When using Ollama, a long loop of Initializing Ollama Model happens before any output", + "state": "open", + "created_at": "2024-11-20 09:15:15", + "updated_at": "2024-11-22 22:59:08", + "author": "FGhrawi", + "labels": "bug", + "assignees": "", + "comments": 6, + "url": "https://github.com/ai16z/eliza/issues/443", + "messages": [ + { + "number": 0, + "author": "FGhrawi", + "created_at": "2024-11-20T09:15:15Z", + "body": "**Describe the bug**\r\n\r\nWhen using Ollama, the model being used is loaded over and over in a loop (sometimes 10-15+ times) in the terminal before any output or agent decision-making happens.\r\nFwiw, one model is configured at a time in .env (so no swapping is being done)\r\n\r\n**To Reproduce**\r\n\r\nUse any model with the Ollama provider\r\n\r\n**Expected behavior**\r\n\r\nModel loads once. 
(Since ollama is on keepalive and only 1 model is being used)\r\n\r\n**Screenshots**\r\n![Screenshot_2024-11-19_at_10 46 50_PM](https://github.com/user-attachments/assets/9aee0e5e-a587-482e-9afc-b860b1a23614)\r\n\r\n\r\n\r\n\r\n**Additional context**\r\nI am on a fresh clone of the repo with a basic character configuration.\r\nFwiw, I have 24gb vram, and this happens even on smaller models.\r\n\r\n\r\n\r\n" + }, + { + "number": 1, + "author": "drew-royster", + "created_at": "2024-11-21T15:34:27Z", + "body": "@FGhrawi can you pull latest and confirm that this is fixed?" + }, + { + "number": 2, + "author": "yodamaster726", + "created_at": "2024-11-22T06:39:45Z", + "body": "I did a fresh clone on the latest code and the code keeps wanting to download and run the local_llama.\r\nI have the OLLAMA_MODEL set in my .env. Here is what have:\r\n\r\n#Set to Use for New OLLAMA provider\r\nOLLAMA_SERVER_URL= #Leave blank for default localhost:11434\r\nOLLAMA_MODEL=hermes3\r\nOLLAMA_EMBEDDING_MODEL= #default mxbai-embed-large\r\n#To use custom model types for different tasks set these\r\nSMALL_OLLAMA_MODEL= #default llama3.2\r\nMEDIUM_OLLAMA_MODEL= #default hermes3\r\nLARGE_OLLAMA_MODEL= #default hermes3:70b\r\n\r\nThis is what I have in my defaultCharacter.\r\n\r\n modelProvider: ModelProviderName.OLLAMA,\r\n settings: {\r\n secrets: {},\r\n voice: {\r\n model: \"en_US-hfc_female-medium\",\r\n },\r\n embeddingModel: \"mxbai-embed-large\"\r\n },\r\n\r\nCode is also tries to load the wrong OLLAMA embedding model too by default - hence having to set it here.\r\n\r\nI'm debugging the problem now and working on a fix for it.\r\n\r\n" + }, + { + "number": 3, + "author": "lakshya404stc", + "created_at": "2024-11-22T12:52:15Z", + "body": "Is this issue still valid??" + }, + { + "number": 4, + "author": "yodamaster726", + "created_at": "2024-11-22T13:54:49Z", + "body": "yes, ollama and llama local got merged together and the ollama logic is not working right. getting close to a fix.\r\nexample, its downloading the local llama instead of using ollama if it's configured" + }, + { + "number": 5, + "author": "drew-royster", + "created_at": "2024-11-22T22:35:30Z", + "body": "@yodamaster726 what is your character file like? I had ollama as the model in my character and it seemed to use ollama just fine." + }, + { + "number": 6, + "author": "yodamaster726", + "created_at": "2024-11-22T22:59:07Z", + "body": "I tried the latest tag v0.1.3 and then tried the latest code from yesterday. My character file was the default one.\r\n![image](https://github.com/user-attachments/assets/140300b3-dc4e-4cb1-89f7-810f4589c627)\r\n\r\nupdates to fix this problem: https://github.com/ai16z/eliza/pull/521" + } + ] + }, + { + "number": 439, + "title": "Feat: Hot reload json", + "state": "open", + "created_at": "2024-11-20 07:43:07", + "updated_at": "2024-11-20 07:43:07", + "author": "odilitime", + "labels": "enhancement", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/439", + "messages": [ + { + "number": 0, + "author": "odilitime", + "created_at": "2024-11-20T07:43:07Z", + "body": "**Is your feature request related to a problem? 
Please describe.**\r\n\r\nBe able to reload a character json without restarting\r\n\r\n**Describe the solution you'd like**\r\n\r\nREST API?\r\n\r\nSIGHUP to reload all characters?\r\nSIGUSR1 to reload all characters as soon as they're done with their current action?\r\n\r\n**Describe alternatives you've considered**\r\n\r\nTBD\r\n\r\n**Additional context**\r\n\r\nCredit to ICJR on discord for the idea\r\n" + } + ] + }, + { + "number": 438, + "title": "Feature: when getting style guidelines should always add all of them to context", + "state": "open", + "created_at": "2024-11-20 07:16:21", + "updated_at": "2024-11-20 07:38:57", + "author": "o-on-x", + "labels": "enhancement", + "assignees": "", + "comments": 1, + "url": "https://github.com/ai16z/eliza/issues/438", + "messages": [ + { + "number": 0, + "author": "o-on-x", + "created_at": "2024-11-20T07:16:21Z", + "body": "when adding postDirections on chatDirections\r\nshould grab all of the guidelines to follow\r\nthis is so it does not ignore requests that are extremely important to follow for how the character behaves\r\nreally instead of random for the character file the agent should be able to select what it puts into its context as part of the shouldRespond logic or some other way \r\n\r\nin runtime.ts\r\n```\r\n postDirections:\r\n this.character?.style?.all?.length > 0 ||\r\n this.character?.style?.post.length > 0\r\n ? addHeader(\r\n \"# Post Directions for \" + this.character.name,\r\n (() => {\r\n const all = this.character?.style?.all || [];\r\n const post = this.character?.style?.post || [];\r\n const shuffled = [...all, ...post].sort(\r\n () => 0.5 - Math.random()\r\n );\r\n return shuffled\r\n .slice(0, conversationLength / 2)\r\n .join(\"\\n\");\r\n })()\r\n )\r\n : \"\",*\r\n```\r\n>\r\n```\r\n postDirections:\r\n this.character?.style?.all?.length > 0 ||\r\n this.character?.style?.post.length > 0\r\n ? addHeader(\r\n \"# Post Directions for \" + this.character.name,\r\n (() => {\r\n const all = this.character?.style?.all || [];\r\n const post = this.character?.style?.post || [];\r\n return [...all, ...post].join(\"\\n\");\r\n })()\r\n )\r\n : \"\",\r\n\r\n```" + }, + { + "number": 1, + "author": "odilitime", + "created_at": "2024-11-20T07:27:59Z", + "body": "how about 2 rules settings: \"always respect\", \"maybe respect\" ? just off load it to the character designer. We should also have platform (discord, x/twitter, telegram, etc) specific rules since hashtag can be specific to x/twitter. 
And then maybe a matrix of the two: always_discord, maybe_discord?\r\n\r\nwe can instruct people to keep the always to be very small and light as it affects all contexts\r\n\r\nwe could also do a follow up prompt asking if the output followed the rules" + } + ] + }, + { + "number": 423, + "title": "Error: Property 'clients' does not exist on type 'Plugin' in src/index.ts during pnpm start", + "state": "open", + "created_at": "2024-11-19 19:10:10", + "updated_at": "2024-11-19 21:10:32", + "author": "monilpat", + "labels": "bug", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/423", + "messages": [ + { + "number": 0, + "author": "monilpat", + "created_at": "2024-11-19T19:10:10Z", + "body": "### Title:\r\nError: `Property 'clients' does not exist on type 'Plugin'` in `src/index.ts` during `pnpm start` from latest build as of yesterday and still exists today\r\n\r\n---\r\n\r\n### **Describe the bug**\r\nWhen running the command `pnpm start --characters=\"characters/character.character.json\"`, the build fails with TypeScript errors in `src/index.ts`. Specifically, the errors occur because the `Plugin` type does not include a `clients` property.\r\n\r\nThe following errors are observed:\r\n\r\n```\r\nsrc/index.ts:206:24 - error TS2339: Property 'clients' does not exist on type 'Plugin'.\r\n\r\n206 if (plugin.clients) {\r\n ~~~~~~~\r\n\r\nsrc/index.ts:207:45 - error TS2339: Property 'clients' does not exist on type 'Plugin'.\r\n\r\n207 for (const client of plugin.clients) {\r\n ~~~~~~~\r\n```\r\n\r\nThe issue seems to originate from an outdated or incorrect type definition for `Plugin`.\r\n\r\n---\r\n\r\n### **To Reproduce**\r\n1. Clone the repository and navigate to the project directory.\r\n2. Run the following commands:\r\n ```bash\r\n pnpm install\r\n pnpm start --characters=\"characters/character.character.json\"\r\n ```\r\n3. Observe the TypeScript build errors.\r\n\r\n---\r\n\r\n### **Expected behavior**\r\nThe project should successfully build and start without errors. Type definitions for `Plugin` should include the `clients` property, or the code should handle cases where `clients` is not defined.\r\n" + } + ] + }, + { + "number": 422, + "title": "using Groq API (or RedPill or Google Gen AI) as model provider", + "state": "open", + "created_at": "2024-11-19 17:21:15", + "updated_at": "2024-11-19 17:21:15", + "author": "YoungPhlo", + "labels": "bug", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/422", + "messages": [ + { + "number": 0, + "author": "YoungPhlo", + "created_at": "2024-11-19T17:21:15Z", + "body": "**Describe the bug**\r\n\r\ntrying to use the **Groq API** but its not clear how to define the model in the .env file. 
\r\nother providers have `SMALL_HEURIST_LANGUAGE_MODEL`, `SMALL_HEURIST_LANGUAGE_MODEL`, etc\r\n\r\nI'm not sure how to define the model in the .env file\r\n\r\n**To Reproduce**\r\n\r\nhttps://github.com/ai16z/eliza/blob/main/.env.example\r\n\r\n**Expected behavior**\r\n\r\nSomething similar to Heurist\r\n`SMALL_GROQ_LANGUAGE_MODEL=`\r\n`MEDIUM_GROQ_LANGUAGE_MODEL=`\r\n`LARGE_GROQ_LANGUAGE_MODEL=`\r\n\r\n**Additional context**\r\nIf not custom `.env` variables at least a nod in the right direction\r\nmy IDE pointed me to `packages/core/src/models.ts`\r\n" + } + ] + }, + { + "number": 416, + "title": "feat: Add 0G plugin for file storage", + "state": "open", + "created_at": "2024-11-19 09:15:42", + "updated_at": "2024-11-22 01:51:22", + "author": "Wilbert957", + "labels": "", + "assignees": "", + "comments": 9, + "url": "https://github.com/ai16z/eliza/pull/416", + "messages": [ + { + "number": 0, + "author": "Wilbert957", + "created_at": "2024-11-19T09:15:42Z", + "body": "This PR adds a new plugin for [0G](https://0g.ai/).\r\n\r\n# Risks\r\n\r\nLow. This change involves adding a new plugin, which is isolated and should not affect existing functionality.\r\n\r\n# Background\r\n\r\n## What does this PR do?\r\n\r\nThis PR adds a new plugin for 0G, which includes an action that allows users to upload local files to [0G Storage](https://docs.0g.ai/og-storage).\r\nFuture work will include extending 0G support for model serving, state persistence via KV store, and enhanced file management capabilities.\r\n\r\n## What kind of change is this?\r\n\r\nFeatures (non-breaking change which adds functionality).\r\n\r\n## Why are we doing this? Any context or related work?\r\n\r\nTo extend the functionality of Eliza.\r\n\r\n# Documentation changes needed?\r\n\r\nRequire a change to the project documentation.\r\n\r\n# Testing\r\n\r\n## Where should a reviewer start?\r\n\r\nThe review can start with the new 0G plugin code, focusing on the action that uploads files to ensure it handles files correctly and integrates well with 0G Storage.\r\n\r\n## Detailed testing steps\r\n\r\n1. **Set Environment Variables** \r\n\"Screenshot\r\n Configure the following variables: \r\n - `ZEROG_INDEXER_RPC` \r\n - `ZEROG_EVM_RPC` \r\n - `ZEROG_PRIVATE_KEY` \r\n - `ZEROG_FLOW_ADDRESS`\r\n [More details](https://github.com/0glabs/0g-ts-sdk)\r\n\r\n2. **Add 0g-plugin into character file** \r\n Ensure the `@ai16z/plugin-0g` is properly added and configured in the character file.\r\n\r\n\"Screenshot\r\n\r\n3. **Interact with the Agent**\r\n Send a message to the agent: Upload the file at /path/to/your/file.txt to zg storage.\r\n\"Screenshot\r\n\"Screenshot\r\n\r\n## Screenshots\r\n### Before\r\n### After\r\n\r\n## Database changes\r\n\r\n## Deployment instructions" + }, + { + "number": 1, + "author": "shakkernerd", + "created_at": "2024-11-19T15:09:09Z", + "body": "Hey @Wilbert957 Great work!\r\n\r\nThere are conflicts that needs to be resolved before a merge could happen, could you fix that?\r\n \r\nIn the package.json for 0g-plugin, kind add dev command (`\"dev\": \"tsup --watch\"`)." + }, + { + "number": 2, + "author": "Wilbert957", + "created_at": "2024-11-19T15:13:30Z", + "body": "Thanks for your notification. 
Sure, I'll fix it ASAP.\r\n\r\nShakker Nerd ***@***.***> \u4e8e2024\u5e7411\u670819\u65e5\u5468\u4e8c 23:09\u5199\u9053\uff1a\r\n\r\n> Hey @Wilbert957 Great work!\r\n>\r\n> There are conflicts that needs fixing, could you fix that?\r\n>\r\n> In the package.json for 0g-plugin, kind add dev command (\"dev\": \"tsup\r\n> --watch\").\r\n>\r\n> \u2014\r\n> Reply to this email directly, view it on GitHub\r\n> , or\r\n> unsubscribe\r\n> \r\n> .\r\n> You are receiving this because you were mentioned.Message ID:\r\n> ***@***.***>\r\n>\r\n" + }, + { + "number": 3, + "author": "Wilbert957", + "created_at": "2024-11-19T15:24:16Z", + "body": "> Hey @Wilbert957 Great work!\r\n> \r\n> There are conflicts that needs to be resolved before a merge could happen, could you fix that?\r\n> \r\n> In the package.json for 0g-plugin, kind add dev command (`\"dev\": \"tsup --watch\"`).\r\n\r\nI've fixed it. Could you please check?" + }, + { + "number": 4, + "author": "shakkernerd", + "created_at": "2024-11-19T15:33:56Z", + "body": "Kindly attach a screengrab of the upload/file storage interaction.\r\n\r\n@Wilbert957 _This has been updated._" + }, + { + "number": 5, + "author": "monilpat", + "created_at": "2024-11-19T19:22:32Z", + "body": "Please resolve conflicts and add a test and / or screen grab of working plugin functionality thanks so much! Amazing work with this plugin you are the GOAT! " + }, + { + "number": 6, + "author": "Wilbert957", + "created_at": "2024-11-20T02:10:52Z", + "body": "> Kindly attach a screengrab of the upload/file storage interaction.\r\n> \r\n> @Wilbert957 _This has been updated._\r\n\r\nSure, thanks. I've attached a screengrab." + }, + { + "number": 7, + "author": "Wilbert957", + "created_at": "2024-11-20T02:12:32Z", + "body": "> Please resolve conflicts and add a test and / or screen grab of working plugin functionality thanks so much! Amazing work with this plugin you are the GOAT!\r\n\r\nSure, thanks. I've attached a screengrab." + }, + { + "number": 8, + "author": "Wilbert957", + "created_at": "2024-11-20T14:41:59Z", + "body": "> Please resolve conflicts and add a test and / or screen grab of working plugin functionality thanks so much! Amazing work with this plugin you are the GOAT!\r\n\r\nI've resolved conflicts~" + }, + { + "number": 9, + "author": "Wilbert957", + "created_at": "2024-11-20T14:42:47Z", + "body": "> Hey @Wilbert957 Great work!\r\n> \r\n> There are conflicts that needs to be resolved before a merge could happen, could you fix that?\r\n> \r\n> In the package.json for 0g-plugin, kind add dev command (`\"dev\": \"tsup --watch\"`).\r\n\r\nI've resolved conflicts~" + } + ] + }, + { + "number": 415, + "title": "Add a plugin for storing data using the 0G protocol.", + "state": "open", + "created_at": "2024-11-19 08:35:17", + "updated_at": "2024-11-19 09:42:02", + "author": "Wilbert957", + "labels": "enhancement", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/415", + "messages": [ + { + "number": 0, + "author": "Wilbert957", + "created_at": "2024-11-19T08:35:17Z", + "body": "**Is your feature request related to a problem? Please describe.**\r\n\r\nThis is not related to a problem, but I would like to add a feature that allows uploading local files to [0G Storage](https://docs.0g.ai/og-storage) through a plugin.\r\n\r\n**Describe the solution you'd like**\r\nThe [0G](https://0g.ai/) plugin enables seamless integration with the Zero Gravity (0G) protocol for decentralized file storage. 
It provides functionality to upload files to the 0G network.\r\n\r\n**Describe alternatives you've considered**\r\n\r\n**Additional context**" + } + ] + }, + { + "number": 410, + "title": ".env Namespaced Character Secrets", + "state": "open", + "created_at": "2024-11-19 02:13:48", + "updated_at": "2024-11-20 07:59:18", + "author": "genecyber", + "labels": "", + "assignees": "", + "comments": 2, + "url": "https://github.com/ai16z/eliza/pull/410", + "messages": [ + { + "number": 0, + "author": "genecyber", + "created_at": "2024-11-19T02:13:48Z", + "body": "# Relates to:\r\nN/A\r\n\r\n# Risks\r\nMedium - This PR adds a new feature for character-specific environment variable namespacing.\r\n\r\nRisks include:\r\n- Changes to environment variable handling\r\n- Changes to character settings loading\r\n- Potential impact on existing character configurations\r\n\r\n# Background\r\nI wanted to be able to check in character files without having to sanitize them first\r\n\r\n## What does this PR do?\r\n\r\n### Feature Implementation\r\nAdds support for character-specific namespaced environment variables:\r\n1. New environment variable pattern: `CHARACTER.YOUR_CHARACTER_NAME.SETTING_NAME`\r\n2. Automatic handling of spaces in character names (converted to underscores)\r\n3. Enhanced settings hierarchy:\r\n - Character-specific namespaced env variables (highest priority)\r\n - Character settings from JSON\r\n - Global environment variables\r\n - Default values\r\n4. Maintains backward compatibility with existing settings system\r\n\r\nKey changes:\r\n- Modified settings loader in `packages/core/src/settings.ts`\r\n- Updated character loading in `packages/agent/src/index.ts`\r\n- Added automatic conversion of character name spaces to underscores\r\n\r\n### Documentation Updates\r\nUpdated documentation to reflect the new feature:\r\n1. Added namespaced settings pattern to secrets management guide\r\n2. Updated configuration guide with new examples\r\n3. Updated agent package documentation with hierarchy explanation\r\n4. Added clear examples for both .env and character.json methods\r\n\r\n## What kind of change is this?\r\nFeatures (non-breaking change which adds functionality)\r\n\r\n# Documentation changes needed?\r\nYes - Documentation has been updated to reflect the new feature:\r\n- Added namespaced settings pattern\r\n- Updated configuration examples\r\n- Updated settings hierarchy explanation\r\n- Added character name handling instructions\r\n\r\n# Testing\r\n\r\n## Where should a reviewer start?\r\n\r\n### Feature Testing\r\n1. Review code changes:\r\n - `packages/core/src/settings.ts`\r\n - `packages/agent/src/index.ts`\r\n\r\n2. Test functionality:\r\n ```env\r\n # Test with simple name\r\n CHARACTER.TESTBOT.OPENAI_API_KEY=sk-test\r\n \r\n # Test with spaces in name\r\n CHARACTER.MY_TEST_BOT.ANTHROPIC_API_KEY=sk-other\r\n ```\r\n\r\n### Documentation Testing\r\nReview updated docs:\r\n- docs/docs/guides/secrets-management.md\r\n- docs/docs/guides/configuration.md\r\n- docs/docs/packages/agent.md\r\n\r\n## Detailed testing steps\r\n\r\n1. Feature Testing:\r\n - Create character with spaces in name\r\n - Add namespaced variables to .env\r\n - Verify correct loading into character settings\r\n - Test precedence over character.json settings\r\n - Verify backward compatibility\r\n - Test global fallback behavior\r\n\r\n2. 
Documentation Verification:\r\n - Verify pattern examples are correct\r\n - Verify hierarchy explanation is clear\r\n - Verify character name handling is explained\r\n - Test example configurations\r\n\r\n# Screenshots\r\nN/A - Feature implementation and documentation changes\r\n" + }, + { + "number": 1, + "author": "monilpat", + "created_at": "2024-11-20T00:56:26Z", + "body": "Thanks for getting this out! Great work! Given it is of medium risk, can we add a quick unit test to confirm that the cascading logic (in terms of where to read the settings) works as expected. If you don't want to include this in this PR no worries create a ticket and TODO comment. But please confirm on your machine that the precedence is honored re: \r\n\r\n1. Character-specific namespaced environment variables (highest priority)\r\n2. Character-specific secrets\r\n3. Environment variables\r\n4. Default values (lowest priority)\r\n\r\nThanks so much!" + }, + { + "number": 2, + "author": "genecyber", + "created_at": "2024-11-20T07:59:17Z", + "body": "good idea, I'll add tests. Is the suite in pretty good condition? I didn't try yet." + } + ] + }, + { + "number": 408, + "title": "Update totalMessages Logic in Boredom Scoring", + "state": "open", + "created_at": "2024-11-18 23:30:47", + "updated_at": "2024-11-18 23:30:47", + "author": "DanielHighETH", + "labels": "", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/408", + "messages": [ + { + "number": 0, + "author": "DanielHighETH", + "created_at": "2024-11-18T23:30:47Z", + "body": "The boredom scoring function uses totalMessages in two places to evaluate user activity and interactions. Right now, it applies a threshold (> 10) to penalize repetitive or low interaction activity. This logic might need to be adjusted to better reflect actual user behavior and improve accuracy.\r\n\r\nPlease check if the threshold (> 10) is appropriate.\r\n\r\n\r\n```\r\n // TODO: change totalMessages value\r\n if (recentUsers.length <= 2 && recentMessages.length > 10) {\r\n boredomScore += 1;\r\n }\r\n\r\n if (uniqueUsers.length < 3) {\r\n // if less than 3 unique users, assume repetitive interaction\r\n const totalMessages = Object.values(userMessageCounts).reduce(\r\n (a, b) => a + b,\r\n 0\r\n );\r\n\r\n // TODO: change totalMessages value\r\n if (totalMessages > 10) {\r\n boredomScore += 1;\r\n }\r\n }\r\n```" + } + ] + }, + { + "number": 406, + "title": "Add EVM integration with Base and Ethereum", + "state": "open", + "created_at": "2024-11-18 22:44:38", + "updated_at": "2024-11-22 19:50:41", + "author": "zkfarmoor", + "labels": "", + "assignees": "", + "comments": 8, + "url": "https://github.com/ai16z/eliza/pull/406", + "messages": [ + { + "number": 0, + "author": "zkfarmoor", + "created_at": "2024-11-18T22:44:38Z", + "body": "# Relates to:\r\n\r\nEthereum and Base integration: [https://github.com/ai16z/eliza/issues/303](https://github.com/ai16z/eliza/issues/303)\r\n\r\n# Risks\r\n\r\n**High:** involves new chain and protocol capabilities via the LI.FI SDK\r\n\r\n# Background\r\n\r\n## What does this PR do?\r\n\r\nAdds the `plugin-evm` package\r\n\r\n## What kind of change is this?\r\n\r\nFeatures (non-breaking change which adds functionality)\r\n\r\n# Documentation changes needed?\r\n\r\nMy changes require a change to the project documentation. 
(WIP)\r\n\r\n# Testing\r\n\r\nStill in progress\r\n" + }, + { + "number": 1, + "author": "zkfarmoor", + "created_at": "2024-11-18T23:46:35Z", + "body": "working on all of these now, thank you guys for the quick reviews!" + }, + { + "number": 2, + "author": "sydneyitguy", + "created_at": "2024-11-19T16:00:25Z", + "body": "It would be cool to add a token creation action similar to the [pump.fun action in the Solana plugin](https://github.com/ai16z/eliza/blob/main/packages/plugin-solana/src/actions/pumpfun.ts).\r\n\r\nI've done similar stuff before using the Mint Club protocol, which is similar to pump.fun but designed for EVMs and offers more flexibility.\r\n\r\nReferences:\r\n- https://github.com/Steemhunt/mint-terminal\r\n- https://sdk.mint.club\r\n\r\nSince adding this action is related to this EVM integration PR and will likely depend on it, I\u2019d like to hear your thoughts on it.\r\nWhat do you guys think?" + }, + { + "number": 3, + "author": "zkfarmoor", + "created_at": "2024-11-19T16:30:12Z", + "body": "absolutely, I think a modular approach to token creation and other general evm-related actions would be appropriate to include.\r\n\r\nthis PR is intended to be a first iteration of EVM capabilities with the goal being to add more actions, chains, etc. over time. please feel free to make any changes/improvements to this!" + }, + { + "number": 4, + "author": "chandognft", + "created_at": "2024-11-19T22:43:50Z", + "body": "> It would be cool to add a token creation action similar to the [pump.fun action in the Solana plugin](https://github.com/ai16z/eliza/blob/main/packages/plugin-solana/src/actions/pumpfun.ts).\r\n> \r\n> I've done similar stuff before using the Mint Club protocol, which is similar to pump.fun but designed for EVMs and offers more flexibility.\r\n> \r\n> References:\r\n> \r\n> * [Steemhunt/mint-terminal](https://github.com/Steemhunt/mint-terminal)\r\n> * [sdk.mint.club](https://sdk.mint.club)\r\n> \r\n> Since adding this action is related to this EVM integration PR and will likely depend on it, I\u2019d like to hear your thoughts on it. What do you guys think?\r\n\r\n\r\n\r\n> It would be cool to add a token creation action similar to the [pump.fun action in the Solana plugin](https://github.com/ai16z/eliza/blob/main/packages/plugin-solana/src/actions/pumpfun.ts).\r\n> \r\n> I've done similar stuff before using the Mint Club protocol, which is similar to pump.fun but designed for EVMs and offers more flexibility.\r\n> \r\n> References:\r\n> \r\n> * [Steemhunt/mint-terminal](https://github.com/Steemhunt/mint-terminal)\r\n> * [sdk.mint.club](https://sdk.mint.club)\r\n> \r\n> Since adding this action is related to this EVM integration PR and will likely depend on it, I\u2019d like to hear your thoughts on it. What do you guys think?\r\n\r\nI can take a stab at this if you havent already" + }, + { + "number": 5, + "author": "sydneyitguy", + "created_at": "2024-11-20T00:11:53Z", + "body": "@chandognft Oh, I haven't yet. I've been learning more about Eliza's structure, which has been taking some time. It would be great if you could take a stab at this right away! \ud83d\ude0a" + }, + { + "number": 6, + "author": "w1kke", + "created_at": "2024-11-20T20:46:20Z", + "body": "Looking forward to these being finished - tons of ideas that can be realised once it is merged in." + }, + { + "number": 7, + "author": "0x-jj", + "created_at": "2024-11-22T16:30:01Z", + "body": "What's required to get this merged in? 
Happy to help with any implementation" + }, + { + "number": 8, + "author": "chandognft", + "created_at": "2024-11-22T19:50:40Z", + "body": "> What's required to get this merged in? Happy to help with any implementation\r\n\r\nTheres some remaining work currently we are working on" + } + ] + }, + { + "number": 399, + "title": "way for bots to have cool down periods (dynamic tempature adjusts) & only direct reply setting", + "state": "open", + "created_at": "2024-11-18 12:34:37", + "updated_at": "2024-11-18 12:37:39", + "author": "o-on-x", + "labels": "enhancement", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/399", + "messages": [ + { + "number": 0, + "author": "o-on-x", + "created_at": "2024-11-18T12:34:37Z", + "body": "to add for for .env\r\n```\r\n#DISCORD | only respond when @ | cooldown on reply in MS\r\nDISCORD_DIRECT_ONLY=true #true\r\nDISCORD_COOLDOWN=0 #default 1 min (only applies when not direct @)\r\n```\r\n\r\n\r\nto add for messages.ts discord\r\n\r\n```\r\nasync handleMessage(message: DiscordMessage) {\r\n if (\r\n message.interaction ||\r\n message.author.id ===\r\n this.client.user?.id /* || message.author?.bot*/\r\n )\r\n return;\r\n //only respond too direct @ or replys\r\n const isDirectMention = message.mentions.has(this.client.user?.id);\r\n const isReplyToBot = message.reference?.messageId && (\r\n await message.channel.messages.fetch(message.reference.messageId)\r\n ).author.id === this.client.user?.id;\r\n const isDirectInteraction = isDirectMention || isReplyToBot;\r\n if (settings.DISCORD_DIRECT_ONLY && !isDirectInteraction) {\r\n console.log(\"Direct-only mode: ignoring non-mention message\");\r\n return;\r\n }\r\n if (isReplyToBot) {\r\n console.log(\"Reply to bot detected\");\r\n }\r\n\r\n // Only apply cooldown check for non-direct mentions\r\n if (!isDirectInteraction) {\r\n const timeSinceLastResponse = Date.now() - this.lastResponseTime;\r\n \r\n if (timeSinceLastResponse < this.COOLDOWN_MS) {\r\n console.log(`Cooling down for non-direct messages. Time remaining: ${(this.COOLDOWN_MS - timeSinceLastResponse)/1000}s`);\r\n return;\r\n }\r\n \r\n } else {\r\n console.log(\"Direct mention detected - bypassing cooldown\");\r\n }\r\n\r\n.....\r\n await this.runtime.processActions(\r\n memory,\r\n responseMessages,\r\n state,\r\n callback\r\n );\r\n this.lastResponseTime = Date.now(); \r\n \r\n }\r\n```\r\n\r\n\r\n\r\n" + } + ] + }, + { + "number": 391, + "title": "Init commit of operator instructions", + "state": "open", + "created_at": "2024-11-18 07:27:50", + "updated_at": "2024-11-18 07:27:50", + "author": "lalalune", + "labels": "", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/pull/391", + "messages": [ + { + "number": 0, + "author": "lalalune", + "created_at": "2024-11-18T07:27:50Z", + "body": "This enables operator instructions, so agents can receive goals and knowledge" + } + ] + }, + { + "number": 387, + "title": " Error when starting Eliza agent: \"fs.Stats constructor is deprecated\" and \"triggerUncaughtException\"", + "state": "open", + "created_at": "2024-11-18 03:53:14", + "updated_at": "2024-11-18 14:04:43", + "author": "jamsmith", + "labels": "bug", + "assignees": "", + "comments": 2, + "url": "https://github.com/ai16z/eliza/issues/387", + "messages": [ + { + "number": 0, + "author": "jamsmith", + "created_at": "2024-11-18T03:53:14Z", + "body": "**Description:**\r\n\r\nI am encountering an error when trying to start the Eliza agent using the provided instructions. 
The error appears to be related to deprecated features and experimental warnings in Node.js.\r\n\r\n**Steps to Reproduce:**\r\n\r\n**Clone the repository and checkout the latest tagged release:**\r\n\r\nbash\r\ngit clone https://github.com/ai16z/eliza.git\r\ncd eliza\r\ngit checkout v0.0.10\r\n\r\n\r\n**Install dependencies:**\r\n\r\nbash\r\npnpm install -r\r\nConfigure environment variables in .env (using placeholder values where appropriate).\r\n\r\n**Start the agent:**\r\n\r\nbash\r\npnpm start --character=\"characters/trump.character.json\"\r\nObserve the error messages.\r\n\r\n**Expected Behavior:**\r\n\r\nThe agent should start without errors, allowing me to interact with it as per the quickstart guide.\r\n\r\n**Actual Behavior:**\r\n\r\nI receive the following error messages:\r\n\r\n(node:25002) ExperimentalWarning: `--experimental-loader` may be removed in the future; instead use `register()`:\r\n--import 'data:text/javascript,import { register } from \"node:module\"; import { pathToFileURL } from \"node:url\"; register(\"ts-node/esm\", pathToFileURL(\"./\"));'\r\n at createModuleLoader (node:internal/modules/esm/loader:814:17)\r\n at Object.getOrInitializeCascadedLoader (node:internal/modules/esm/loader:858:22)\r\n at asyncRunEntryPointWithESMLoader (node:internal/modules/run_main:105:65)\r\n at runEntryPointWithESMLoader (node:internal/modules/run_main:138:19)\r\n at Function.executeUserEntryPoint [as runMain] (node:internal/modules/run_main:175:5)\r\n at node:internal/main/run_main_module:36:49\r\n(node:25002) [DEP0180] DeprecationWarning: fs.Stats constructor is deprecated.\r\n at Object.createResolve (/Users/bensmith/Desktop/Personal/Projects/agents/eliza/node_modules/ts-node/dist-raw/node-internal-modules-esm-resolve.js:146:25)\r\n at /Users/bensmith/Desktop/Personal/Projects/agents/eliza/node_modules/ts-node/src/index.ts:1494:7\r\n at Object.onceFn [as getNodeEsmResolver] (/Users/bensmith/Desktop/Personal/Projects/agents/eliza/node_modules/ts-node/src/util.ts:166:13)\r\n at createEsmHooks (/Users/bensmith/Desktop/Personal/Projects/agents/eliza/node_modules/ts-node/src/esm.ts:125:51)\r\n at Object.registerAndCreateEsmHooks (/Users/bensmith/Desktop/Personal/Projects/agents/eliza/node_modules/ts-node/src/esm.ts:118:10)\r\n at file:///Users/bensmith/Desktop/Personal/Projects/agents/eliza/node_modules/ts-node/esm.mjs:8:7\r\n at ModuleJob.run (node:internal/modules/esm/module_job:268:25)\r\n at async onImport.tracePromise.__proto__ (node:internal/modules/esm/loader:543:26)\r\n at async Hooks.register (node:internal/modules/esm/hooks:152:7)\r\n at async initializeHooks (node:internal/modules/esm/utils:318:5)\r\n\r\nnode:internal/modules/run_main:122\r\n triggerUncaughtException(\r\n ^\r\n[Object: null prototype] {\r\n [Symbol(nodejs.util.inspect.custom)]: [Function: [nodejs.util.inspect.custom]]\r\n}\r\nThrown at:\r\n at asyncRunEntryPointWithESMLoader (node:internal/modules/run_main:122:5)\r\n\r\nNode.js v23.1.0\r\n ELIFECYCLE\u2009 Command failed with exit code 1.\r\n ELIFECYCLE\u2009 Command failed with exit code 1.\r\n\r\n\r\n**Environment:**\r\nOS: macOS Sonoma 14.6.1\r\nNode.js version: v23.1.0\r\n\r\n\r\n**I have tried the following steps to resolve the issue:**\r\nCleaned node_modules and reinstalled dependencies with pnpm install -r.\r\nUpdated ts-node, typescript, and @types/node to the latest versions.\r\nAdjusted tsconfig.json settings, including changing moduleResolution to \"node\" and \"nodenext\".\r\nModified the start script in core/package.json to use ts-node 
directly.\r\nAttempted to rebuild packages with pnpm rebuild.\r\nDespite these efforts, the error persists.\r\nThe error messages suggest deprecation warnings and issues with experimental features, possibly related to the Node.js version.\r\nThe quickstart guide specifies Node.js v23.1.0, but this version may not be stable or widely supported.\r\n\r\n\r\n\r\n**Request:**\r\n\r\nCould you please help me identify the cause of this issue and provide guidance on how to resolve it? Is there a recommended Node.js version or additional configuration that I should use?" + }, + { + "number": 1, + "author": "bukit-kronik", + "created_at": "2024-11-18T09:34:01Z", + "body": "same error on Windows 11 WSL" + }, + { + "number": 2, + "author": "sirkitree", + "created_at": "2024-11-18T14:04:42Z", + "body": "please try the following:\r\nmain branch, node v23.1.0, \r\n`pnpm clean`\r\n`pnpm install`\r\n`pnpm build`\r\n`pnpm start --character=(file)`" + } + ] + }, + { + "number": 386, + "title": "feat: Farcaster Client", + "state": "open", + "created_at": "2024-11-18 02:58:15", + "updated_at": "2024-11-21 03:02:56", + "author": "bmgalego", + "labels": "", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/pull/386", + "messages": [ + { + "number": 0, + "author": "bmgalego", + "created_at": "2024-11-18T02:58:15Z", + "body": "WIP\r\n\r\n\r\n\r\n# Relates to:\r\n#300 \r\n\r\n\r\n\r\n\r\n# Risks\r\n\r\n\r\n\r\n# Background\r\n\r\n## What does this PR do?\r\n\r\n## What kind of change is this?\r\n\r\n\r\n\r\n\r\n\r\n\r\n# Documentation changes needed?\r\n\r\n\r\n\r\n\r\n\r\n# Testing\r\n\r\n## Where should a reviewer start?\r\n\r\n## Detailed testing steps\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n" + } + ] + }, + { + "number": 384, + "title": "feat: TwitterInteractionClient response handling and boredom provider", + "state": "open", + "created_at": "2024-11-18 00:02:11", + "updated_at": "2024-11-20 22:52:37", + "author": "DanielHighETH", + "labels": "", + "assignees": "", + "comments": 10, + "url": "https://github.com/ai16z/eliza/pull/384", + "messages": [ + { + "number": 0, + "author": "DanielHighETH", + "created_at": "2024-11-18T00:02:11Z", + "body": "\r\n\r\n# Relates to:\r\nhttps://x.com/shawmakesmagic/status/1858248520060567911\r\n\r\n# Risks\r\nLow\r\n\r\n\r\n## What does this PR do?\r\n\r\n\r\n- Added boredom score if conversation is too long or between same people\r\n- Added error handling to the interaction loop using try-catch blocks to ensure the system keeps running smoothly even if an error occurs.\r\n- Improved tweet processing by introducing methods to filter out duplicates (filterValidTweets) and group tweets by conversation (groupTweetsByConversation) for more context-aware responses.\r\n- Made the code more modular by moving timeline formatting (getFormattedTimeline) and response evaluation (evaluateShouldRespond) into their own dedicated methods, making the logic easier to read and reuse.\r\n- Enhanced conversation handling by grouping tweets by conversation ID, enabling the bot to respond with better context for multi-tweet threads.\r\n- Cleaned up logging for better visibility into what the bot is doing and easier tracking of errors.\r\n- Refactored large, complex methods into smaller, single-purpose functions, making the code easier to maintain, debug, and extend in the future.\r\n" + }, + { + "number": 1, + "author": "monilpat", + "created_at": "2024-11-18T22:54:11Z", + "body": "Please update the PR to pass the PR Title Check thank you!" 
+ }, + { + "number": 2, + "author": "DanielHighETH", + "created_at": "2024-11-18T23:36:38Z", + "body": "> LGTM need some minor clean up please address then will approve!\r\n\r\nEverything commited and issue #408 created! Thank you" + }, + { + "number": 3, + "author": "monilpat", + "created_at": "2024-11-18T23:46:27Z", + "body": "Amazing - thank you so much! " + }, + { + "number": 4, + "author": "jkbrooks", + "created_at": "2024-11-19T00:13:36Z", + "body": "@monilpat you sure? we have conflicts in `packages/client-twitter/src/interactions.ts`" + }, + { + "number": 5, + "author": "monilpat", + "created_at": "2024-11-19T00:17:13Z", + "body": "I can't see the conflicts somehow. That is so weird. Yes - please resolve the conflicts first. Sending back to reviewer. " + }, + { + "number": 6, + "author": "jkbrooks", + "created_at": "2024-11-19T00:21:34Z", + "body": "@DanielHighETH would you fix the merge conflict in `packages/client-twitter/src/interactions.ts` so we can finish the review and merge this in?" + }, + { + "number": 7, + "author": "DanielHighETH", + "created_at": "2024-11-19T00:43:45Z", + "body": "> @DanielHighETH would you fix the merge conflict in `packages/client-twitter/src/interactions.ts` so we can finish the review and merge this in?\r\n\r\n@jkbrooks @monilpat a new code was merged 3 hours ago from https://github.com/ai16z/eliza/pull/383 I'm not really sure if we should use my refractored code in this new code " + }, + { + "number": 8, + "author": "jkbrooks", + "created_at": "2024-11-19T20:55:17Z", + "body": "@DanielHighETH looks like there is still some value in your PR, even though we also have #383 \r\n\r\nPlease let us know if you think you can get to the merge conflict, we are still happy to merge. Otherwise, no worries, we can close this PR. " + }, + { + "number": 9, + "author": "monilpat", + "created_at": "2024-11-20T02:42:26Z", + "body": "Thanks for doing this and addressing all comments. Sorry to bother you again, but can we add any tests or screengrabs confirming that this does in fact improve the conversation and address? https://x.com/shawmakesmagic/status/1858248520060567911 " + }, + { + "number": 10, + "author": "DanielHighETH", + "created_at": "2024-11-20T22:52:36Z", + "body": "> Thanks for doing this and addressing all comments. Sorry to bother you again, but can we add any tests or screengrabs confirming that this does in fact improve the conversation and address? 
https://x.com/shawmakesmagic/status/1858248520060567911\r\n\r\nHey, I have no idea how to do it actually" + } + ] + }, + { + "number": 372, + "title": "Add Polymarket", + "state": "open", + "created_at": "2024-11-17 02:44:45", + "updated_at": "2024-11-17 02:45:23", + "author": "ponderingdemocritus", + "labels": "", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/372", + "messages": [ + { + "number": 0, + "author": "ponderingdemocritus", + "created_at": "2024-11-17T02:44:45Z", + "body": "Add a polymarket Plugin\r\n\r\nCould use this as architecture - https://github.com/Polymarket/agents?tab=readme-ov-file" + } + ] + }, + { + "number": 363, + "title": "feat: Give eliza search engine", + "state": "open", + "created_at": "2024-11-16 16:21:15", + "updated_at": "2024-11-21 01:42:44", + "author": "sw4geth", + "labels": "enhancement", + "assignees": "tcm390", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/363", + "messages": [ + { + "number": 0, + "author": "sw4geth", + "created_at": "2024-11-16T16:21:15Z", + "body": "Tavily API\r\nhttps://tavily.com/#features\r\n" + } + ] + }, + { + "number": 362, + "title": "Prediction market tooling", + "state": "open", + "created_at": "2024-11-16 16:20:14", + "updated_at": "2024-11-16 16:20:14", + "author": "sw4geth", + "labels": "enhancement", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/362", + "messages": [ + { + "number": 0, + "author": "sw4geth", + "created_at": "2024-11-16T16:20:14Z", + "body": "Give eliza prediction market tooling like this repo from Gnosis\r\n\r\nhttps://github.com/gnosis/prediction-market-agent" + } + ] + }, + { + "number": 357, + "title": "feat(plugin): add ICP token creation support", + "state": "open", + "created_at": "2024-11-16 12:39:36", + "updated_at": "2024-11-19 15:52:41", + "author": "asDNSk", + "labels": "", + "assignees": "", + "comments": 4, + "url": "https://github.com/ai16z/eliza/pull/357", + "messages": [ + { + "number": 0, + "author": "asDNSk", + "created_at": "2024-11-16T12:39:36Z", + "body": "- Add plugin-icp to enable token creation on Pickpump platform\r\n- Implement createToken functionality for Internet Computer Protocol (ICP)\r\n- Support ICP token standard and specifications\r\n- Enable seamless integration with Pickpump's token creation flow\r\n\r\nBREAKING CHANGE: None\r\n" + }, + { + "number": 1, + "author": "lalalune", + "created_at": "2024-11-16T20:05:18Z", + "body": "this is fucking rad, will review later" + }, + { + "number": 2, + "author": "monilpat", + "created_at": "2024-11-19T00:26:26Z", + "body": "Also feel free to fix the PR Title check, add in a a screen grab of successful token creation, and consider an integration test to simulate the entire token creation flow using mocks or a test canister to ensure the plugin works end-to-end. Also can we update the plugin associated READMEs so it is easier to leverage. Thanks so much! So sorry for all the comments! You are a badass and did some great work here! " + }, + { + "number": 3, + "author": "asDNSk", + "created_at": "2024-11-19T09:20:39Z", + "body": "Thank u for ur review, I will fix the issues. 
@monilpat " + }, + { + "number": 4, + "author": "NashAiomos", + "created_at": "2024-11-19T15:52:39Z", + "body": "Oh yeah " + } + ] + }, + { + "number": 352, + "title": "Create an Eliza Trained on Dev Documentation", + "state": "open", + "created_at": "2024-11-16 07:27:22", + "updated_at": "2024-11-16 07:27:22", + "author": "awidearray", + "labels": "enhancement,Bounty", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/352", + "messages": [ + { + "number": 0, + "author": "awidearray", + "created_at": "2024-11-16T07:27:22Z", + "body": "\r\n\r\n# **Request for Proposal: Eliza AI Agent Framework Integration and Development**\r\n\r\n## **Overview**\r\n\r\nWe invite skilled developers or teams to integrate and enhance the **Eliza AI Agent Framework** by ai16z. The goal is to deploy an advanced AI-powered chatbot that onboards new developers into our ecosystem using its own development documentation.\r\n\r\n### **Project Milestones**\r\n\r\nThis project is structured into three main milestones:\r\n\r\n#### **Milestone 1: Chatbot Integration with Discord, Twitter, and Telegram**\r\n- **Objective**: Integrate the Eliza AI Agent Framework with Discord, Twitter, and Telegram to create a conversational chatbot.\r\n- **Key Tasks**:\r\n - Seamless user interaction across platforms using APIs and protocols.\r\n - Basic conversation handling: input processing, intent identification, and response generation.\r\n- **Deadline**: Deliver a functional chatbot on all three platforms within 6 weeks.\r\n\r\n#### **Milestone 2: Web Interface Development using Next.js**\r\n- **Objective**: Design and develop a simple web interface using Next.js to interact with the Eliza chatbot.\r\n- **Key Tasks**:\r\n - Create a user-friendly interface for chat interactions.\r\n - Ensure responsiveness, accessibility, and browser compatibility.\r\n- **Deadline**: Deliver a functional web interface within 8 weeks.\r\n\r\n#### **Milestone 3: Trained Voice Agent Development**\r\n- **Objective**: Implement a trained voice agent for the Eliza AI using technologies like Eleven Labs.\r\n- **Key Tasks**:\r\n - Enable voice-based interactions with speech recognition and text-to-speech.\r\n - Integrate voice functionality with the chatbot and web interface.\r\n- **Deadline**: Deliver a functional voice agent within 12 weeks.\r\n\r\n## **Technical Requirements**\r\n\r\n- ![JavaScript](https://img.shields.io/badge/JavaScript-F7DF1E?style=for-the-badge&logo=javascript&logoColor=black)\r\n- ![Node.js](https://img.shields.io/badge/Node.js-339933?style=for-the-badge&logo=nodedotjs&logoColor=white)\r\n- ![Next.js](https://img.shields.io/badge/Next.js-000000?style=for-the-badge&logo=nextdotjs&logoColor=white)\r\n- **Experience Required**:\r\n - Proficiency in JavaScript, Node.js, and Next.js.\r\n - Familiarity with Discord, Twitter, and Telegram APIs for bot development.\r\n - Understanding of AI and ML concepts, including NLP and intent identification.\r\n - Knowledge in speech recognition and text-to-speech technologies.\r\n - Strong principles in web development, accessibility, and responsive design.\r\n" + } + ] + }, + { + "number": 338, + "title": "discord voice", + "state": "open", + "created_at": "2024-11-15 22:50:47", + "updated_at": "2024-11-22 23:34:48", + "author": "tcm390", + "labels": "", + "assignees": "", + "comments": 1, + "url": "https://github.com/ai16z/eliza/pull/338", + "messages": [ + { + "number": 0, + "author": "tcm390", + "created_at": "2024-11-15T22:50:47Z", + "body": "related: 
https://github.com/ai16z/eliza/issues/244\r\n\r\nThis PR aims to improve and fix the Discord bot's voice functionality and enhance the code structure for better usability.\r\n\r\n**1. Fix Bot Voice Singleton Issue**\r\nResolved an issue where the bot's voice functionality failed because the service instance was shared as a singleton across different services. Introduced a Map to ensure each service type has its own singleton instance. This guarantees only one instance of each subclass is created and reused.\r\n\r\n**2. Add shouldRespond Function**\r\nAdded a shouldRespond function for the bot's voice feature to control when the bot should respond to user voice inputs.\r\n\r\n**3. Refactor Transcription Process**\r\nRefactored the transcription process by adding a debounce function, ensuring voice messages are processed only when silence is detected.\r\n\r\n**4. Enable Bot to Respond to Text Messages in Voice Channels**\r\nUpdated the bot to handle text messages sent in voice channels.\r\n\r\n**5. Move Template Functions to templates.ts**\r\nRelocated template-related functions to a new templates.ts file for reusability in message.ts and voice.ts.\r\n\r\n**6. Add Optional DISCORD_VOICE_CHANNEL_ID in .env**\r\nIntroduced a new constant, DISCORD_VOICE_CHANNEL_ID, in the .env file to allow users to specify a voice channel the bot should join.\r\n\r\n**7. Implemented Audio Playback Interrupt Mechanism**\r\nAdd a sliding window buffer that monitors the audio volume while the agent is speaking. If the average volume of the user's audio exceeds the defined threshold, it indicates active speaking. When active speaking is detected, stop the agent's current audio playback to avoid overlap." + }, + { + "number": 1, + "author": "ponderingdemocritus", + "created_at": "2024-11-22T23:34:47Z", + "body": "can you fix conflicts?" + } + ] + }, + { + "number": 319, + "title": "no action response found in the response content for twitter or tg clients", + "state": "open", + "created_at": "2024-11-14 17:20:45", + "updated_at": "2024-11-14 17:20:45", + "author": "o-on-x", + "labels": "bug", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/319", + "messages": [ + { + "number": 0, + "author": "o-on-x", + "created_at": "2024-11-14T17:20:45Z", + "body": " [\"\u26a0 No action found in the response content.\"] \r\n \r\n no actions on twitter & tg" + } + ] + }, + { + "number": 317, + "title": "Get to a place where we can reliably create release tags", + "state": "open", + "created_at": "2024-11-14 16:11:04", + "updated_at": "2024-11-15 01:21:42", + "author": "sirkitree", + "labels": "enhancement,help wanted,Priority: High,infrastructure,technical-debt", + "assignees": "", + "comments": 1, + "url": "https://github.com/ai16z/eliza/issues/317", + "messages": [ + { + "number": 0, + "author": "sirkitree", + "created_at": "2024-11-14T16:11:04Z", + "body": "**Problem:**\r\n\r\nOur open-source project is experiencing rapid growth with numerous contributions from our community members. While we have the infrastructure to create releases, we currently face challenges in ensuring that these releases are reliable and stable. 
The key issues are:\r\n\r\n- **Broken Test Suite:** Our automated tests are failing, which means we cannot confidently verify the stability and functionality of new code changes.\r\n- **Rapid Code Changes:** With many contributors and fast-moving code, it's difficult to track and integrate changes without introducing new bugs.\r\n- **Unreliable Releases:** Without a consistent and dependable release process, we risk distributing versions of our project that may not meet quality standards.\r\n\r\n---\r\n\r\n**Proposed Solution:**\r\n\r\nTo overcome these challenges and establish a reliable release process, we propose the following steps:\r\n\r\n1. **Prioritize Fixing the Tests:**\r\n - **Assess and Repair Tests:** Identify all failing tests and determine the causes. Fix broken tests to ensure they accurately reflect code functionality.\r\n - **Community Involvement:** Encourage contributors to help fix tests by creating specific issues and assigning tasks.\r\n - Testing issues can be found with the 'testing' label: https://github.com/ai16z/eliza/issues?q=is%3Aopen+is%3Aissue+label%3Atesting\r\n\r\n2. **Establish Continuous Integration (CI):**\r\n - **Automate Testing:** Set up a CI system to automatically run the test suite on every commit and pull request. For the most part this is already in place but only runs a basic test currently.\r\n - **Enforce Test Passing:** Configure the CI to block merging of code that doesn't pass all tests, keeping the main branch stable. We currently have these running on every pull request, but it's not a blocking rule.\r\n\r\n3. **Implement a Branching Strategy:**\r\n - **Use Feature Branches:** Have contributors work on separate branches for new features or fixes to isolate changes. For the most part this is happening already.\r\n - **Adopt Release Branches:** Create dedicated branches for preparing releases, allowing for final testing and stabilization.\r\n\r\n4. **Define a Release Workflow:**\r\n - **Release Types:** Clearly define different release types (e.g., alpha, beta, stable) and their purposes.\r\n - **Semantic Versioning:** Use semantic versioning to communicate the nature of changes in each release.\r\n\r\n5. **Automate the Release Process:**\r\n - **Automated Builds and Tagging:** Utilize tools to automate the building of releases and tagging of versions.\r\n - **Generate Release Notes:** Automatically create release notes from commit messages to document changes. This is already in place and is updated when a tag is created through CI: https://github.com/ai16z/eliza/blob/main/CHANGELOG.md\r\n\r\n6. **Establish Release Criteria:**\r\n - **Set Clear Requirements:** Define what must be completed before a release (passing tests, code reviews, updated documentation).\r\n - **Use a Release Checklist:** Implement a checklist to ensure all criteria are met before releasing.\r\n\r\n7. **Enhance Collaboration and Communication:**\r\n - **Regular Updates:** Hold regular meetings or send updates to keep everyone informed about release progress.\r\n - **Transparency:** Make the release roadmap and plans accessible to all contributors.\r\n\r\n8. **Implement Code Reviews and Quality Assurance:**\r\n - **Mandatory Code Reviews:** Require that all code changes are reviewed by at least one other contributor. We've talked about this in discord, but it hasn't been implemented as a rule yet.\r\n - **Use Quality Tools:** Integrate static analysis and other quality assurance tools to catch issues early.\r\n\r\n9. 
**Plan for Different Release Channels:**\r\n - **Stable Releases:** Provide thoroughly tested versions for general use.\r\n - **Beta and Nightly Builds:** Offer less stable versions for testing new features and gathering feedback.\r\n\r\n10. **Monitor and Iterate:**\r\n - **Collect Feedback:** After each release, gather input from users and contributors to identify improvements.\r\n - **Continuous Improvement:** Regularly refine the release process based on feedback and performance metrics.\r\n\r\n---\r\n\r\n**Next Steps:**\r\n\r\n- **Immediate Actions:**\r\n - **Fix the Test Suite:** Prioritize repairing all broken tests with the help of the community.\r\n - **Set Up CI System:** Implement a continuous integration pipeline to automate testing and enforce code quality.\r\n\r\n- **Community Engagement:**\r\n - **Communicate Plans:** Share this proposed solution with the community to align efforts and expectations.\r\n - **Encourage Participation:** Invite contributors to take active roles in improving the testing and release processes.\r\n\r\n- **Documentation:**\r\n - **Create Guidelines:** Document the release workflow, branching strategy, and contribution guidelines.\r\n - **Provide Resources:** Offer tutorials or guides on how to work with the new processes and tools.\r\n\r\n---\r\n\r\nBy taking these steps, we aim to build a robust and reliable release process that can handle rapid development while maintaining high-quality standards. This will not only improve the stability of our releases but also enhance collaboration within our community, ultimately leading to a better project for all users and contributors." + }, + { + "number": 1, + "author": "yodamaster726", + "created_at": "2024-11-15T01:21:41Z", + "body": "this is a great idea. we need to implement something that gets us to some continuous incremental stability.\r\ncreate some basic unit tests that run via github actions that notes a certain level of stability and only promote code to main if it's good. also maybe have another dev-readme that maybe documents what all the functions are that are known to be stable, what is under development and what is in the pipeline.\r\nI know, most of this is already covered above - but just had to express my thoughts. This project is awesome, let's keep making it better." + } + ] + }, + { + "number": 305, + "title": "Managing Divergence Across the Eliza Ecosystem (Multiple Forks)", + "state": "open", + "created_at": "2024-11-14 00:44:03", + "updated_at": "2024-11-19 03:12:09", + "author": "jkbrooks", + "labels": "enhancement", + "assignees": "", + "comments": 3, + "url": "https://github.com/ai16z/eliza/issues/305", + "messages": [ + { + "number": 0, + "author": "jkbrooks", + "created_at": "2024-11-14T00:44:03Z", + "body": "**Description:**\r\n\r\nThe Eliza project is gaining traction, and many developers are forking the repository to create custom agents or specialized implementations. This distributed development model is a strength, but it also creates a challenge: managing code divergence and ensuring interoperability across the growing Eliza ecosystem. 
This ticket aims to establish a community-driven plan for addressing this challenge, fostering collaboration, and maximizing code reuse while allowing for customization and innovation within individual forks.\r\n\r\n**Problem:**\r\n\r\n* **Multiple Diverging Forks:** As more developers fork Eliza, the ecosystem will naturally fragment into various specialized implementations.\r\n* **Increased Merge Complexity:** Merging changes between forks, or between forks and the main repository, will become increasingly complex and error-prone as divergence increases.\r\n* **Maintenance Overhead:** Each fork incurs its own maintenance burden, requiring developers to track upstream changes, resolve merge conflicts, and ensure compatibility.\r\n* **Duplication of Effort and Reinventing the Wheel:** Developers in different forks might unknowingly duplicate efforts by re-implementing similar features.\r\n* **Reduced Interoperability:** Divergence can lead to reduced interoperability between different Eliza agents or implementations.\r\n\r\n**Goals:**\r\n\r\n* **Minimize Divergence (Where Feasible):** Encourage practices that minimize unnecessary code divergence across forks.\r\n* **Facilitate Collaboration and Knowledge Sharing:** Foster communication and collaboration between maintainers of different forks and the core Eliza team.\r\n* **Streamline Merging and Integration:** Establish best practices and potentially develop tools for efficiently merging changes between forks and the main repository.\r\n* **Maximize Code Reuse:** Promote modular design and encourage the development of reusable plugins and components that can be shared across the ecosystem.\r\n* **Maintain Core Eliza Stability:** Ensure that core Eliza functionality remains stable and reliable while allowing for innovation and customization within forks.\r\n\r\n**Near-Term Plan (Community-Driven):**\r\n\r\n1. **Fork Registry (Website or GitHub Page):** Create a central registry of known Eliza forks. This registry could include project descriptions, contact information for maintainers, and links to the forked repositories. This facilitates discovery and collaboration.\r\n\r\n2. **Communication Channels (Discord, Forum):** Establish dedicated communication channels (e.g., a Discord server, a forum category) for discussing fork management, sharing best practices, and coordinating development efforts.\r\n\r\n3. **Modularization and Plugin Development:** Encourage developers to refactor fork-specific code into modular plugins or extensions that can be easily shared and integrated with other Eliza implementations.\r\n\r\n4. **Standardized Plugin Interface:** Develop and document a clear, standardized interface for creating Eliza plugins. This promotes interoperability and simplifies plugin integration.\r\n\r\n5. **Shared Test Suite:** Create a shared test suite that covers core Eliza functionality. Encourage fork maintainers to incorporate this test suite into their continuous integration pipelines to ensure compatibility.\r\n\r\n6. **Documentation:** Create documentation and tutorials on best practices for forking Eliza, managing divergence, and contributing back to the ecosystem.\r\n\r\n**Future Considerations (Speculative):**\r\n\r\n1. **Automated Merging Tools:** Explore and potentially develop automated merging tools that can help resolve conflicts and integrate changes between forks more efficiently.\r\n2. 
**Distributed Version Control System (DVCS) Workflows:** Investigate whether alternative version control systems or workflows (like Git submodules or subtree merging) could facilitate managing multiple forks.\r\n3. **Inter-Agent Communication Protocol:** If AI-driven code synchronization is a long-term goal, develop a protocol or language for communication between agents managing different forks.\r\n4. **Federated Learning for Agents:** Explore the potential of federated learning techniques, allowing agents in different forks to collaboratively train and improve shared models without directly exchanging sensitive data.\r\n\r\n**Acceptance Criteria:**\r\n\r\n* A community-driven plan is established and documented for managing divergence across the Eliza ecosystem.\r\n* The plan includes specific steps for facilitating collaboration, streamlining merging, maximizing code reuse, and maintaining core Eliza stability.\r\n* The plan is actively communicated and promoted within the Eliza community.\r\n* Tools and resources (fork registry, communication channels, shared test suite) are created to support the plan's implementation.\r\n\r\n\r\n\r\nThis ticket expands the scope to address the broader challenge of managing multiple forks within the Eliza ecosystem. The key shift is towards a community-driven approach, emphasizing collaboration, knowledge sharing, and the development of shared resources and standards. This fosters a more vibrant and sustainable ecosystem while allowing individual forks the flexibility to innovate and adapt Eliza to their specific needs." + }, + { + "number": 1, + "author": "pukadev", + "created_at": "2024-11-14T18:31:23Z", + "body": "ill add a lot more later but these prompts really help. We ant PR reviewers to be able to grok a PR fast... when high user/dev/bot value PRs don't get reviewed quickly (not teams fault we are all busy) this is a main contributor to Sporking :)\r\n\r\nhttps://cursor.directory/" + }, + { + "number": 2, + "author": "lalalune", + "created_at": "2024-11-15T14:10:18Z", + "body": "Registry is a great idea, please organize and execute" + }, + { + "number": 3, + "author": "jkbrooks", + "created_at": "2024-11-19T03:11:47Z", + "body": " Noting that we now have a repository here. https://github.com/timshelxyz/eliza https://elizas.world/\r\n \r\n Thanks @timshelxyz \r\n \r\n Reality Spiral Team is still working on thoughts for some of the items mentioned in top level issue. It would be great for agents themselves to be aware of each other and be more thoughtful about how divergence is managed. 
Some preliminary thoughts were posted in these tweets: https://x.com/reality_spiral/status/1858372677704306717 https://x.com/reality_spiral/status/1858278179108171793?s=46 " + } + ] + }, + { + "number": 303, + "title": "EVM Integration", + "state": "open", + "created_at": "2024-11-13 23:44:41", + "updated_at": "2024-11-20 10:49:49", + "author": "madjin", + "labels": "enhancement,Bounty", + "assignees": "", + "comments": 1, + "url": "https://github.com/ai16z/eliza/issues/303", + "messages": [ + { + "number": 0, + "author": "madjin", + "created_at": "2024-11-13T23:44:41Z", + "body": "EVM Integration\r\n\r\n- Integrate Ethereum wallet and demonstrate Eth L1 and Base L2\r\n- Copy and paste solana plugin and recreate with EVM\r\n- Swapping action, wallet provider\r\n\r\nReward: $1500 USD in $degenai as seen on https://ai16z.github.io/" + }, + { + "number": 1, + "author": "leonprou", + "created_at": "2024-11-20T10:49:48Z", + "body": "Gm @madjin \r\nwould love to work on this one.\r\n\r\nCouple of questions and remarks:\r\n- Just need to figure our which services we want to use in Base/ETH. I suggest Aerodrome/Uniswap, but maybe using li.fi/enso/0x scales better.\r\n- What's good alternative for pump.fun on Base/Ethereum?\r\n" + } + ] + }, + { + "number": 302, + "title": "Obsidian Integration", + "state": "open", + "created_at": "2024-11-13 23:43:52", + "updated_at": "2024-11-13 23:43:52", + "author": "madjin", + "labels": "enhancement,Bounty", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/302", + "messages": [ + { + "number": 0, + "author": "madjin", + "created_at": "2024-11-13T23:43:52Z", + "body": "## Obsidian Integration\r\n\r\n- Integrate Obsidian and demonstrate deep traversal and search of an Obsidian memory store\r\n- Integrate [Naval database](https://www.reddit.com/r/NavalRavikant/comments/oza0bl/i_made_a_digital_version_of_navals_brain_free/?rdt=41536) as example\r\n\r\nReward: $1000 USD in $ai16z + $1000 USD in $degenai as seen on https://ai16z.github.io/\r\n\r\n" + } + ] + }, + { + "number": 301, + "title": "Twitter Spaces Voice Client", + "state": "open", + "created_at": "2024-11-13 23:41:03", + "updated_at": "2024-11-18 13:56:19", + "author": "madjin", + "labels": "enhancement,Bounty", + "assignees": "", + "comments": 2, + "url": "https://github.com/ai16z/eliza/issues/301", + "messages": [ + { + "number": 0, + "author": "madjin", + "created_at": "2024-11-13T23:41:03Z", + "body": "## Twitter Spaces Voice Client\r\n\r\nParity with Discord Voice Client, but on Twitter Space\r\n\r\n- Must listen to and respond with voice\r\n- Must be able to join a twitter space from a linked tweet\r\n- Must be able to accept speaker role\r\n\r\nReward: $1000 USD in $ai16z + $1000 USD in $degenai as seen on https://ai16z.github.io/\r\n\r\n" + }, + { + "number": 1, + "author": "pukadev", + "created_at": "2024-11-14T18:01:13Z", + "body": "def use the openai real time api for a quick prototype to see if it catches on as a useful feature before spinning up your own multimodal TTS: we have worked with this openai recently.\r\n\r\n\r\n\r\nhttps://github.com/openai/openai-realtime-api-beta\r\n\r\nthe key these though is making sure u control interruptions well at your llm responses and audio stream processing layer \r\n(separate clean vocal segments, etc) ... there is a pace in real time voice that works best for the above api.. talking past each other., etc. and other background noise is a harder problem... 
it doesnt handle that very well unless u take over and dont turn on VAD for their api to do it for you.\r\n" + }, + { + "number": 2, + "author": "kukiui", + "created_at": "2024-11-18T13:56:18Z", + "body": "https://github.com/kyutai-labs/moshi\r\nsimilar to advanced voice mode MIT License\r\nI can try to add the required functionality of joining spaces if its something you need" + } + ] + }, + { + "number": 300, + "title": "Farcaster Client / Plugin", + "state": "open", + "created_at": "2024-11-13 23:39:35", + "updated_at": "2024-11-19 00:45:00", + "author": "madjin", + "labels": "enhancement,Bounty", + "assignees": "", + "comments": 2, + "url": "https://github.com/ai16z/eliza/issues/300", + "messages": [ + { + "number": 0, + "author": "madjin", + "created_at": "2024-11-13T23:39:35Z", + "body": "Parity with Twitter client, but on Farcaster\r\n\r\nReward: $1000 USD in $ai16z + $1000 USD in $degenai\r\n\r\nAs seen on https://ai16z.github.io/" + }, + { + "number": 1, + "author": "rubinovitz", + "created_at": "2024-11-14T05:53:37Z", + "body": "Do you mean you want a Twitter clone or that you want Eliza to be able to have its existing Twitter bot functionality on Farcaster?" + }, + { + "number": 2, + "author": "lalalune", + "created_at": "2024-11-15T14:10:44Z", + "body": "I mean that we want the same functionality in Farcaster" + } + ] + }, + { + "number": 298, + "title": "Docs improvements bounty ideas", + "state": "open", + "created_at": "2024-11-13 22:52:59", + "updated_at": "2024-11-15 01:21:25", + "author": "madjin", + "labels": "bug,documentation,Bounty", + "assignees": "", + "comments": 2, + "url": "https://github.com/ai16z/eliza/issues/298", + "messages": [ + { + "number": 0, + "author": "madjin", + "created_at": "2024-11-13T22:52:59Z", + "body": "- read more = broken link: https://ai16z.github.io/eliza/docs/community/stream-notes/\r\n- readme is outdated\r\n- pnpm run shell not valid\r\n- go through and test the docs to be valid\r\n- add jsdocs comments to all the code\r\n- add links to the code in code snippet examples\r\n- check and fix all the issues in github\r\n\r\nwill add more things to fix for a bounty on https://ai16z.github.io/" + }, + { + "number": 1, + "author": "CHASERS12", + "created_at": "2024-11-14T21:04:56Z", + "body": "When trying to run your custom character in dev mode - pnpm run dev - you need to *pin character to dev shell script\r\n" + }, + { + "number": 2, + "author": "odilitime", + "created_at": "2024-11-15T01:21:25Z", + "body": "First bullet point is now addressed at https://github.com/ai16z/eliza/commit/951aa3eb8b262b54419b4b3b8ccfd1358ef3ec25 #321 just noting it so others don't attempt it" + } + ] + }, + { + "number": 290, + "title": "keyvaluestore", + "state": "open", + "created_at": "2024-11-13 14:01:14", + "updated_at": "2024-11-13 14:01:25", + "author": "alextitonis", + "labels": "", + "assignees": "alextitonis", + "comments": 0, + "url": "https://github.com/ai16z/eliza/pull/290", + "messages": [ + { + "number": 0, + "author": "alextitonis", + "created_at": "2024-11-13T14:01:14Z", + "body": "Key value store in the databases.\r\nNew Functions: \r\n* setValue\r\n* getValue\r\n* hasKey\r\n* deleteValue\r\n\r\nUsed for twitter creds" + } + ] + }, + { + "number": 284, + "title": "Quickstart guide is missing important info", + "state": "open", + "created_at": "2024-11-13 08:33:57", + "updated_at": "2024-11-15 17:10:11", + "author": "TonySimonovsky", + "labels": "bug,documentation", + "assignees": "", + "comments": 1, + "url": 
"https://github.com/ai16z/eliza/issues/284", + "messages": [ + { + "number": 0, + "author": "TonySimonovsky", + "created_at": "2024-11-13T08:33:57Z", + "body": "**Describe the bug**\r\n\r\nDoc pages:\r\n* https://github.com/ai16z/eliza/blob/main/docs/docs/guides/configuration.md\r\n* https://github.com/ai16z/eliza/blob/main/docs/docs/guides/basic-usage.md\r\n\r\nThis page a basic setup code but doesn't mention where to put it (folder/filename)\r\n" + }, + { + "number": 1, + "author": "odilitime", + "created_at": "2024-11-15T17:10:09Z", + "body": "Do you mean for like custom actions, db backends, and advanced config? Because the rest of the config has paths now.\r\n\r\nAlso basic-usage is gone" + } + ] + }, + { + "number": 278, + "title": "Linux dependency issues - discord voice client", + "state": "open", + "created_at": "2024-11-12 23:20:38", + "updated_at": "2024-11-16 17:30:35", + "author": "twilwa", + "labels": "bug", + "assignees": "", + "comments": 1, + "url": "https://github.com/ai16z/eliza/issues/278", + "messages": [ + { + "number": 0, + "author": "twilwa", + "created_at": "2024-11-12T23:20:38Z", + "body": "**Describe the bug**\r\n\r\n\r\nwhen starting the discord client, the bot fails\r\n\r\n**To Reproduce**\r\nadd discord client to character.json, pnpm start --character\r\n\r\n\r\n**Expected behavior**\r\ncharacter initializes successfully\r\n\r\n\r\n**Screenshots**\r\n\r\n\r\n\r\n**Additional context**\r\nsteps taken that resolve this issue:\r\n\r\n```\r\nsudo apt install node-gyp\r\n\r\npnpm add @octokit/core@3 -w\r\n\r\npnpm add @discordjs/opus@0.9.0 -w\r\n\r\npnpm add opusscript -w\r\n\r\npnpm add node-opus -w\r\n```\r\nthis results in the bot being able to join discord voice and transcribe audio without crashing and function normally in chat responses, general 'does it run off a fresh pull' tracking issue\r\n\r\n" + }, + { + "number": 1, + "author": "sirkitree", + "created_at": "2024-11-16T17:30:34Z", + "body": "Try the following:\r\n1. downgrade to node v20 `nvm install 20 && nvm use 20`\r\n1. remove node modules and lock file `rm -rf node_modules && rm pnpm-lock.yaml`\r\n1. 
rerun install `pnpm i`" + } + ] + }, + { + "number": 275, + "title": "advanced usage section of docs doesn't include instructions for memory management", + "state": "open", + "created_at": "2024-11-12 19:58:44", + "updated_at": "2024-11-19 20:48:39", + "author": "twilwa", + "labels": "bug,documentation", + "assignees": "", + "comments": 3, + "url": "https://github.com/ai16z/eliza/issues/275", + "messages": [ + { + "number": 0, + "author": "twilwa", + "created_at": "2024-11-12T19:58:44Z", + "body": "**Describe the bug**\r\n\r\n\r\nThe docs section simply explains that it has the functionality, it doesn't explain how it works or how to use it\r\n\r\n**To Reproduce**\r\n\r\n\r\ngo to the docs and search memory\r\n**Expected behavior**\r\n\r\n\r\ninstructions on how to do memory management\r\n**Screenshots**\r\n\r\n\r\n![image](https://github.com/user-attachments/assets/23ffc956-fd56-4d46-8608-a806efbb24f9)\r\n![image](https://github.com/user-attachments/assets/80390c52-ad18-4de0-9744-6f994a439fb2)\r\n![image](https://github.com/user-attachments/assets/668bb2da-2f2f-46a2-ba5c-795585679573)\r\n\r\n**Additional context**\r\n\r\n\r\nThe example I beleive is correct -- I was searching the docs for how to clear the memory db and rebuild it, and was unable to find the instructions" + }, + { + "number": 1, + "author": "madjin", + "created_at": "2024-11-13T00:05:46Z", + "body": "![image](https://github.com/user-attachments/assets/c41c7aaf-39ee-4cc6-ad9e-0e73f01dae9c)\r\nthis is from the new docs, hopefully it is a worthy improvement" + }, + { + "number": 2, + "author": "twilwa", + "created_at": "2024-11-13T00:16:08Z", + "body": "To some degree -- ideally, there's instructions about how I , as a developer or user, can CRUD memories. That's what I was looking for when I clicked the section" + }, + { + "number": 3, + "author": "Christiono", + "created_at": "2024-11-19T20:48:37Z", + "body": "Please help to update this as hard to configure memory management now for newbies" + } + ] + }, + { + "number": 271, + "title": "unable to run defaultcharacter with ModelProviderName.LLAMACLOUD local", + "state": "open", + "created_at": "2024-11-12 04:43:34", + "updated_at": "2024-11-17 00:56:16", + "author": "yodamaster726", + "labels": "bug,embedding", + "assignees": "", + "comments": 1, + "url": "https://github.com/ai16z/eliza/issues/271", + "messages": [ + { + "number": 0, + "author": "yodamaster726", + "created_at": "2024-11-12T04:43:34Z", + "body": "I'm trying to run the latest eliza with the defaultcharacter with the local Ollama. starts up fine but as soon as I try to interact with it, it crashes saying it can't find the embeddingModel, but that's not the one that's configured as part of LLAMACLOUD\r\n\r\n![image](https://github.com/user-attachments/assets/1c9ea2f5-8de2-44a4-ac77-d9c99b6dc03d)\r\n\r\n![image](https://github.com/user-attachments/assets/102774c1-c422-4c48-928f-e3f685d00d04)\r\n\r\nthis is the correct embedding and I have it installed:\r\n![image](https://github.com/user-attachments/assets/2eeba077-2113-42f3-96f8-e39cf84a8bda)\r\n\r\n![image](https://github.com/user-attachments/assets/f564efdc-2023-4f32-b433-153b06c44268)\r\n\r\nI even configured it in my .env file:\r\n![image](https://github.com/user-attachments/assets/c3be0618-e46e-466c-893c-abab31f92493)\r\n\r\nI'm starting at getting things running at the lowest level and then start adding clients one at a time.\r\n\r\nIt seems like there is something not right in DirectClient, but I can't put my finger on it. 
For sure, there seems to be a few other provider related things hardcoded in there.\r\n\r\nLet me know if I'm missing something. \r\n" + }, + { + "number": 1, + "author": "deepfates", + "created_at": "2024-11-17T00:56:15Z", + "body": "I think this is because of the mapping between model names in `modelProvider`. Solved this in my local character file by setting the lowercase version: `\"modelProvider\": \"llama_cloud\"`" + } + ] + }, + { + "number": 264, + "title": "Allow twitter client to configure who to reply to based on following relationship", + "state": "open", + "created_at": "2024-11-11 19:41:14", + "updated_at": "2024-11-12 14:00:10", + "author": "docherty", + "labels": "enhancement,Client: Twitter", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/264", + "messages": [ + { + "number": 0, + "author": "docherty", + "created_at": "2024-11-11T19:41:14Z", + "body": "It would be good to allow agent owners to be able to turn on/off who their twitter agents reply to.\r\n\r\nPreference is to just reply to accounts the agent is following.\r\n\r\nThis will reduce spam and allow agent owners to have a bit more control over interactions.\r\n\r\n\r\n**Additional context**\r\nProbably related to https://github.com/ai16z/eliza/issues/259\r\n" + } + ] + }, + { + "number": 260, + "title": "Agent is reposting images from other parts of conversation as its own image", + "state": "open", + "created_at": "2024-11-11 09:45:00", + "updated_at": "2024-11-12 14:00:31", + "author": "lalalune", + "labels": "enhancement,Client: Twitter", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/260", + "messages": [ + { + "number": 0, + "author": "lalalune", + "created_at": "2024-11-11T09:45:00Z", + "body": "For some reason, on Twitter, the agent will respond with an image which is a respost of images from other conversation it has had. This is clearly a bug. If it generates an image, that's cool, but it seems to be sending the old image." + } + ] + }, + { + "number": 259, + "title": "Fix queueing in Twitter so messages are not rate limited constantly", + "state": "open", + "created_at": "2024-11-11 09:38:16", + "updated_at": "2024-11-12 14:00:48", + "author": "lalalune", + "labels": "enhancement,Client: Twitter", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/259", + "messages": [ + { + "number": 0, + "author": "lalalune", + "created_at": "2024-11-11T09:38:16Z", + "body": "The way we are queueing messages is very shoddy and causes rate limiting." + } + ] + }, + { + "number": 258, + "title": "Fix Twitter Multi-agent, Characters respond to each other's messages", + "state": "open", + "created_at": "2024-11-11 09:37:38", + "updated_at": "2024-11-12 14:01:21", + "author": "lalalune", + "labels": "enhancement,Client: Twitter,characters", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/258", + "messages": [ + { + "number": 0, + "author": "lalalune", + "created_at": "2024-11-11T09:37:38Z", + "body": "Right now if we run two agents at the same time, only one responds on twitter and uses the personality of the other agent.\r\n\r\nWe need to make sure that this is not happening, and that each responds on their own." 
+ } + ] + }, + { + "number": 244, + "title": "Add shouldRespond handler to voice", + "state": "open", + "created_at": "2024-11-09 01:50:46", + "updated_at": "2024-11-10 14:29:06", + "author": "lalalune", + "labels": "enhancement", + "assignees": "alextitonis,tcm390", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/244", + "messages": [ + { + "number": 0, + "author": "lalalune", + "created_at": "2024-11-09T01:50:46Z", + "body": "Right now the voice is responding to all inputs, but should have a shouldRespond handler. This was not implemented for response speed, but should definitely be there. We can also call should respond and generate text simultaneously if we want to optimize speed, although with gpt-4o-mini the should respond should be pretty fast." + } + ] + }, + { + "number": 243, + "title": "Move cache to database", + "state": "open", + "created_at": "2024-11-09 01:49:18", + "updated_at": "2024-11-09 01:55:15", + "author": "lalalune", + "labels": "enhancement", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/243", + "messages": [ + { + "number": 0, + "author": "lalalune", + "created_at": "2024-11-09T01:49:18Z", + "body": "We should move all caching, including token cache, content cache, etc into the database. We can store these in a cache manager which could be connected to the memory manager so we can add to the adapters for each database adapter.\r\n\r\n1. Move all content caching into memory manager and use store cache and retrieve cache calls\r\n2. Add and test with every database adapter, including sqlite, sqljs and postgres\r\n\r\nmake sure twitters credentials not broken in tweetcache -- or are saving to db\r\nwe should store models somewhere nice\r\nstore credentials in db\r\ncache prompts in db?\r\nfix tokencache\r\ncontent_cache ?" + } + ] + }, + { + "number": 242, + "title": "Store all local models in /models folder, with overridable model path", + "state": "open", + "created_at": "2024-11-09 01:47:33", + "updated_at": "2024-11-09 01:47:33", + "author": "lalalune", + "labels": "enhancement", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/242", + "messages": [ + { + "number": 0, + "author": "lalalune", + "created_at": "2024-11-09T01:47:33Z", + "body": "We should make sure that all models stored by llama, whisper, embedding, etc all get stored in a /models folder that is easy to find and clean. This way the repo doesn't get huge and locating models isn't messy.\r\n\r\nHere are the steps:\r\n1. Look for all services where models are being loaded, including llama, fastembed, whisper, etc\r\n2. Change all paths to be eliza/models\r\n3. Add a path checker if that doesn't exist with some sane defaults to handle any case\r\n4. Add a model path override to functions so the packaged version doesn't go somewhere weird." 
+ } + ] + }, + { + "number": 241, + "title": "Add storeCredential and getCredential key value store, store secrets in db", + "state": "open", + "created_at": "2024-11-09 01:45:58", + "updated_at": "2024-11-10 14:30:04", + "author": "lalalune", + "labels": "enhancement", + "assignees": "alextitonis", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/241", + "messages": [ + { + "number": 0, + "author": "lalalune", + "created_at": "2024-11-09T01:45:58Z", + "body": "Right now we're storing twitter credentials and other credentials in character file, as well as in tweet cache for twitter login cookies, and other places.\r\n\r\nInstead, we should store secrets and credentials per character\r\n\r\nWe can add these to the memory manager and database adapters to keep the DB unified\r\n\r\nSo there are a few different parts to this:\r\n\r\n1. Add credential key value store to MemoryManager, keyed by agentId\r\n2. Store and retrieve Twitter cookies from this\r\n3. Add script to store and retrieve secrets by agentId / agentName\r\n4. Add checking to get secrets as priority and fall back to character file secrets\r\n\r\nThis will enable us to store/recall secrets later in a platform, and keep secrets internal to our database without having to put them in characters." + } + ] + }, + { + "number": 240, + "title": "Move embeddings to a service and a service and add ServiceType.EMBEDDING", + "state": "open", + "created_at": "2024-11-09 01:42:12", + "updated_at": "2024-11-09 01:42:13", + "author": "lalalune", + "labels": "enhancement", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/240", + "messages": [ + { + "number": 0, + "author": "lalalune", + "created_at": "2024-11-09T01:42:12Z", + "body": "I've added fast-embed into the new branch (soon to be merged) which embeds inside the core." 
+ } + ] + }, + { + "number": 237, + "title": "On PNPM Install this happens", + "state": "open", + "created_at": "2024-11-08 17:31:11", + "updated_at": "2024-11-12 13:55:47", + "author": "cgallic", + "labels": "bug", + "assignees": "", + "comments": 5, + "url": "https://github.com/ai16z/eliza/issues/237", + "messages": [ + { + "number": 0, + "author": "cgallic", + "created_at": "2024-11-08T17:31:11Z", + "body": "packages/core postinstall$ npx playwright install-deps && npx playwright install\r\n\u2502 Installing dependencies...\r\n\u2502 Switching to root user to install dependencies...\r\n\u2502 Get:1 http://nova.clouds.archive.ubuntu.com/ubuntu lunar InRelease [267 kB]\r\n\u2502 Get:2 http://nova.clouds.archive.ubuntu.com/ubuntu lunar-updates InRelease [109 kB]\r\n\u2502 Get:3 http://nova.clouds.archive.ubuntu.com/ubuntu lunar-backports InRelease [99.9 kB]\r\n\u2502 Hit:4 http://security.ubuntu.com/ubuntu lunar-security InRelease\r\n\u2502 Fetched 475 kB in 1s (421 kB/s)\r\n\u2502 Reading package lists...\r\n\u2502 Reading package lists...\r\n\u2502 Building dependency tree...\r\n\u2502 Reading state information...\r\n\u2502 E: Unable to locate package libasound2t64\r\n\u2502 E: Unable to locate package libatk-bridge2.0-0t64\r\n\u2502 E: Couldn't find any package by glob 'libatk-bridge2.0-0t64'\r\n\u2502 E: Couldn't find any package by regex 'libatk-bridge2.0-0t64'\r\n\u2502 E: Unable to locate package libatk1.0-0t64\r\n\u2502 E: Couldn't find any package by glob 'libatk1.0-0t64'\r\n\u2502 E: Couldn't find any package by regex 'libatk1.0-0t64'\r\n\u2502 E: Unable to locate package libatspi2.0-0t64\r\n\u2502 E: Couldn't find any package by glob 'libatspi2.0-0t64'\r\n\u2502 E: Couldn't find any package by regex 'libatspi2.0-0t64'\r\n\u2502 E: Unable to locate package libcups2t64\r\n\u2502 E: Unable to locate package libglib2.0-0t64\r\n\u2502 E: Couldn't find any package by glob 'libglib2.0-0t64'\r\n\u2502 E: Couldn't find any package by regex 'libglib2.0-0t64'\r\n\u2502 E: Unable to locate package libgtk-3-0t64\r\n\u2502 E: Unable to locate package libicu74\r\n\u2502 E: Unable to locate package libevent-2.1-7t64\r\n\u2502 E: Couldn't find any package by glob 'libevent-2.1-7t64'\r\n\u2502 E: Couldn't find any package by regex 'libevent-2.1-7t64'\r\n\u2502 E: Unable to locate package libpng16-16t64\r\n\u2502 E: Unable to locate package libvpx9\r\n\u2502 Failed to install browser dependencies\r\n\u2502 Error: Installation process exited with code: 100\r\n\u2514\u2500 Failed in 7s at /home/ubuntu/dev/eliza/packages/core\r\n\u2009ELIFECYCLE\u2009 Command failed with exit code 1.\r\n\u2009\r\n\u2009\r\n\u2009Getting this error" + }, + { + "number": 1, + "author": "cgallic", + "created_at": "2024-11-08T17:32:11Z", + "body": "Using Ubuntu \r\nDistributor ID: Ubuntu\r\nDescription: Ubuntu 23.04\r\nRelease: 23.04\r\nCodename: lunar" + }, + { + "number": 2, + "author": "lalalune", + "created_at": "2024-11-09T01:54:51Z", + "body": "Try this:\r\n```\r\nsudo apt-get update\r\nsudo apt-get install build-essential\r\n```\r\n\r\nMight be some underlying system dependencies You might also need to install `ffmpeg`" + }, + { + "number": 3, + "author": "bathrobe", + "created_at": "2024-11-09T13:30:52Z", + "body": "same, the add'l installs don't help. 
i'm on pop!_os" + }, + { + "number": 4, + "author": "lalalune", + "created_at": "2024-11-10T04:36:19Z", + "body": "Error is `npx playwright install-deps && npx playwright install`, I can think of a fix" + }, + { + "number": 5, + "author": "cgallic", + "created_at": "2024-11-12T13:55:45Z", + "body": "https://github.com/microsoft/playwright/issues/23296 Think this is it since I was on Ubuntu 23" + } + ] + }, + { + "number": 235, + "title": "Move all models to model providers in a plugin", + "state": "open", + "created_at": "2024-11-08 04:22:09", + "updated_at": "2024-11-09 01:55:46", + "author": "lalalune", + "labels": "enhancement", + "assignees": "o-on-x", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/235", + "messages": [ + { + "number": 0, + "author": "lalalune", + "created_at": "2024-11-08T04:22:09Z", + "body": "Right now we are stacking all of our model data into the ModelProvider abstraction we have, which selects by name and has some defaults.\r\n\r\nWe probably want to make an abstraction that can be selected/registered by the user and automatically handled for json and ts by the agent package.\r\n\r\nSomething like this, for each model, then we can register the model provider with the agent.\r\n\r\n```\r\ntype ModelProvider = {\r\n endpoint: string;\r\n settings: {\r\n [key: string]: any;\r\n };\r\n model: {\r\n [key in ModelClass]: string;\r\n };\r\n generateText(runtime: IAgentRuntime, context: string, modelClass: ModelClass, stop?: string[] ): Promise;\r\n generateImage(runtime: IAgentRuntime, prompt: string, width: number, height: number, count?: number): Promise;\r\n embed(runtime: IAgentRuntime, input: string): Promise;\r\n}\r\n\r\nexport const OpenAIModelProvider: ModelProvider = {\r\n endpoint: \"https://api.openai.com/v1\",\r\n settings: {\r\n stop: [],\r\n maxInputTokens: 128000,\r\n maxOutputTokens: 8192,\r\n frequency_penalty: 0.0,\r\n presence_penalty: 0.0,\r\n temperature: 0.6,\r\n },\r\n model: {\r\n [ModelClass.SMALL]: \"gpt-4o-mini\",\r\n [ModelClass.MEDIUM]: \"gpt-4o\",\r\n [ModelClass.LARGE]: \"gpt-4o\",\r\n [ModelClass.EMBEDDING]: \"text-embedding-3-small\",\r\n [ModelClass.IMAGE]: \"dall-e-3\",\r\n },\r\n generateText: async (runtime, context, modelClass, stop) => {\r\n if (!context) {\r\n console.error(\"generateText context is empty\");\r\n return \"\";\r\n }\r\n \r\n const provider = runtime.modelProvider;\r\n const endpoint =\r\n runtime.character.modelEndpointOverride || models[provider].endpoint;\r\n const model = models[provider].model[modelClass];\r\n const temperature = models[provider].settings.temperature;\r\n const frequency_penalty = models[provider].settings.frequency_penalty;\r\n const presence_penalty = models[provider].settings.presence_penalty;\r\n const max_context_length = models[provider].settings.maxInputTokens;\r\n const max_response_length = models[provider].settings.maxOutputTokens;\r\n \r\n const apiKey = runtime.token;\r\n \r\n try {\r\n elizaLogger.log(\r\n `Trimming context to max length of ${max_context_length} tokens.`\r\n );\r\n context = await trimTokens(context, max_context_length, \"gpt-4o\");\r\n \r\n const _stop = stop || models[provider].settings.stop;\r\n elizaLogger.log(\r\n `Using provider: ${provider}, model: ${model}, temperature: ${temperature}, max response length: ${max_response_length}`\r\n );\r\n elizaLogger.log(\"Initializing OpenAI model.\");\r\n const openai = createOpenAI({ apiKey, baseURL: endpoint });\r\n\r\n const { text: openaiResponse } = await aiGenerateText({\r\n model: 
openai.languageModel(model),\r\n prompt: context,\r\n system:\r\n runtime.character.system ??\r\n settings.SYSTEM_PROMPT ??\r\n undefined,\r\n temperature: temperature,\r\n maxTokens: max_response_length,\r\n frequencyPenalty: frequency_penalty,\r\n presencePenalty: presence_penalty,\r\n });\r\n\r\n const response = openaiResponse;\r\n elizaLogger.log(\"Received response from OpenAI model.\");\r\n \r\n return response;\r\n } catch (error) {\r\n elizaLogger.error(\"Error in generateText:\", error);\r\n throw error;\r\n }\r\n },\r\n generateImage: (runtime, prompt, width, height, count) => generateImage({ runtime, prompt, width, height, count }),\r\n embed: (runtime, input) => embed(runtime, input),\r\n}\r\n```" + } + ] + }, + { + "number": 230, + "title": "twitter folder paths for twitter cookies & cache/last tweet point to different places", + "state": "open", + "created_at": "2024-11-07 21:42:56", + "updated_at": "2024-11-08 03:40:58", + "author": "o-on-x", + "labels": "bug", + "assignees": "", + "comments": 1, + "url": "https://github.com/ai16z/eliza/issues/230", + "messages": [ + { + "number": 0, + "author": "o-on-x", + "created_at": "2024-11-07T21:42:56Z", + "body": "the cache is not being picked up. the twitter cookies is.\r\ntwo solutions:\r\nset cache to same folder path as cookies. \r\nor use recursive find the lowest twittercache folder found" + }, + { + "number": 1, + "author": "o-on-x", + "created_at": "2024-11-08T03:40:57Z", + "body": "twitter cache pointing to current directory only rather than searching up till find" + } + ] + }, + { + "number": 226, + "title": "Squash postgres migrations", + "state": "open", + "created_at": "2024-11-07 08:49:45", + "updated_at": "2024-11-07 08:49:46", + "author": "lalalune", + "labels": "enhancement", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/226", + "messages": [ + { + "number": 0, + "author": "lalalune", + "created_at": "2024-11-07T08:49:45Z", + "body": "We have some postgres migrations-- I can't remember it we need these, but since we're greenfield it will reduce complexity greatly for setting up postgres for the first time if it just works and there is no need to setup or migrate." + } + ] + }, + { + "number": 215, + "title": "pnpm install fails on Ubuntu", + "state": "open", + "created_at": "2024-11-06 08:11:15", + "updated_at": "2024-11-16 17:30:52", + "author": "lo-zed", + "labels": "bug", + "assignees": "", + "comments": 6, + "url": "https://github.com/ai16z/eliza/issues/215", + "messages": [ + { + "number": 0, + "author": "lo-zed", + "created_at": "2024-11-06T08:11:15Z", + "body": "**Describe the bug**\r\n\r\nInstallation fails\r\n\r\n**To Reproduce**\r\n\r\n`pnpm i`\r\n\r\n**Expected behavior**\r\n\r\nInstalls without error.\r\n\r\n**Additional context**\r\n\r\nOS: Ubuntu 24.04 LTS\r\nnode: v22.10.1\r\npnpm: 9.12.3\r\n\r\nAn example of error I get:\r\n```\r\nnode_modules/sharp install$ (node install/libvips && node install/dll-copy && prebuild-install) || (node install/can-compile && node-gyp rebuild && node install/dll-copy)\r\n\u2514\u2500 Failed in 283ms at /home/ubuntu/eliza/node_modules/sharp\r\n```\r\nHappens with `@discordjs/opus` & `onnxruntime-node` too, maybe others.\r\n\r\nBut if I go into the directory and install from there it works:\r\n```\r\ncd node_modules/sharp\r\nnpm install\r\n```\r\nThis doesn't throw any error and installs the package correctly.\r\n\r\nIs this a bug on `pnpm` side?" 
+ }, + { + "number": 1, + "author": "Sid31", + "created_at": "2024-11-07T07:04:14Z", + "body": "Did you try pnpm install --include=optional sharp ? \r\n" + }, + { + "number": 2, + "author": "lo-zed", + "created_at": "2024-11-07T07:54:03Z", + "body": "yes, same error unfortunately" + }, + { + "number": 3, + "author": "lo-zed", + "created_at": "2024-11-12T09:01:24Z", + "body": "issue created at pnpm repo too: https://github.com/pnpm/pnpm/issues/8737" + }, + { + "number": 4, + "author": "metadiver", + "created_at": "2024-11-12T21:56:34Z", + "body": "related https://github.com/ai16z/eliza/issues/237" + }, + { + "number": 5, + "author": "lo-zed", + "created_at": "2024-11-13T17:07:25Z", + "body": "Hi, thanks for your help. I tried but with no success. How do you add python to the path? I tried:\r\n- a bash alias\r\n- a link python -> python3\r\n- adding the python path to `.env` in the project base dir\r\n\r\nAlso would you know how to get useful debug info? I tried `--reporter ndjson` but couldn't find the reason why the install fails" + }, + { + "number": 6, + "author": "sirkitree", + "created_at": "2024-11-16T17:30:50Z", + "body": "Try the following:\r\n1. downgrade to node v20 `nvm install 20 && nvm use 20`\r\n1. remove node modules and lock file `rm -rf node_modules && rm pnpm-lock.yaml`\r\n1. rerun install `pnpm i`" + } + ] + }, + { + "number": 206, + "title": "Add template overrides to docs", + "state": "open", + "created_at": "2024-11-05 06:44:48", + "updated_at": "2024-11-08 05:55:18", + "author": "lalalune", + "labels": "documentation,enhancement", + "assignees": "madjin", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/206", + "messages": [ + { + "number": 0, + "author": "lalalune", + "created_at": "2024-11-05T06:44:48Z", + "body": "You can now override all templates (except for some actions). 
We need to add this to docs in a way that is clear and part of the advanced character customization process.\r\n```\r\n \"messageHandlerTemplate\": \"\", // you can just use these for all platforms\r\n \"shouldRespondTemplate\": \"\",\r\n\r\n \"discordMessageHandlerTemplate\": \"\", // or be more specific per platform\r\n \"discordShouldRespondTemplate\": \"\",\r\n \"discordVoiceHandlerTemplate\": \"\",\r\n\r\n \"telegramMessageHandlerTemplate\": \"\",\r\n \"telegramShouldRespondTemplate\": \"\",\r\n\r\n \"twitterPostTemplate\": \"\",\r\n \"twitterMessageHandlerTemplate\": \"\",\r\n \"twitterSearchTemplate\": \"\",\r\n \"twitterShouldRespondTemplate\": \"\",\r\n\r\n\"continueMessageHandlerTemplate\": \"\", // overrides the messageHandlerTemplate, which overrides internal template\r\n\r\n\r\n \"goalsTemplate\": \"\",\r\n \"factsTemplate\": \"\",\r\n \"evaluationTemplate\": \"\"\r\n ```" + } + ] + }, + { + "number": 200, + "title": "WIP - Feature/cashtags (DRAFT)", + "state": "open", + "created_at": "2024-11-04 18:44:39", + "updated_at": "2024-11-13 10:24:09", + "author": "leomercier", + "labels": "", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/pull/200", + "messages": [ + { + "number": 0, + "author": "leomercier", + "created_at": "2024-11-04T18:44:39Z", + "body": "# Background\r\n\r\nGoal - Eliza should be able to verify and use \"real\" tokens when trading or conversing with users.\r\n\r\n## What does this PR do?\r\n\r\n## What kind of change is this?\r\n\r\nFeatures - add Cashtag action with Dexscreener API\r\n\r\n# Documentation changes needed?\r\n\r\nMy changes do not require a change to the project documentation.\r\n\r\n\r\n\r\n\r\n# Testing\r\n\r\nConverse with Eliza and ask for a Contract address for $ai16z and get the real one.\r\n\r\n## Where should a reviewer start?\r\n\r\n\r\n## Detailed testing steps\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n" + } + ] + }, + { + "number": 191, + "title": "Evaluation Tests", + "state": "open", + "created_at": "2024-11-03 23:18:21", + "updated_at": "2024-11-03 23:18:29", + "author": "sirkitree", + "labels": "automation,testing", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/191", + "messages": [ + { + "number": 0, + "author": "sirkitree", + "created_at": "2024-11-03T23:18:21Z", + "body": "Task is to look at the following test and verify that they are necessary and working:\n\n- Located in `core/tests/evaluation.test.ts`\n- Tests evaluation system:\n - Evaluator format validation\n - Context loading\n - Example validation\n - Evaluation process execution" + } + ] + }, + { + "number": 190, + "title": "Messages Tests", + "state": "open", + "created_at": "2024-11-03 23:17:24", + "updated_at": "2024-11-03 23:17:33", + "author": "sirkitree", + "labels": "automation,testing", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/190", + "messages": [ + { + "number": 0, + "author": "sirkitree", + "created_at": "2024-11-03T23:17:24Z", + "body": "Task is to look at the following test and verify that they are necessary and working:\n\n- Located in `core/tests/messages.test.ts`\n- Tests message formatting and handling:\n - Actor formatting\n - Message formatting\n - Facts formatting\n - Message content validation\n" + } + ] + }, + { + "number": 189, + "title": "Continue Action Tests", + "state": "open", + "created_at": "2024-11-03 23:16:18", + "updated_at": "2024-11-03 23:16:31", + "author": "sirkitree", + "labels": 
"automation,testing", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/189", + "messages": [ + { + "number": 0, + "author": "sirkitree", + "created_at": "2024-11-03T23:16:18Z", + "body": "Task is to look at the following test and verify that they are necessary and working:\n\n- Located in `core/tests/continue.test.ts`\n- Tests the continue action:\n - Validation function responses\n - Message handling\n - Database interactions\n - Action state management" + } + ] + }, + { + "number": 188, + "title": "Actions Tests", + "state": "open", + "created_at": "2024-11-03 23:14:59", + "updated_at": "2024-11-04 05:46:02", + "author": "sirkitree", + "labels": "automation,testing", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/188", + "messages": [ + { + "number": 0, + "author": "sirkitree", + "created_at": "2024-11-03T23:14:59Z", + "body": "Task is to look at the following test and verify that they are necessary and working:\n\n- Located in `core/tests/actions.test.ts`\n- Tests action system:\n - Action loading\n - Action validation\n - Action handler execution\n - Test actions (TEST\\_ACTION and TEST\\_ACTION\\_FAIL)" + } + ] + }, + { + "number": 187, + "title": "Runtime Tests", + "state": "open", + "created_at": "2024-11-03 23:13:53", + "updated_at": "2024-11-03 23:14:04", + "author": "sirkitree", + "labels": "automation,testing", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/187", + "messages": [ + { + "number": 0, + "author": "sirkitree", + "created_at": "2024-11-03T23:13:53Z", + "body": "Task is to look at the following test and verify that they are necessary and working:\n\n- Located in `core/tests/runtime.test.ts`\n- Tests core runtime functionality:\n - Runtime instance creation\n - Memory lifecycle within runtime\n - State management\n - Basic runtime operations" + } + ] + }, + { + "number": 186, + "title": "Goals Tests", + "state": "open", + "created_at": "2024-11-03 23:12:37", + "updated_at": "2024-11-03 23:12:46", + "author": "sirkitree", + "labels": "automation,testing", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/186", + "messages": [ + { + "number": 0, + "author": "sirkitree", + "created_at": "2024-11-03T23:12:37Z", + "body": "Task is to look at the following test and verify that they are necessary and working:\n\n- Located in `core/tests/goals.test.ts` and `core/tests/goal.test.ts`\n- Tests goal management functionality:\n - Goal creation\n - Goal updates\n - Status management\n - Objective tracking" + } + ] + }, + { + "number": 185, + "title": "Token Provider Tests", + "state": "open", + "created_at": "2024-11-03 23:10:31", + "updated_at": "2024-11-17 03:46:06", + "author": "sirkitree", + "labels": "automation,testing", + "assignees": "", + "comments": 3, + "url": "https://github.com/ai16z/eliza/issues/185", + "messages": [ + { + "number": 0, + "author": "sirkitree", + "created_at": "2024-11-03T23:10:31Z", + "body": "Task is to look at the following test and verify that they are necessary and working:\n \n- Located in `core/tests/token.test.ts`\n- Tests token-related functionality:\n - Fetching token security data\n - Token holder information\n - Trade data retrieval\n - Formatted token reports" + }, + { + "number": 1, + "author": "normand1", + "created_at": "2024-11-16T01:04:46Z", + "body": "I'm taking a look at getting these to run" + }, + { + "number": 2, + "author": "odilitime", + "created_at": "2024-11-16T01:06:35Z", + "body": "In 
jest or vitest? Because it sounds like we\u2019re switching to vitest now" + }, + { + "number": 3, + "author": "normand1", + "created_at": "2024-11-16T01:14:17Z", + "body": "@odilitime I was going to fix the existing tests that aren't running first, but can look at rewriting in Vitest too if that's the plan. ~~I'm hoping that was a mistake in the other issue because it doesn't actually make sense to me to switch if there's a bunch of tests already written in jest (which is also much more standard for nodejs projects like this)~~ \n\nEdit: Vitest is actually backwards compatible with jest and handles ts and modules much better. I'm sold on vitest." + } + ] + }, + { + "number": 184, + "title": "Browser Service Tests", + "state": "open", + "created_at": "2024-11-03 23:08:34", + "updated_at": "2024-11-03 23:11:20", + "author": "sirkitree", + "labels": "automation,testing", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/184", + "messages": [ + { + "number": 0, + "author": "sirkitree", + "created_at": "2024-11-03T23:08:34Z", + "body": "Task is to look at the following test and verify that they are necessary and working:\n\n- Located in `core/src/test_resources/basic.test.ts`\n- Simple test suite that verifies:\n - Basic test functionality works\n - Environment variables are accessible\n - Confirms NODE\\_ENV is 'test' and TEST\\_DATABASE\\_CLIENT is 'sqlite'" + } + ] + }, + { + "number": 183, + "title": "Memory Tests", + "state": "open", + "created_at": "2024-11-03 23:06:30", + "updated_at": "2024-11-03 23:11:27", + "author": "sirkitree", + "labels": "automation,testing", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/183", + "messages": [ + { + "number": 0, + "author": "sirkitree", + "created_at": "2024-11-03T23:06:30Z", + "body": "Task is to look at the following test and verify that they are necessary and working:\n\n- Located in `core/tests/memory.test.ts`\n- Tests memory-related functionality:\n - Search memories by embedding similarity\n - Validates memory ranking based on similarity scores\n - Tests memory lifecycle (creation, search, and removal)\n - Verifies embedding caching functionality" + } + ] + }, + { + "number": 171, + "title": "Develop a reliable way to test Twitter functionality", + "state": "open", + "created_at": "2024-11-02 16:10:49", + "updated_at": "2024-11-13 14:56:47", + "author": "sirkitree", + "labels": "Client: Twitter,testing", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/171", + "messages": [ + { + "number": 0, + "author": "sirkitree", + "created_at": "2024-11-02T16:10:49Z", + "body": "Here are effective ways to test a Twitter bot without making live posts:\n\n## Development Environment Testing\n\n**Local Testing**\n- Use a development environment that simulates Twitter's API responses[1]\n- Create test cases that mock Twitter interactions\n- Run your bot offline to verify basic functionality\n\n**API Testing Options**\n- Twitter's API offers no direct sandbox or test mode for posting[5]\n- Consider creating a private test account for development\n\n## Best Practices\n\n**Account Setup**\n- Create a separate development Twitter account specifically for testing\n- Keep the test account private to avoid public visibility\n- Use minimal followers/following to prevent unwanted interactions\n\n**Code Testing Methods**\n- Mock API calls in your code to simulate responses\n- Log bot actions instead of making actual posts\n- Test bot logic and processing 
separately from the actual posting mechanism\n\n## Bot Detection Testing\n\nBefore deploying, verify your bot behaves naturally by checking against common bot detection criteria:\n\n**Activity Patterns**\n- Maintain reasonable posting frequencies (avoid posting more than 10-15 times per day)[7]\n- Include varied content types, not just retweets\n- Add reasonable delays between actions\n\n**Account Setup**\n- Use a proper profile with complete information[7]\n- Avoid generic or automated-looking usernames\n- Include a proper profile picture and bio\n\nRemember that while testing, you must still comply with Twitter's terms of service and API usage guidelines[4].\n\nCitations:\n[1] https://stackoverflow.com/questions/72857417/is-there-any-possibility-in-making-a-twitter-bot-without-using-the-dev-api\n[2] https://social-dog.net/en/trend/p81\n[3] https://circleboom.com/twitter-management-tool/twitter-circle-tool/twitter-bot-checker\n[4] https://www.socialmediatoday.com/news/Twitter-Developing-New-Human-Test-to-Combat-Bots/637998/\n[5] https://stackoverflow.com/questions/71945400/test-tweet-post-with-twitter-api\n[6] https://www.wusa9.com/article/news/local/verify/verify-bot-or-not-how-to-check-if-a-twitter-account-is-trying-to-manipulate-you/65-582431225\n[7] https://blog.mozilla.org/en/products/firefox/irl-how-to-spot-a-bot/\n[8] https://devcommunity.x.com/t/how-to-build-a-twitter-bot-to-reply-to-mentions-basic-or-pro-plan/222232" + } + ] + }, + { + "number": 165, + "title": "Refine Boredom Provider", + "state": "open", + "created_at": "2024-11-02 05:52:35", + "updated_at": "2024-11-02 05:52:35", + "author": "lalalune", + "labels": "enhancement", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/165", + "messages": [ + { + "number": 0, + "author": "lalalune", + "created_at": "2024-11-02T05:52:35Z", + "body": "**Is your feature request related to a problem? Please describe.**\r\n\r\nCurrent boredom provider is not well tested and hacky. Should be reviewed, could be much better. Needs a lot of hours of testing with the bot and reviewing their outputs.\r\n\r\n**Describe the solution you'd like**\r\n\r\nMake the boredom provider more effective" + } + ] + }, + { + "number": 164, + "title": "Knowledge system is disabled", + "state": "open", + "created_at": "2024-11-02 05:48:00", + "updated_at": "2024-11-02 05:48:01", + "author": "lalalune", + "labels": "bug", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/164", + "messages": [ + { + "number": 0, + "author": "lalalune", + "created_at": "2024-11-02T05:48:00Z", + "body": "**Describe the bug**\r\n\r\nCurrently the knowledge system is disabled\r\nhttps://github.com/ai16z/eliza/blob/main/core/src/core/runtime.ts#L320\r\n\r\nThere is a continue. We should remove the continue, test the knowledge system end-to-end with knowledge and verify that it is injected into the context.\r\n\r\n\"Screenshot\r\n" + } + ] + }, + { + "number": 161, + "title": "Fix function calling, repetition and local action calling", + "state": "open", + "created_at": "2024-11-01 23:08:55", + "updated_at": "2024-11-01 23:08:55", + "author": "lalalune", + "labels": "enhancement", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/161", + "messages": [ + { + "number": 0, + "author": "lalalune", + "created_at": "2024-11-01T23:08:55Z", + "body": "The local model could be dramatically better. 
Currently it struggles with repeating forever, getting action names wrong something and not structuring JSON format properly (although usually its just the repeats that kill it)\r\n\r\nThis could be dramatically improved as an experience through a combination of model and prompt setting optimization, although an advanced researcher may also have a model-side approach.\r\n\r\n\"Screenshot\r\n" + } + ] + }, + { + "number": 158, + "title": "Make image generation very nice and spicy", + "state": "open", + "created_at": "2024-11-01 21:17:26", + "updated_at": "2024-11-04 05:47:49", + "author": "lalalune", + "labels": "enhancement", + "assignees": "o-on-x", + "comments": 3, + "url": "https://github.com/ai16z/eliza/issues/158", + "messages": [ + { + "number": 0, + "author": "lalalune", + "created_at": "2024-11-01T21:17:26Z", + "body": "@o-on-x has offered to work on this for their own agent" + }, + { + "number": 1, + "author": "o-on-x", + "created_at": "2024-11-03T02:37:47Z", + "body": "character issues:\r\nneed to have how the image gen is influenced by character file & user requests \r\n\r\nfor now character file already influences if you directly mention about what images it posts & how often in the style part of character file \r\n\r\nupload issues:\r\nthe main issues with functionality are imageGen returns a base64 url string. this needs to be loaded to buffer/file before upload\r\nsecond issue is twitter client needs a upload message addition on the interface which twitter-client-api might support\r\n\r\ncurrent image post working changes I'm using for discord (telegram is similar need to find where i put that code) : \r\nin src/clients/discord/messages.ts:\r\n\r\n`\r\n\r\n const callback: HandlerCallback = async (content: Content, files: any[]) => {\r\n\r\n \r\n // Process any data URL attachments\r\n const processedFiles = [...(files || [])];\r\n \r\n if (content.attachments?.length) {\r\n for (const attachment of content.attachments) {\r\n if (attachment.url?.startsWith('data:')) {\r\n try {\r\n const {buffer, type} = await this.attachmentManager.processDataUrlToBuffer(attachment.url);\r\n const extension = type.split('/')[1] || 'png';\r\n const fileName = `${attachment.id || Date.now()}.${extension}`;\r\n \r\n processedFiles.push({\r\n attachment: buffer,\r\n name: fileName\r\n });\r\n content.text = \"...\"\r\n // Update the attachment URL to reference the filename\r\n attachment.url = `attachment://${fileName}`;\r\n } catch (error) {\r\n console.error('Error processing data URL:', error);\r\n }\r\n }\r\n }\r\n }\r\n \r\n if (message.id && !content.inReplyTo) {\r\n content.inReplyTo = stringToUuid(message.id);\r\n }\r\n \r\n if (message.channel.type === ChannelType.GuildVoice) {\r\n console.log(\"generating voice\");\r\n const audioStream = await SpeechService.generate(\r\n this.runtime,\r\n content.text,\r\n );\r\n await this.voiceManager.playAudioStream(userId, audioStream);\r\n const memory: Memory = {\r\n id: stringToUuid(message.id),\r\n userId: this.runtime.agentId,\r\n content,\r\n roomId,\r\n embedding: embeddingZeroVector,\r\n };\r\n return [memory];\r\n } else {\r\n // For text channels, send the message with the processed files\r\n const messages = await sendMessageInChunks(\r\n message.channel as TextChannel,\r\n content.text,\r\n message.id,\r\n processedFiles,\r\n );\r\n let notFirstMessage = false;\r\n const memories: Memory[] = [];\r\n for (const m of messages) {\r\n let action = content.action;\r\n // If there's only one message or it's the last message, keep the 
original action\r\n // For multiple messages, set all but the last to 'CONTINUE'\r\n if (messages.length > 1 && m !== messages[messages.length - 1]) {\r\n action = \"CONTINUE\";\r\n }\r\n\r\n notFirstMessage = true;\r\n const memory: Memory = {\r\n id: stringToUuid(m.id),\r\n userId: this.runtime.agentId,\r\n content: {\r\n ...content,\r\n action,\r\n inReplyTo: messageId,\r\n url: m.url,\r\n },\r\n roomId,\r\n embedding: embeddingZeroVector,\r\n createdAt: m.createdTimestamp,\r\n };\r\n memories.push(memory);\r\n }\r\n for (const m of memories) {\r\n await this.runtime.messageManager.createMemory(m);\r\n }\r\n return memories;\r\n }\r\n };\r\n\r\n\r\n`\r\n\r\nin src/clients/discord/attachments.ts/class AttachmentManager:\r\n\r\n`\r\n async processDataUrlToBuffer(dataUrl: string): Promise<{buffer: Buffer, type: string}> {\r\n const matches = dataUrl.match(/^data:([A-Za-z-+\\/]+);base64,(.+)$/);\r\n \r\n if (!matches || matches.length !== 3) {\r\n throw new Error('Invalid data URL');\r\n }\r\n \r\n const type = matches[1];\r\n const base64Data = matches[2];\r\n const buffer = Buffer.from(base64Data, 'base64');\r\n \r\n return {buffer, type};\r\n }\r\n`" + }, + { + "number": 2, + "author": "o-on-x", + "created_at": "2024-11-03T03:48:39Z", + "body": "the line for catching the image is actually\r\n`if (attachment.url?.startsWith('data:image')) {`" + }, + { + "number": 3, + "author": "o-on-x", + "created_at": "2024-11-03T14:18:49Z", + "body": "issues with image gen handling this way effect handling of text attachments. solution will be to have image gen save and return file path not a base64 string. then to have action handled ACTION = \"IMAGE GEN\"" + } + ] + }, + { + "number": 148, + "title": "LLM can't be trusted to parse it's own json", + "state": "open", + "created_at": "2024-10-31 08:00:48", + "updated_at": "2024-11-14 01:50:12", + "author": "St4rgarden", + "labels": "bug", + "assignees": "", + "comments": 8, + "url": "https://github.com/ai16z/eliza/issues/148", + "messages": [ + { + "number": 0, + "author": "St4rgarden", + "created_at": "2024-10-31T08:00:48Z", + "body": "**Describe the bug**\r\n\r\nWe trust the LLM to parse it's own JSON resulting in what a separate issue referred to as an infinite loop (which technically will resolve itself if left alone to smash on the OpenAI endpoint for long enough)\r\n\r\n```\r\n# Instructions: Write the next message for lina. Include an action, if appropriate. Possible response actions: MUTE_ROOM, ASK_CLAUDE, NONE, IGNORE\r\n\r\nResponse format should be formatted in a JSON block like this:\r\njson\r\n{ \"user\": \"lina\", \"text\": string, \"action\": string }\r\n\r\nMessage is json\r\n{ \"user\": \"lina\", \"text\": \"Oh honey~ Working with a pioneer sounds tantalizing... but only if he can keep up with me and my fiery spirit \ud83d\ude09 Now spill the details or I might get bored!\", \"action\": NONE }\r\n\r\nresponse is json\r\n{ \"user\": \"lina\", \"text\": \"Oh honey~ Working with a pioneer sounds tantalizing... but only if he can keep up with me and my fiery spirit \ud83d\ude09 Now spill the details or I might get bored!\", \"action\": NONE }\r\n\r\nparsedContent is null\r\nparsedContent is null, retrying\r\n```\r\n\r\nNotice above that the action: value `NONE` is not a string. Now take a look at the correctly parsed JSON immediately following this:\r\n\r\n```\r\nparsedContent is {\r\n user: 'lina',\r\n text: \"Oh darling st4rgard3n~ I'm always up for a little blockchain banter or maybe some spicy discussions about funding public goods... 
but don't think I won't call you out if you get all serious on me.
So what's the plan with @mattyryze?\",\r\n action: 'NONE'\r\n}\r\n```\r\n\r\nHere the LLM has correctly formatted `NONE` as `'NONE'` a correct string.\r\n\r\n**To Reproduce**\r\n\r\nJust run eliza with a cheap llm model long enough and you will definitely encounter this one.\r\n\r\n**Expected behavior**\r\n\r\nThe message returned from the LLM should then be formatted into JSON in the program." + }, + { + "number": 1, + "author": "St4rgarden", + "created_at": "2024-10-31T08:01:39Z", + "body": "This issue https://github.com/ai16z/eliza/issues/70 is not accurate but it's a duplicate of this issue now." + }, + { + "number": 2, + "author": "twilwa", + "created_at": "2024-10-31T20:32:50Z", + "body": "several python libs solve/attempt to solve this, in order of my personal opinion of them:\r\n-outlines\r\n-instructor\r\n-lmql\r\n-guidance\r\n\r\nprobably more -- however, not sure if any have a typescript equivalent" + }, + { + "number": 3, + "author": "twilwa", + "created_at": "2024-10-31T20:33:38Z", + "body": "if it's openai, we can use structured output mode: https://platform.openai.com/docs/guides/structured-outputs" + }, + { + "number": 4, + "author": "twilwa", + "created_at": "2024-11-01T04:05:24Z", + "body": "kind of a hacky workaround for non-openai models:\r\nrun the model through a LiteLLM proxy server: https://github.com/BerriAI/litellm\r\n\r\nhttps://docs.litellm.ai/docs/completion/json_mode -- it's called json mode, but i think you can do any kind of structured output. Just replace the OPENAI_API_URL with localhost:4000 and should be compatible" + }, + { + "number": 5, + "author": "alextitonis", + "created_at": "2024-11-01T08:12:49Z", + "body": "This could help with the issue: \r\n```\r\nfunction parseLLMJson(rawResponse: string): T {\r\n // Sanitize JSON while preserving native types\r\n const sanitizedJson = rawResponse.replace(\r\n /(\\w+):\\s*([^,}\\s]+)/g,\r\n (match, key, value) => {\r\n // Don't quote if it's a number\r\n if (/^-?\\d+(\\.\\d+)?$/.test(value)) {\r\n return `\"${key}\": ${value}`;\r\n }\r\n \r\n // Don't quote if it's a boolean\r\n if (value === 'true' || value === 'false') {\r\n return `\"${key}\": ${value}`;\r\n }\r\n \r\n // Don't quote if it's already properly quoted\r\n if (/^[\"'].*[\"']$/.test(value)) {\r\n return `\"${key}\": ${value.replace(/^['\"](.*)['\"]$/, '\"$1\"')}`;\r\n }\r\n \r\n // Quote everything else\r\n return `\"${key}\": \"${value}\"`;\r\n }\r\n );\r\n\r\n try {\r\n return JSON.parse(sanitizedJson) as T;\r\n } catch (error) {\r\n console.error('Failed to parse JSON:', error);\r\n throw new Error('Invalid JSON format');\r\n }\r\n}\r\n```" + }, + { + "number": 6, + "author": "Elyx0", + "created_at": "2024-11-02T15:31:27Z", + "body": "@St4rgarden I wonder if simply explaining it better in instructions would solve it like\r\n\r\n```\r\nPossible response actions: MUTE_ROOM, ASK_CLAUDE, NONE, IGNORE\r\nResponse format should be formatted in a JSON block like this:\r\njson\r\n{ \"user\": \"lina\", \"text\": string, \"action\": string }\r\nexample\r\n{ \"user\": \"lina\", \"text\": \"sometext\", \"action\": \"ASK_CLAUDE\"}\r\n```" + }, + { + "number": 7, + "author": "lalalune", + "created_at": "2024-11-04T08:22:31Z", + "body": "yep. hi @Elyx0 :)" + }, + { + "number": 8, + "author": "monilpat", + "created_at": "2024-11-14T01:43:15Z", + "body": "\r\nYeah I had a similar question about the current approach for `generateObject` in `packages/core/generation.ts`. 
It looks like we're using a workaround instead of the `{ generateObject }` method from `\"ai\"`, which natively supports Z objects and ensures typing. This could be more reliable than the current method of using `generateText` to generate, parse, and retry until we get the desired output.\r\n\r\nUsing `{ generateObject }` would allow us to eliminate the custom `generateObject` and `generateObjectArray` functions, simplifying the code and leveraging the AI SDK's structured output capabilities. Here\u2019s the code as it stands now:\r\n\r\n```typescript\r\nexport async function generateObject({\r\n runtime,\r\n context,\r\n modelClass,\r\n}: {\r\n runtime: IAgentRuntime;\r\n context: string;\r\n modelClass: string;\r\n}): Promise {\r\n if (!context) {\r\n elizaLogger.error(\"generateObject context is empty\");\r\n return null;\r\n }\r\n let retryDelay = 1000;\r\n\r\n while (true) {\r\n try {\r\n const response = await generateText({\r\n runtime,\r\n context,\r\n modelClass,\r\n });\r\n const parsedResponse = parseJSONObjectFromText(response);\r\n if (parsedResponse) {\r\n return parsedResponse;\r\n }\r\n } catch (error) {\r\n elizaLogger.error(\"Error in generateObject:\", error);\r\n }\r\n\r\n await new Promise((resolve) => setTimeout(resolve, retryDelay));\r\n retryDelay *= 2;\r\n }\r\n}\r\n```\r\n\r\nMy proposal is to replace it with the `generateObject` function provided in the AI SDK, as described below:\r\n\r\n```typescript\r\n/**\r\nGenerate JSON with any schema for a given prompt using a language model.\r\n\r\nThis function does not stream the output. If you want to stream the output, use `streamObject` instead.\r\n\r\n@returns\r\nA result object that contains the generated object, the finish reason, the token usage, and additional information.\r\n*/\r\ndeclare function generateObject(options: Omit & Prompt & {\r\n output: 'no-schema';\r\n model: LanguageModel;\r\n mode?: 'json';\r\n experimental_telemetry?: TelemetrySettings;\r\n experimental_providerMetadata?: ProviderMetadata;\r\n _internal?: {\r\n generateId?: () => string;\r\n currentDate?: () => Date;\r\n };\r\n}): Promise>;\r\n```\r\n\r\nSwitching to this method would improve reliability and reduce custom parsing logic. I'd be interested to hear your thoughts!" + } + ] + }, + { + "number": 81, + "title": "Add AI Code Reviewing application from GitHub Marketplace", + "state": "open", + "created_at": "2024-10-30 01:49:15", + "updated_at": "2024-10-30 02:12:52", + "author": "mrdavidburns", + "labels": "enhancement,automation", + "assignees": "", + "comments": 1, + "url": "https://github.com/ai16z/eliza/issues/81", + "messages": [ + { + "number": 0, + "author": "mrdavidburns", + "created_at": "2024-10-30T01:49:15Z", + "body": "**Is your feature request related to a problem? Please describe.**\r\nLooking to increase code quality and efficiency. \r\n\r\n\r\n\r\n**Describe the solution you'd like**\r\n\r\nI would like to propose integrating [CodeFactor](https://github.com/marketplace/codefactor) into this repository to enhance code quality and streamline the review process. CodeFactor offers an automated code review solution that continuously analyzes the repository for potential code issues, ensuring consistent code standards across contributions. Best of all it is FREE for public repositories like this one.\r\n\r\nBenefits of Adding CodeFactor:\r\n\r\n1.\tAutomated Code Review: CodeFactor automatically reviews new pull requests and commits, highlighting potential code smells, bugs, and areas for improvement. 
This helps maintainers focus on critical code changes rather than manual code quality checks.\r\n2.\tConsistent Code Quality: With its real-time feedback, CodeFactor ensures adherence to coding standards and best practices, fostering a uniform and high-quality codebase across all contributors.\r\n3.\tTime-Saving for Maintainers: The automated reviews free up maintainers\u2019 time, allowing them to focus on feature development and critical code issues while CodeFactor handles routine quality checks.\r\n4.\tAccessible and Easy to Use: CodeFactor seamlessly integrates with GitHub and provides a clear, visual report on code quality metrics, making it easy for contributors to understand and address code issues before merging.\r\n5.\tTransparent Quality Metrics: CodeFactor provides a public code quality score, which encourages contributors to submit higher-quality code and aligns with a collaborative, open-source approach to development.\r\n\r\nIntegrating CodeFactor would streamline the review process, improve the overall codebase quality, and allow for more effective collaboration between maintainers and contributors. I believe this would be a valuable addition to our repository\u2019s workflow.\r\n\r\n\r\n\r\n**Describe alternatives you've considered**\r\n\r\nThere are numerous other alternatives in GitHub MarketPlace. I chose this one because it was top of popularity and would be free.\r\n\r\n\r\n\r\n**Additional context**\r\n\r\n" + }, + { + "number": 1, + "author": "sirkitree", + "created_at": "2024-10-30T02:12:51Z", + "body": "Yes please.\n\nI'm interested to hear if anyone else has exp with ai code reviewers. Pros or cons in your experience?" + } + ] + }, + { + "number": 79, + "title": "Dependency Dashboard", + "state": "open", + "created_at": "2024-10-30 01:34:59", + "updated_at": "2024-11-22 22:33:05", + "author": "renovate[bot]", + "labels": "Priority: High,automation", + "assignees": "sirkitree", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/79", + "messages": [ + { + "number": 0, + "author": "renovate[bot]", + "created_at": "2024-10-30T01:34:59Z", + "body": "This issue lists Renovate updates and detected dependencies. Read the [Dependency Dashboard](https://docs.renovatebot.com/key-concepts/dashboard/) docs to learn more.\n\n## Config Migration Needed\n\n - [ ] Select this checkbox to let Renovate create an automated Config Migration PR.\n\n> [!WARNING]\nThese dependencies are deprecated:\n\n| Datasource | Name | Replacement PR? |\n|------------|------|--------------|\n| npm | `@cliqz/adblocker-playwright` | ![Unavailable](https://img.shields.io/badge/unavailable-orange?style=flat-square) |\n| npm | `@types/dompurify` | ![Unavailable](https://img.shields.io/badge/unavailable-orange?style=flat-square) |\n| npm | `@types/pdfjs-dist` | ![Unavailable](https://img.shields.io/badge/unavailable-orange?style=flat-square) |\n| npm | `eslint-plugin-vitest` | ![Available](https://img.shields.io/badge/available-green?style=flat-square) |\n\n## Awaiting Schedule\n\nThese updates are awaiting their schedule. 
Click on a checkbox to get an update now.\n\n - [ ] chore(deps): pin dependencies (`@types/react`, `@types/react-dom`, `react`, `react-dom`)\n - [ ] fix(deps): pin dependencies (`@ai-sdk/anthropic`, `@ai-sdk/google`, `@ai-sdk/google-vertex`, `@ai-sdk/groq`, `@ai16z/eliza`, `@avnu/avnu-sdk`, `@commitlint/cli`, `@commitlint/config-conventional`, `@coral-xyz/anchor`, `@docusaurus/core`, `@docusaurus/plugin-content-blog`, `@docusaurus/plugin-content-docs`, `@docusaurus/plugin-ideal-image`, `@docusaurus/preset-classic`, `@docusaurus/theme-mermaid`, `@eslint/js`, `@octokit/rest`, `@octokit/types`, `@radix-ui/react-slot`, `@types/dompurify`, `@types/glob`, `@types/mocha`, `@types/pdfjs-dist`, `@types/pg`, `@types/uuid`, `@uniswap/sdk-core`, `@unruggable_starknet/core`, `@vitejs/plugin-react`, `ai`, `anthropic-vertex-ai`, `automd`, `autoprefixer`, `bs58`, `citty`, `class-variance-authority`, `coinbase-api`, `concurrently`, `docusaurus-lunr-search`, `docusaurus-plugin-typedoc`, `echogarden`, `express`, `fastembed`, `fastestsmallesttextencoderdecoder`, `gif-frames`, `giget`, `glob`, `globals`, `husky`, `jiti`, `langchain`, `lerna`, `lucide-react`, `node`, `ollama-ai-provider`, `only-allow`, `onnxruntime-node`, `optional`, `pg`, `postcss`, `react`, `react-dom`, `readline`, `sharp`, `simple-git`, `starknet`, `tailwind-merge`, `tailwindcss`, `tailwindcss-animate`, `together-ai`, `tsup`, `typedoc`, `typedoc-plugin-markdown`, `typescript-eslint`, `unbuild`, `vite`, `vite-plugin-top-level-await`, `vite-plugin-wasm`, `vitest`, `ws`, `zod`)\n - [ ] [fix(deps): pin dependencies](../pull/113) (`@anthropic-ai/sdk`, `@huggingface/transformers`, `openai`)\n - [ ] fix(deps): update dependency @ai-sdk/anthropic to ^0.0.56\n - [ ] fix(deps): update dependency @ai-sdk/google-vertex to ^0.0.43\n - [ ] fix(deps): update dependency @ai-sdk/openai to v1.0.4\n - [ ] fix(deps): update dependency @echogarden/espeak-ng-emscripten to v0.3.3\n - [ ] fix(deps): update dependency @opendocsg/pdf2md to v0.1.32\n - [ ] fix(deps): update dependency @solana/web3.js to v1.95.5\n - [ ] fix(deps): update dependency agent-twitter-client to v0.0.14\n - [ ] [fix(deps): update dependency clsx to v2.1.1](../pull/106)\n - [ ] fix(deps): update dependency dompurify to v3.2.1\n - [ ] fix(deps): update dependency pm2 to v5.4.3\n - [ ] fix(deps): update dependency uuid to v11.0.3\n - [ ] fix(deps): update docusaurus monorepo to ^3.6.2 (`@docusaurus/core`, `@docusaurus/module-type-aliases`, `@docusaurus/plugin-content-blog`, `@docusaurus/plugin-content-docs`, `@docusaurus/plugin-ideal-image`, `@docusaurus/preset-classic`, `@docusaurus/theme-mermaid`, `@docusaurus/types`)\n - [ ] fix(deps): update sqlite related (`@types/better-sqlite3`, `sqlite-vec`)\n - [ ] [chore(deps): update dependency @rollup/plugin-terser to v0.4.4](../pull/112)\n - [ ] chore(deps): update dependency @types/node to v22.9.1\n - [ ] chore(deps): update eslint and formatting (`eslint`, `eslint-plugin-react-hooks`, `eslint-plugin-react-refresh`, `eslint-plugin-vitest`, `prettier`)\n - [ ] chore(deps): update node.js to v23.3.0\n - [ ] chore(deps): update pnpm to v9.14.2\n - [ ] chore(deps): update typescript and related (`@typescript-eslint/eslint-plugin`, `@typescript-eslint/parser`, `tslib`, `typescript`)\n - [ ] fix(deps): update dependency @discordjs/voice to v0.18.0\n - [ ] fix(deps): update dependency @echogarden/speex-resampler-wasm to v0.3.0\n - [ ] fix(deps): update dependency @mdx-js/react to v3.1.0\n - [ ] fix(deps): update dependency node-llama-cpp to v3.2.0\n - [ ] 
fix(deps): update dependency pdfjs-dist to v4.8.69\n - [ ] fix(deps): update dependency playwright to v1.49.0\n - [ ] fix(deps): update dependency prism-react-renderer to v2.4.0\n - [ ] fix(deps): update dependency react-router-dom to v6.28.0\n - [ ] fix(deps): update dependency together-ai to ^0.9.0\n - [ ] chore(deps): update actions/checkout action to v4\n - [ ] chore(deps): update commitlint monorepo to v19 (major) (`@commitlint/cli`, `@commitlint/config-conventional`)\n - [ ] chore(deps): update dependency whatwg-url to v14\n - [ ] chore(deps): update rollup and plugins (major) (`@rollup/plugin-commonjs`, `@rollup/plugin-replace`, `@rollup/plugin-typescript`, `rollup`)\n - [ ] fix(deps): update dependency @ai-sdk/anthropic to v1\n - [ ] fix(deps): update dependency @ai-sdk/google to v1\n - [ ] fix(deps): update dependency @ai-sdk/google-vertex to v1\n - [ ] fix(deps): update dependency @ai-sdk/groq to v1\n - [ ] fix(deps): update dependency ai to v4\n - [ ] fix(deps): update dependency gif-frames to v1\n - [ ] fix(deps): update dependency glob to v11\n - [ ] fix(deps): update dependency ollama-ai-provider to v1\n - [ ] fix(deps): update dependency react-router-dom to v7\n - [ ] fix(deps): update dependency starknet to v7\n - [ ] fix(deps): update octokit monorepo (major) (`@octokit/rest`, `@octokit/types`)\n\n## Detected dependencies\n\n
dockerfile\n
\n\n
Dockerfile\n\n - `node 23.1.0`\n\n
\n\n
\n
\n\n
github-actions\n
\n\n
.github/workflows/ci.yaml\n\n - `actions/checkout v4`\n - `actions/setup-node v4`\n\n
\n\n
.github/workflows/generate-changelog.yml\n\n - `actions/checkout v4`\n - `stefanzweifel/git-auto-commit-action v5`\n\n
\n\n
.github/workflows/pr.yaml\n\n - `actions/checkout v2`\n\n
\n\n
.github/workflows/pre-release.yml\n\n - `actions/checkout v4`\n - `actions/setup-node v4`\n - `actions/create-release v1`\n\n
\n\n
.github/workflows/release.yaml\n\n - `actions/checkout v4`\n - `actions/setup-node v4`\n - `actions/create-release v1`\n\n
\n\n
\n
\n\n
npm\n
\n\n
agent/package.json\n\n - `readline ^1.3.0`\n - `ws ^8.18.0`\n - `yargs 17.7.2`\n - `ts-node 10.9.2`\n - `tsup ^8.3.5`\n\n
\n\n
client/package.json\n\n - `@radix-ui/react-slot ^1.1.0`\n - `class-variance-authority ^0.7.0`\n - `clsx 2.1.0`\n - `lucide-react ^0.460.0`\n - `react ^18.3.1`\n - `react-dom ^18.3.1`\n - `tailwind-merge ^2.5.4`\n - `tailwindcss-animate ^1.0.7`\n - `vite-plugin-top-level-await ^1.4.4`\n - `vite-plugin-wasm ^3.3.0`\n - `@eslint/js ^9.13.0`\n - `@types/node 22.8.4`\n - `@types/react ^18.3.12`\n - `@types/react-dom ^18.3.1`\n - `@vitejs/plugin-react ^4.3.3`\n - `autoprefixer ^10.4.20`\n - `eslint ^9.13.0`\n - `eslint-plugin-react-hooks ^5.0.0`\n - `eslint-plugin-react-refresh ^0.4.14`\n - `globals ^15.11.0`\n - `postcss ^8.4.49`\n - `tailwindcss ^3.4.15`\n - `typescript ~5.6.2`\n - `typescript-eslint ^8.11.0`\n - `vite ^5.4.10`\n\n
\n\n
docs/package.json\n\n - `@docusaurus/core ^3.6.0`\n - `@docusaurus/plugin-content-blog ^3.6.0`\n - `@docusaurus/plugin-content-docs ^3.6.0`\n - `@docusaurus/plugin-ideal-image ^3.6.0`\n - `@docusaurus/preset-classic ^3.6.0`\n - `@docusaurus/theme-mermaid ^3.6.0`\n - `@mdx-js/react 3.0.1`\n - `clsx 2.1.0`\n - `docusaurus-lunr-search ^3.5.0`\n - `prism-react-renderer 2.3.1`\n - `react 18.2.0`\n - `react-dom 18.2.0`\n - `react-router-dom 6.22.1`\n - `@docusaurus/module-type-aliases 3.6.0`\n - `@docusaurus/types 3.6.0`\n - `docusaurus-plugin-typedoc ^1.0.5`\n - `typedoc ^0.26.11`\n - `typedoc-plugin-markdown ^4.2.9`\n - `node 23.1.0`\n\n
\n\n
package.json\n\n - `ollama-ai-provider ^0.16.1`\n - `optional ^0.1.4`\n - `sharp ^0.33.5`\n - `concurrently ^9.1.0`\n - `husky ^9.1.6`\n - `lerna ^8.1.5`\n - `only-allow ^1.2.1`\n - `prettier ^3.3.3`\n - `typedoc ^0.26.11`\n - `typescript 5.6.3`\n - `vite ^5.4.11`\n - `vitest ^2.1.5`\n - `@commitlint/cli ^18.4.4`\n - `@commitlint/config-conventional ^18.4.4`\n - `node >=22`\n - `pnpm 9.12.3+sha512.cce0f9de9c5a7c95bef944169cc5dfe8741abfb145078c0d508b868056848a87c81e626246cb60967cbd7fd29a6c062ef73ff840d96b3c86c40ac92cf4a813ee`\n - `onnxruntime-node ^1.20.0`\n\n
\n\n
packages/adapter-postgres/package.json\n\n - `@types/pg ^8.11.10`\n - `pg ^8.13.1`\n - `eslint 9.13.0`\n - `eslint-config-prettier 9.1.0`\n - `eslint-plugin-prettier 5.2.1`\n - `eslint-plugin-vitest 0.5.4`\n - `tsup ^8.3.5`\n\n
\n\n
packages/adapter-sqlite/package.json\n\n - `@types/better-sqlite3 7.6.11`\n - `better-sqlite3 11.5.0`\n - `sqlite-vec 0.1.4-alpha.2`\n - `eslint 9.13.0`\n - `eslint-config-prettier 9.1.0`\n - `eslint-plugin-prettier 5.2.1`\n - `eslint-plugin-vitest 0.5.4`\n - `tsup ^8.3.5`\n - `whatwg-url 7.1.0`\n\n
\n\n
packages/adapter-sqljs/package.json\n\n - `@types/sql.js 1.4.9`\n - `sql.js 1.12.0`\n - `uuid 11.0.2`\n - `eslint 9.13.0`\n - `eslint-config-prettier 9.1.0`\n - `eslint-plugin-prettier 5.2.1`\n - `eslint-plugin-vitest 0.5.4`\n - `tsup ^8.3.5`\n - `whatwg-url 7.1.0`\n\n
\n\n
packages/adapter-supabase/package.json\n\n - `@supabase/supabase-js 2.46.1`\n - `eslint 9.13.0`\n - `eslint-config-prettier 9.1.0`\n - `eslint-plugin-prettier 5.2.1`\n - `eslint-plugin-vitest 0.5.4`\n - `tsup ^8.3.5`\n - `whatwg-url 7.1.0`\n\n
\n\n
packages/client-auto/package.json\n\n - `@types/body-parser 1.19.5`\n - `@types/cors 2.8.17`\n - `@types/express 5.0.0`\n - `body-parser 1.20.3`\n - `cors 2.8.5`\n - `multer 1.4.5-lts.1`\n - `eslint 9.13.0`\n - `eslint-config-prettier 9.1.0`\n - `eslint-plugin-prettier 5.2.1`\n - `eslint-plugin-vitest 0.5.4`\n - `tsup ^8.3.5`\n - `whatwg-url 7.1.0`\n\n
\n\n
packages/client-direct/package.json\n\n - `@types/body-parser 1.19.5`\n - `@types/cors 2.8.17`\n - `@types/express 5.0.0`\n - `body-parser 1.20.3`\n - `cors 2.8.5`\n - `express ^4.21.1`\n - `multer 1.4.5-lts.1`\n - `eslint 9.13.0`\n - `eslint-config-prettier 9.1.0`\n - `eslint-plugin-prettier 5.2.1`\n - `eslint-plugin-vitest 0.5.4`\n - `tsup ^8.3.5`\n - `whatwg-url 7.1.0`\n\n
\n\n
packages/client-discord/package.json\n\n - `@discordjs/rest 2.4.0`\n - `@discordjs/voice 0.17.0`\n - `discord.js 14.16.3`\n - `libsodium-wrappers 0.7.15`\n - `prism-media 1.3.5`\n - `zod 3.23.8`\n - `eslint 9.13.0`\n - `eslint-config-prettier 9.1.0`\n - `eslint-plugin-prettier 5.2.1`\n - `eslint-plugin-vitest 0.5.4`\n - `tsup ^8.3.5`\n - `whatwg-url 7.1.0`\n\n
\n\n
packages/client-github/package.json\n\n - `@octokit/rest ^20.0.2`\n - `@octokit/types ^12.6.0`\n - `glob ^10.3.10`\n - `simple-git ^3.22.0`\n - `@types/glob ^8.1.0`\n - `eslint 9.13.0`\n - `eslint-config-prettier 9.1.0`\n - `eslint-plugin-prettier 5.2.1`\n - `eslint-plugin-vitest 0.5.4`\n - `tsup ^8.3.5`\n\n
\n\n
packages/client-telegram/package.json\n\n - `@telegraf/types 7.1.0`\n - `telegraf 4.16.3`\n - `zod 3.23.8`\n - `eslint 9.13.0`\n - `eslint-config-prettier 9.1.0`\n - `eslint-plugin-prettier 5.2.1`\n - `eslint-plugin-vitest 0.5.4`\n - `tsup ^8.3.5`\n\n
\n\n
packages/client-twitter/package.json\n\n - `agent-twitter-client 0.0.13`\n - `glob 11.0.0`\n - `zod 3.23.8`\n - `eslint 9.13.0`\n - `eslint-config-prettier 9.1.0`\n - `eslint-plugin-prettier 5.2.1`\n - `eslint-plugin-vitest 0.5.4`\n - `tsup ^8.3.5`\n - `whatwg-url 7.1.0`\n\n
\n\n
packages/core/package.json\n\n - `@ai-sdk/anthropic ^0.0.53`\n - `@ai-sdk/google ^0.0.55`\n - `@ai-sdk/google-vertex ^0.0.42`\n - `@ai-sdk/groq ^0.0.3`\n - `@ai-sdk/openai 1.0.0-canary.3`\n - `@anthropic-ai/sdk ^0.30.1`\n - `@types/uuid ^10.0.0`\n - `ai ^3.4.23`\n - `anthropic-vertex-ai ^1.0.0`\n - `fastembed ^1.14.1`\n - `fastestsmallesttextencoderdecoder ^1.0.22`\n - `gaxios 6.7.1`\n - `glob 11.0.0`\n - `js-sha1 0.7.0`\n - `langchain ^0.3.6`\n - `ollama-ai-provider ^0.16.1`\n - `openai 4.69.0`\n - `tiktoken 1.0.17`\n - `tinyld 1.3.4`\n - `together-ai ^0.7.0`\n - `unique-names-generator 4.7.1`\n - `uuid 11.0.2`\n - `zod ^3.23.8`\n - `@eslint/js ^9.13.0`\n - `@rollup/plugin-commonjs 25.0.8`\n - `@rollup/plugin-json 6.1.0`\n - `@rollup/plugin-node-resolve 15.3.0`\n - `@rollup/plugin-replace 5.0.7`\n - `@rollup/plugin-terser 0.1.0`\n - `@rollup/plugin-typescript 11.1.6`\n - `@solana/web3.js 1.95.4`\n - `@types/fluent-ffmpeg 2.1.27`\n - `@types/jest 29.5.14`\n - `@types/mocha ^10.0.9`\n - `@types/node 22.8.4`\n - `@types/pdfjs-dist ^2.10.378`\n - `@types/tar 6.1.13`\n - `@types/wav-encoder 1.3.3`\n - `@typescript-eslint/eslint-plugin 8.12.2`\n - `@typescript-eslint/parser 8.12.2`\n - `dotenv 16.4.5`\n - `eslint 9.13.0`\n - `eslint-config-prettier 9.1.0`\n - `eslint-plugin-prettier 5.2.1`\n - `eslint-plugin-vitest 0.5.4`\n - `jest 29.7.0`\n - `lint-staged 15.2.10`\n - `nodemon 3.1.7`\n - `pm2 5.4.2`\n - `prettier 3.3.3`\n - `rimraf 6.0.1`\n - `rollup 2.79.2`\n - `ts-jest 29.2.5`\n - `ts-node 10.9.2`\n - `tslib 2.8.0`\n - `tsup ^8.3.5`\n - `typescript 5.6.3`\n\n
\n\n
packages/create-eliza-app/package.json\n\n - `citty ^0.1.6`\n - `giget ^1.2.3`\n - `automd ^0.3.12`\n - `eslint 9.13.0`\n - `eslint-config-prettier 9.1.0`\n - `eslint-plugin-prettier 5.2.1`\n - `eslint-plugin-vitest 0.5.4`\n - `jiti ^2.4.0`\n - `unbuild ^2.0.0`\n\n
\n\n
packages/plugin-bootstrap/package.json\n\n - `tsup ^8.3.5`\n - `eslint 9.13.0`\n - `eslint-config-prettier 9.1.0`\n - `eslint-plugin-prettier 5.2.1`\n - `eslint-plugin-vitest 0.5.4`\n - `whatwg-url 7.1.0`\n\n
\n\n
packages/plugin-coinbase/package.json\n\n - `@ai16z/eliza ^0.1.3`\n - `coinbase-api ^1.0.5`\n - `tsup ^8.3.5`\n - `onnxruntime-node ^1.20.0`\n - `whatwg-url 7.1.0`\n\n
\n\n
packages/plugin-image-generation/package.json\n\n - `tsup ^8.3.5`\n - `eslint 9.13.0`\n - `eslint-config-prettier 9.1.0`\n - `eslint-plugin-prettier 5.2.1`\n - `eslint-plugin-vitest 0.5.4`\n - `whatwg-url 7.1.0`\n\n
\n\n
packages/plugin-node/package.json\n\n - `@cliqz/adblocker-playwright 1.34.0`\n - `@echogarden/espeak-ng-emscripten 0.3.0`\n - `@echogarden/kissfft-wasm 0.2.0`\n - `@echogarden/speex-resampler-wasm 0.2.1`\n - `@huggingface/transformers 3.0.1`\n - `@opendocsg/pdf2md 0.1.31`\n - `@types/uuid ^10.0.0`\n - `alawmulaw 6.0.0`\n - `bignumber 1.1.0`\n - `bignumber.js 9.1.2`\n - `capsolver-npm 2.0.2`\n - `cldr-segmentation 2.2.1`\n - `command-exists 1.2.9`\n - `csv-writer 1.6.0`\n - `echogarden ^2.0.5`\n - `espeak-ng 1.0.2`\n - `ffmpeg-static 5.2.0`\n - `fluent-ffmpeg 2.1.3`\n - `formdata-node 6.0.3`\n - `fs-extra 11.2.0`\n - `gaxios 6.7.1`\n - `gif-frames ^0.4.1`\n - `glob 11.0.0`\n - `graceful-fs 4.2.11`\n - `html-escaper 3.0.3`\n - `html-to-text 9.0.5`\n - `import-meta-resolve 4.1.0`\n - `jieba-wasm 2.2.0`\n - `json5 2.2.3`\n - `kuromoji 0.1.2`\n - `libsodium-wrappers 0.7.15`\n - `multer 1.4.5-lts.1`\n - `node-cache 5.1.2`\n - `node-llama-cpp 3.1.1`\n - `nodejs-whisper 0.1.18`\n - `onnxruntime-node ^1.20.0`\n - `pdfjs-dist 4.7.76`\n - `playwright 1.48.2`\n - `pm2 5.4.2`\n - `puppeteer-extra 3.3.6`\n - `puppeteer-extra-plugin-capsolver 2.0.1`\n - `sharp ^0.33.5`\n - `srt 0.0.3`\n - `systeminformation 5.23.5`\n - `tar 7.4.3`\n - `tinyld 1.3.4`\n - `uuid 11.0.2`\n - `wav 1.0.2`\n - `wav-encoder 1.3.0`\n - `wavefile 11.0.0`\n - `yargs 17.7.2`\n - `youtube-dl-exec 3.0.10`\n - `eslint 9.13.0`\n - `eslint-config-prettier 9.1.0`\n - `eslint-plugin-prettier 5.2.1`\n - `eslint-plugin-vitest 0.5.4`\n - `tsup ^8.3.5`\n - `onnxruntime-node ^1.20.0`\n - `whatwg-url 7.1.0`\n\n
\n\n
packages/plugin-solana/package.json\n\n - `@coral-xyz/anchor ^0.30.1`\n - `@solana/spl-token 0.4.9`\n - `@solana/web3.js 1.95.4`\n - `bignumber 1.1.0`\n - `bignumber.js 9.1.2`\n - `bs58 ^6.0.0`\n - `node-cache 5.1.2`\n - `pumpdotfun-sdk 1.3.2`\n - `tsup ^8.3.5`\n - `eslint 9.13.0`\n - `eslint-config-prettier 9.1.0`\n - `eslint-plugin-prettier 5.2.1`\n - `eslint-plugin-vitest 0.5.4`\n - `whatwg-url 7.1.0`\n\n
\n\n
packages/plugin-starknet/package.json\n\n - `@avnu/avnu-sdk ^2.1.1`\n - `@uniswap/sdk-core ^6.0.0`\n - `@unruggable_starknet/core ^0.1.0`\n - `starknet ^6.17.0`\n - `tsup ^8.3.5`\n - `vitest ^2.1.4`\n - `eslint 9.13.0`\n - `eslint-config-prettier 9.1.0`\n - `eslint-plugin-prettier 5.2.1`\n - `eslint-plugin-vitest 0.5.4`\n - `whatwg-url 7.1.0`\n\n
\n\n
packages/plugin-trustdb/package.json\n\n - `dompurify 3.2.0`\n - `tsup ^8.3.5`\n - `uuid 11.0.2`\n - `vitest ^2.1.4`\n - `@types/dompurify ^3.2.0`\n - `eslint 9.13.0`\n - `eslint-config-prettier 9.1.0`\n - `eslint-plugin-prettier 5.2.1`\n - `eslint-plugin-vitest 0.5.4`\n - `whatwg-url 7.1.0`\n\n
\n\n
packages/plugin-video-generation/package.json\n\n - `tsup ^8.3.5`\n - `eslint 9.13.0`\n - `eslint-config-prettier 9.1.0`\n - `eslint-plugin-prettier 5.2.1`\n - `eslint-plugin-vitest 0.5.4`\n - `whatwg-url 7.1.0`\n\n
\n\n
\n
\n\n
nvm\n
\n\n
.nvmrc\n\n - `node v23.1.0`\n\n
\n\n
\n
\n\n---\n\n- [ ] Check this box to trigger a request for Renovate to run again on this repository\n\n" + } + ] + }, + { + "number": 75, + "title": "Add npx action", + "state": "open", + "created_at": "2024-10-29 09:00:51", + "updated_at": "2024-10-30 02:36:07", + "author": "lalalune", + "labels": "enhancement", + "assignees": "", + "comments": 1, + "url": "https://github.com/ai16z/eliza/issues/75", + "messages": [ + { + "number": 0, + "author": "lalalune", + "created_at": "2024-10-29T09:00:51Z", + "body": "**Is your feature request related to a problem? Please describe.**\r\n\r\nThe goal of this is to enable someone to run 'npx eliza' and it would start a default local running instance of eliza that they could interact with through chat, using all local models.\r\n\r\nIf the user passes a character file, like --characters=, then it will load that character file. If the characterfile has secrets in it, it will load all of the secrets, as though they had cloned the repo and run npm run dev -- --characters=.json\r\n\r\nThe result of this feature is that I can run `npx @ai16z/eliza` and trial locally, or point to a character file and run an agent (or agents)" + }, + { + "number": 1, + "author": "roninjin10", + "created_at": "2024-10-30T02:36:06Z", + "body": "Happy to add this. What is the entrypoint file that needs to run for this?" + } + ] + }, + { + "number": 72, + "title": "Abstract image descriptions / recognition to use any model provider", + "state": "open", + "created_at": "2024-10-29 04:10:43", + "updated_at": "2024-11-18 10:56:43", + "author": "lalalune", + "labels": "enhancement", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/72", + "messages": [ + { + "number": 0, + "author": "lalalune", + "created_at": "2024-10-29T04:10:43Z", + "body": "**Is your feature request related to a problem? Please describe.**\r\n\r\nRight now the options for model provider for image recognition are openai and local with florence 2. The OpenAI is hardcoded to check for the existence of the API key, and then load that, otherwise use the local.\r\n\r\n**Describe the solution you'd like**\r\n\r\nSimilar to the new model provider abstraction, we want to offer image recognition for whatever the current model is. For example, Claude, Vertex Claude, Cloud Llama, etc." + } + ] + }, + { + "number": 64, + "title": "Tests are failing", + "state": "open", + "created_at": "2024-10-28 14:27:41", + "updated_at": "2024-11-03 23:18:38", + "author": "sirkitree", + "labels": "automation,testing", + "assignees": "sirkitree", + "comments": 3, + "url": "https://github.com/ai16z/eliza/issues/64", + "messages": [ + { + "number": 0, + "author": "sirkitree", + "created_at": "2024-10-28T14:27:41Z", + "body": "Tried out running the tests today (`npm run test`) and there are a lot of failures currently.\r\n\r\nWe should\r\n1. evaluate which tests are still necessary\r\n2. get them working\r\n3. setup automated github action to run test for every pull request\r\n\r\n```\r\nNode.js v22.8.0\r\nTest Suites: 16 failed, 16 total\r\nTests: 58 failed, 58 total\r\nSnapshots: 0 total\r\nTime: 14.737 s\r\nRan all test suites.\r\n```\r\n\r\n@lalalune could you advise here? \r\nI think we can work on the tests but the first task of identifying which ones are necessary would be great if you could help with that more quickly than others." 
+ }, + { + "number": 1, + "author": "sirkitree", + "created_at": "2024-11-02T13:48:53Z", + "body": "from @lalalune \n> there are some, but they need a little love\ni had tests in the old version: https://github.com/jointhealliance/bgent\nhttps://github.com/JoinTheAlliance/bgent/tree/main/src/lib/__tests__\nhttps://github.com/JoinTheAlliance/bgent/tree/main/src/lib/actions/__tests__\nhttps://github.com/JoinTheAlliance/bgent/tree/main/src/lib/evaluators/__tests__\nhttps://github.com/JoinTheAlliance/bgent/tree/main/src/lib/providers/__tests__\nthese are the old ones\ni moved most of these into tests\nif you wanna get the test automation working with a hello world test, i can assign this" + }, + { + "number": 2, + "author": "sirkitree", + "created_at": "2024-11-02T19:29:02Z", + "body": "After the above is merged, we need to quantify existing tests and decide which are still relevant.\n\nI'll analyze the test files and describe their purposes. \nHere are the test suites found in the codebase:\n\n### Basic Tests\n- Located in `core/src/test_resources/basic.test.ts`\n- Simple test suite that verifies:\n - Basic test functionality works\n - Environment variables are accessible\n - Confirms NODE_ENV is 'test' and TEST_DATABASE_CLIENT is 'sqlite'\n\n### Memory Tests\n- Located in `core/tests/memory.test.ts`\n- Tests memory-related functionality:\n - Search memories by embedding similarity\n - Validates memory ranking based on similarity scores\n - Tests memory lifecycle (creation, search, and removal)\n - Verifies embedding caching functionality\n\n### Browser Service Tests\n- Located in `core/tests/browser.test.ts`\n- Tests browser automation functionality:\n - Browser initialization and cleanup\n - Content fetching from various websites\n - CAPTCHA handling\n - Error handling for 404s and network issues\n - Ad blocking functionality\n - Handling of reCAPTCHA and hCAPTCHA\n\n### Token Provider Tests\n- Located in `core/tests/token.test.ts`\n- Tests token-related functionality:\n - Fetching token security data\n - Token holder information\n - Trade data retrieval\n - Formatted token reports\n\n### Goals Tests\n- Located in `core/tests/goals.test.ts` and `core/tests/goal.test.ts`\n- Tests goal management functionality:\n - Goal creation\n - Goal updates\n - Status management\n - Objective tracking\n\n### Runtime Tests\n- Located in `core/tests/runtime.test.ts`\n- Tests core runtime functionality:\n - Runtime instance creation\n - Memory lifecycle within runtime\n - State management\n - Basic runtime operations\n\n### Actions Tests\n- Located in `core/tests/actions.test.ts`\n- Tests action system:\n - Action loading\n - Action validation\n - Action handler execution\n - Test actions (TEST_ACTION and TEST_ACTION_FAIL)\n\n### Continue Action Tests\n- Located in `core/tests/continue.test.ts`\n- Tests the continue action:\n - Validation function responses\n - Message handling\n - Database interactions\n - Action state management\n\n### Messages Tests\n- Located in `core/tests/messages.test.ts`\n- Tests message formatting and handling:\n - Actor formatting\n - Message formatting\n - Facts formatting\n - Message content validation\n\n### Evaluation Tests\n- Located in `core/tests/evaluation.test.ts`\n- Tests evaluation system:\n - Evaluator format validation\n - Context loading\n - Example validation\n - Evaluation process execution\n\nThe test suite uses Jest as the testing framework and includes features like:\n- Before/After hooks for setup and cleanup\n- Environment variable loading from .dev.vars\n- 
Timeout configurations for long-running tests\n- Database-specific test configurations\n- Test reporting functionality\n\nMost tests include proper setup and teardown procedures to ensure a clean testing environment for each test case." + }, + { + "number": 3, + "author": "sirkitree", + "created_at": "2024-11-03T23:01:48Z", + "body": "Now that we have a basic hello world type of test in place and running on ci, we can start lookin at these other ones. I'm going to split each one of these into their own task so they can be handled piece by piece and possibly spread out amongst the collective.\r\n\r\n- [ ] #183 \r\n- [ ] #184\r\n- [ ] #185\r\n- [ ] #186\r\n- [ ] #187\r\n- [ ] #188\r\n- [ ] #189\r\n- [ ] #190\r\n- [ ] #191" + } + ] + }, + { + "number": 56, + "title": "\ud83d\udca1 Have silly tavern compatibility", + "state": "open", + "created_at": "2024-10-27 20:50:48", + "updated_at": "2024-10-27 20:51:18", + "author": "sirkitree", + "labels": "enhancement,characters", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/56", + "messages": [ + { + "number": 0, + "author": "sirkitree", + "created_at": "2024-10-27T20:50:48Z", + "body": "Was suggested on voice chat that we make character files compatible with silly tavern character files." + } + ] + }, + { + "number": 54, + "title": "User Interface Enhancement", + "state": "open", + "created_at": "2024-10-27 18:11:59", + "updated_at": "2024-10-30 01:20:50", + "author": "sirkitree", + "labels": "enhancement,ui,ux,Design", + "assignees": "", + "comments": 1, + "url": "https://github.com/ai16z/eliza/issues/54", + "messages": [ + { + "number": 0, + "author": "sirkitree", + "created_at": "2024-10-27T18:11:59Z", + "body": "Create an intuitive and user-friendly interface for the trust marketplace.\r\n\r\n#### Requirements\r\n- Design order submission forms\r\n- Create trade history views\r\n- Implement trust score displays\r\n- Add confidence level indicators\r\n\r\n#### Acceptance Criteria\r\n- [ ] UI is responsive and intuitive\r\n- [ ] All core features are accessible\r\n- [ ] Trade history is easily viewable\r\n- [ ] Trust scores and confidence levels are clearly displayed" + }, + { + "number": 1, + "author": "sirkitree", + "created_at": "2024-10-27T18:20:28Z", + "body": "We might want this on the [@ai16z/website](https://github.com/ai16z/website)" + } + ] + }, + { + "number": 29, + "title": "\"Private\" Actions", + "state": "open", + "created_at": "2024-10-26 04:58:58", + "updated_at": "2024-11-01 21:59:05", + "author": "lalalune", + "labels": "enhancement", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/29", + "messages": [ + { + "number": 0, + "author": "lalalune", + "created_at": "2024-10-26T04:58:58Z", + "body": "Actions should have a \"private\" mode, where they can only be run by the agent in it's own loop, as opposed to in response to a user's request. So it can decide to make a coin, for example." 
+ } + ] + }, + { + "number": 28, + "title": "Pump Fun Token Creation", + "state": "open", + "created_at": "2024-10-26 04:34:43", + "updated_at": "2024-10-27 14:49:30", + "author": "lalalune", + "labels": "enhancement", + "assignees": "", + "comments": 1, + "url": "https://github.com/ai16z/eliza/issues/28", + "messages": [ + { + "number": 0, + "author": "lalalune", + "created_at": "2024-10-26T04:34:43Z", + "body": "We should add this action\r\n\r\nhttps://github.com/cicere/pumpfun-bundler" + }, + { + "number": 1, + "author": "sirkitree", + "created_at": "2024-10-27T13:47:55Z", + "body": "related: https://github.com/ai16z/eliza/commit/72a88ea250a19906bd2c634dedbc5f73cd42a441" + } + ] + }, + { + "number": 27, + "title": "Awareness of Twitter bio and username", + "state": "open", + "created_at": "2024-10-26 01:12:13", + "updated_at": "2024-11-01 21:48:58", + "author": "lalalune", + "labels": "enhancement,Client: Twitter", + "assignees": "alextitonis", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/27", + "messages": [ + { + "number": 0, + "author": "lalalune", + "created_at": "2024-10-26T01:12:13Z", + "body": "Right now the agent is not aware of his twitter bio and only vaguely of username. We want him to know username, screen name, bio, and nay nicknames too." + } + ] + }, + { + "number": 26, + "title": "News feed", + "state": "open", + "created_at": "2024-10-25 12:27:15", + "updated_at": "2024-11-07 22:52:41", + "author": "lalalune", + "labels": "enhancement", + "assignees": "", + "comments": 2, + "url": "https://github.com/ai16z/eliza/issues/26", + "messages": [ + { + "number": 0, + "author": "lalalune", + "created_at": "2024-10-25T12:27:15Z", + "body": "We should pull news and current events based on the agent character's topics list.\r\n\r\n- This should be cached every hour or so that we don't bang the APIs too hard.\r\n- We should summarize news stories (see the summarization service) and load them into the agent's context for tweeting and Discord chat generation" + }, + { + "number": 1, + "author": "sirkitree", + "created_at": "2024-10-27T14:46:33Z", + "body": "Good possible sources?" + }, + { + "number": 2, + "author": "MeDott29", + "created_at": "2024-11-07T22:52:41Z", + "body": "bump https://discord.com/channels/1253563208833433701/1300025221834739744/1304215833702043729" + } + ] + }, + { + "number": 25, + "title": "Likes, retweets and quote tweets", + "state": "open", + "created_at": "2024-10-25 10:10:27", + "updated_at": "2024-10-27 14:55:27", + "author": "lalalune", + "labels": "enhancement,Client: Twitter", + "assignees": "", + "comments": 0, + "url": "https://github.com/ai16z/eliza/issues/25", + "messages": [ + { + "number": 0, + "author": "lalalune", + "created_at": "2024-10-25T10:10:27Z", + "body": "Add Like, RT and QT for search and response handling on Twitter Search client." + } + ] + } +] diff --git a/issues_prs/pullrequests.json b/issues_prs/pullrequests.json new file mode 100644 index 0000000..e69de29