diff --git a/packages/subquery/__init__.py b/packages/subquery/__init__.py
new file mode 100644
index 00000000..ba697745
--- /dev/null
+++ b/packages/subquery/__init__.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# ------------------------------------------------------------------------------
+#
+# Copyright 2024 Valory AG
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# ------------------------------------------------------------------------------
diff --git a/packages/subquery/customs/__init__.py b/packages/subquery/customs/__init__.py
new file mode 100644
index 00000000..ba697745
--- /dev/null
+++ b/packages/subquery/customs/__init__.py
@@ -0,0 +1,19 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# ------------------------------------------------------------------------------
+#
+# Copyright 2024 Valory AG
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# ------------------------------------------------------------------------------
diff --git a/packages/subquery/customs/graphql_response_analyser/__init__.py b/packages/subquery/customs/graphql_response_analyser/__init__.py
new file mode 100644
index 00000000..4888dbc7
--- /dev/null
+++ b/packages/subquery/customs/graphql_response_analyser/__init__.py
@@ -0,0 +1,20 @@
+# -*- coding: utf-8 -*-
+# ------------------------------------------------------------------------------
+#
+# Copyright 2023-2024 Valory AG
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# ------------------------------------------------------------------------------
+
+"""This module explains the output of the GraphQL server within a given context, including a description of the indexed data being served and the query itself."""
diff --git a/packages/subquery/customs/graphql_response_analyser/component.yaml b/packages/subquery/customs/graphql_response_analyser/component.yaml
new file mode 100644
index 00000000..fcd53c8b
--- /dev/null
+++ b/packages/subquery/customs/graphql_response_analyser/component.yaml
@@ -0,0 +1,19 @@
+name: graphql_response_analyser
+author: subquery
+version: 0.1.0
+type: custom
+description: This module explains the output of the GraphQL chain data indexer within a given context, including a description of the indexed data being served and the query itself.
+license: Apache-2.0
+aea_version: '>=1.0.0, <2.0.0'
+entry_point: graphql_response_analyser.py
+fingerprint:
+  __init__.py:
+  graphql_response_analyser.py:
+fingerprint_ignore_patterns: []
+callable: run
+dependencies:
+  openai:
+    version: ==1.11.0
+  tiktoken:
+    version: ==0.5.1
+  requests: {}
diff --git a/packages/subquery/customs/graphql_response_analyser/graphql_response_analyser.py b/packages/subquery/customs/graphql_response_analyser/graphql_response_analyser.py
new file mode 100644
index 00000000..ac61215f
--- /dev/null
+++ b/packages/subquery/customs/graphql_response_analyser/graphql_response_analyser.py
@@ -0,0 +1,186 @@
+# -*- coding: utf-8 -*-
+# ------------------------------------------------------------------------------
+#
+# Copyright 2023-2024 Valory AG
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# ------------------------------------------------------------------------------
+
+"""Contains the job definitions"""
+
+from typing import Any, Dict, Optional, Tuple
+import os
+from openai import OpenAI
+import json
+import requests
+
+client: Optional[OpenAI] = None
+
+
+def generate_graphql_query(user_request, schema, description, examples):
+    return (
+        f"""
+    You are a GraphQL query generator. Based on the following GraphQL schema and the user's natural language request, generate a valid GraphQL query.
+
+    GraphQL Project Description: "{description}"
+
+    User Request: "{user_request}"
+
+    GraphQL Schema: {json.dumps(schema)}
+
+    Example Queries:
+
+    """
+        + examples
+        + """
+
+    GraphQL Query:
+
+"""
+    )
+
+
+# Analyze data and generate response using OpenAI
+def analyze_data_and_generate_response(data):
+    return f"""
+
+    Once the query you generated was executed, the following data was fetched:
+
+    JSON Data: {json.dumps(data)}
+
+    Based on the provided context, please generate a bullet-pointed summary in a machine-readable JSON format. The JSON structure should have an array named 'analysis_result', with each analytical conclusion represented as a separate string element within the array.
+ """ + + +class OpenAIClientManager: + """Client context manager for OpenAI.""" + + def __init__(self, api_key: str): + self.api_key = api_key + + def __enter__(self) -> OpenAI: + global client + if client is None: + client = OpenAI(api_key=self.api_key) + return client + + def __exit__(self, exc_type, exc_value, traceback) -> None: + global client + if client is not None: + client.close() + client = None + + +DEFAULT_OPENAI_SETTINGS = { + "max_tokens": 500, + "temperature": 0.7, +} +PREFIX = "openai-" +ENGINES = { + "chat": ["gpt-3.5-turbo", "gpt-4"], + "completion": ["gpt-3.5-turbo-instruct"], +} +ALLOWED_TOOLS = [PREFIX + value for values in ENGINES.values() for value in values] + + +# Fetch the GraphQL schema using introspection query +def fetch_graphql_schema(endpoint): + introspection_query = """ + { + __schema { + types { + name + fields { + name + type { + kind + name + } + } + } + } + } + """ + response = requests.post(endpoint, json={"query": introspection_query}) + if response.status_code == 200: + return response.json() + else: + raise Exception( + f"Failed to fetch schema: {response.status_code}, {response.text}" + ) + + +def fetch_data_from_indexer(endpoint, query): + response = requests.post(endpoint, json={"query": query}) + if response.status_code == 200: + return response.json() + else: + raise Exception( + f"Failed to fetch schema: {response.status_code}, {response.text}" + ) + + +def run(**kwargs) -> Tuple[Optional[str], Optional[Dict[str, Any]], Any, Any]: + """Run the task""" + with OpenAIClientManager(kwargs["api_keys"]["openai"]): + max_tokens = kwargs.get("max_tokens", DEFAULT_OPENAI_SETTINGS["max_tokens"]) + temperature = kwargs.get("temperature", DEFAULT_OPENAI_SETTINGS["temperature"]) + endpoint = kwargs.get("endpoint") + description = kwargs.get("description") + request = kwargs.get("request") + examples = kwargs.get("examples") + tool = kwargs["tool"] + schema = fetch_graphql_schema(endpoint) + prompt = generate_graphql_query(request, schema, description, examples) + if tool not in ALLOWED_TOOLS: + return ( + f"Tool {tool} is not in the list of supported tools.", + None, + None, + None, + ) + engine = tool.replace(PREFIX, "") + messages = [ + {"role": "user", "content": prompt}, + ] + response = client.chat.completions.create( + model=engine, + messages=messages, + temperature=temperature, + max_tokens=max_tokens, + n=1, + timeout=120, + stop=None, + ) + query_to_be_used = response.choices[0].message.content + print(query_to_be_used) + requested_data = fetch_data_from_indexer(endpoint, query_to_be_used) + messages = [ + {"role": "user", "content": prompt}, + {"role": "user", "content": query_to_be_used}, + { + "role": "user", + "content": analyze_data_and_generate_response(requested_data), + }, + ] + response = client.chat.completions.create( + model=engine, + messages=messages, + temperature=temperature, + max_tokens=max_tokens, + n=1, + timeout=120, + stop=None, + ) + return response.choices[0].message.content, prompt, None, None \ No newline at end of file diff --git a/tests/subquery/examples.py b/tests/subquery/examples.py new file mode 100644 index 00000000..39204f24 --- /dev/null +++ b/tests/subquery/examples.py @@ -0,0 +1,72 @@ +# -*- coding: utf-8 -*- +# ------------------------------------------------------------------------------ +# +# Copyright 2024 Valory AG +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+# ------------------------------------------------------------------------------
+
+query_examples = """
+1.
+
+# Provide a list of addresses that have made the highest number of transfers to a specific address
+
+query MyQuery {
+  account(id: "0x0000000000000000000000000000000000000000") {
+    id
+    receivedTransfers {
+      groupedAggregates(groupBy: FROM_ID) {
+        keys
+        distinctCount {
+          id
+        }
+      }
+    }
+  }
+}
+
+2.
+
+# Please provide a list of addresses that transferred the highest amounts within a certain timeframe.
+
+query MyQuery {
+  account(id: "0x0000000000000000000000000000000000000000") {
+    id
+    receivedTransfers(
+      first: 5
+      filter: {and: [{timestamp: {greaterThan: "0"}}, {timestamp: {lessThan: "1000"}}]}
+    ) {
+      groupedAggregates(groupBy: FROM_ID) {
+        keys
+        sum {
+          value
+        }
+      }
+    }
+  }
+}
+
+3.
+
+# Please provide the first transfer ever indexed
+
+query MyQuery {
+  transfers(first: 1, orderBy: TIMESTAMP_ASC) {
+    nodes {
+      id
+      value
+    }
+  }
+}
+"""
\ No newline at end of file
diff --git a/tests/test_tools.py b/tests/test_tools.py
index c98f6cb6..53b7cd27 100644
--- a/tests/test_tools.py
+++ b/tests/test_tools.py
@@ -34,6 +34,12 @@
 from packages.valory.customs.prediction_request import prediction_request
 from packages.valory.skills.task_execution.utils.apis import KeyChain
 from packages.valory.skills.task_execution.utils.benchmarks import TokenCounterCallback
+
+from packages.subquery.customs.graphql_response_analyser import (
+    graphql_response_analyser
+)
+from tests.subquery.examples import query_examples
+
 from tests.constants import (
     OPENAI_SECRET_KEY,
     STABILITY_API_KEY,
@@ -191,7 +197,6 @@ class TestDALLEGeneration(BaseToolTest):
     ]
     tool_module = dalle_request
 
-
 class TestPredictionSentenceEmbeddings(BaseToolTest):
     """Test Prediction Sum URL Content."""
 
@@ -202,7 +207,6 @@ class TestPredictionSentenceEmbeddings(BaseToolTest):
     ]
     tool_module = prediction_sentence_embeddings
 
-
 class TestOfvMarketResolverTool(BaseToolTest):
     """Test OFV Market Resolver Tool."""
 
@@ -212,3 +216,23 @@ class TestOfvMarketResolverTool(BaseToolTest):
         'Please take over the role of a Data Scientist to evaluate the given question. With the given question "Will Apple release iPhone 17 by March 2025?" and the `yes` option represented by `Yes` and the `no` option represented by `No`, what are the respective probabilities of `p_yes` and `p_no` occurring?'
     ]
     tool_module = ofv_market_resolver
+
+class TestGraphResponseAnalyser:
+    """Check successful query output analysis."""
+
+    tool_callable: str = "run"
+    tool_module = graphql_response_analyser
+
+    def test_run(self) -> None:
+        """Test run method."""
+        kwargs = dict(
+            tool="openai-gpt-3.5-turbo",
+            request="When was the first transfer?",
+            examples=query_examples,
+            endpoint="https://api.subquery.network/sq/subquery/cusdnew",
+            description="This project manages and indexes data pertaining to cUSD (CELO USD) ERC-20 token transfers and approvals recorded within a dedicated smart contract. The stored data includes information on approvals granted and transfers executed. These entities provide insights into the authorization and movement of cUSD tokens within the CELO ecosystem, facilitating analysis and monitoring of token transactions.",
+            api_keys={"openai": OPENAI_SECRET_KEY},
+        )
+        func = getattr(self.tool_module, self.tool_callable)
+        response = func(**kwargs)
+        assert "analysis_result" in response[0]
\ No newline at end of file
diff --git a/tox.ini b/tox.ini
index 1ba5decf..66edfe99 100644
--- a/tox.ini
+++ b/tox.ini
@@ -20,7 +20,7 @@ deps =
     {[deps-tests]deps}
     open-autonomy==0.14.10
     fastapi==0.110.3
-    openai==0.27.2
+    openai==1.11.0
     requests==2.28.1
     mech-client==0.2.5
     py-multibase==1.0.3