Commit

Merge pull request #689 from ScrapeGraphAI/687-smartscrapermulticoncatgraph-error-with-bedrock

687 smartscrapermulticoncatgraph error with bedrock
VinciGit00 authored Sep 23, 2024
2 parents dd0f260 + 8ce08ba commit 390ad82
Showing 4 changed files with 55 additions and 97 deletions.
13 changes: 4 additions & 9 deletions scrapegraphai/graphs/smart_scraper_multi_concat_graph.py
@@ -60,19 +60,14 @@ def _create_graph(self) -> BaseGraph:
            BaseGraph: A graph instance representing the web scraping and searching workflow.
        """

-        smart_scraper_instance = SmartScraperGraph(
-            prompt="",
-            source="",
-            config=self.copy_config,
-            schema=self.copy_schema
-        )
-
        graph_iterator_node = GraphIteratorNode(
            input="user_prompt & urls",
            output=["results"],
            node_config={
-                "graph_instance": smart_scraper_instance,
-            }
+                "graph_instance": SmartScraperGraph,
+                "scraper_config": self.copy_config,
+            },
+            schema=self.copy_schema,
        )

        concat_answers_node = ConcatAnswersNode(
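For context, here is a minimal sketch of the per-URL instantiation pattern this change enables. It assumes GraphIteratorNode builds a fresh graph for each URL from the class and scraper_config it now receives; the helper function below is illustrative and not the node's actual code.

from scrapegraphai.graphs import SmartScraperGraph

def build_graph_for_url(graph_class, scraper_config, schema, prompt, url):
    # A fresh graph per URL means provider-specific state (for example a
    # Bedrock chat client) is no longer shared by one pre-built instance
    # across parallel iterations.
    return graph_class(
        prompt=prompt,
        source=url,
        config=scraper_config,
        schema=schema,
    )

# Illustrative call, mirroring the node_config shown in the diff above:
# graph = build_graph_for_url(SmartScraperGraph, copy_config, copy_schema,
#                             "List all the articles", "https://example.com")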
133 changes: 47 additions & 86 deletions scrapegraphai/nodes/generate_answer_node.py
@@ -1,6 +1,3 @@
-"""
-GenerateAnswerNode Module
-"""
from typing import List, Optional
from langchain.prompts import PromptTemplate
from langchain_core.output_parsers import JsonOutputParser
@@ -12,29 +9,12 @@
from tqdm import tqdm
from .base_node import BaseNode
from ..utils.output_parser import get_structured_output_parser, get_pydantic_output_parser
-from ..prompts import (TEMPLATE_CHUNKS,
-                       TEMPLATE_NO_CHUNKS, TEMPLATE_MERGE,
-                       TEMPLATE_CHUNKS_MD, TEMPLATE_NO_CHUNKS_MD,
-                       TEMPLATE_MERGE_MD)
+from ..prompts import (
+    TEMPLATE_CHUNKS, TEMPLATE_NO_CHUNKS, TEMPLATE_MERGE,
+    TEMPLATE_CHUNKS_MD, TEMPLATE_NO_CHUNKS_MD, TEMPLATE_MERGE_MD
+)

class GenerateAnswerNode(BaseNode):
-    """
-    A node that generates an answer using a large language model (LLM) based on the user's input
-    and the content extracted from a webpage. It constructs a prompt from the user's input
-    and the scraped content, feeds it to the LLM, and parses the LLM's response to produce
-    an answer.
-    Attributes:
-        llm_model: An instance of a language model client, configured for generating answers.
-        verbose (bool): A flag indicating whether to show print statements during execution.
-    Args:
-        input (str): Boolean expression defining the input keys needed from the state.
-        output (List[str]): List of output keys to be updated in the state.
-        node_config (dict): Additional configuration for the node.
-        node_name (str): The unique identifier name for the node, defaulting to "GenerateAnswer".
-    """
-
    def __init__(
        self,
        input: str,
@@ -43,121 +23,102 @@ def __init__(
        node_name: str = "GenerateAnswer",
    ):
        super().__init__(node_name, "node", input, output, 2, node_config)

        self.llm_model = node_config["llm_model"]

        if isinstance(node_config["llm_model"], ChatOllama):
-            self.llm_model.format="json"
-
-        self.verbose = (
-            True if node_config is None else node_config.get("verbose", False)
-        )
-        self.force = (
-            False if node_config is None else node_config.get("force", False)
-        )
-        self.script_creator = (
-            False if node_config is None else node_config.get("script_creator", False)
-        )
-        self.is_md_scraper = (
-            False if node_config is None else node_config.get("is_md_scraper", False)
-        )
+            self.llm_model.format = "json"
+
+        self.verbose = node_config.get("verbose", False)
+        self.force = node_config.get("force", False)
+        self.script_creator = node_config.get("script_creator", False)
+        self.is_md_scraper = node_config.get("is_md_scraper", False)
        self.additional_info = node_config.get("additional_info")

    def execute(self, state: dict) -> dict:
-        """
-        Generates an answer by constructing a prompt from the user's input and the scraped
-        content, querying the language model, and parsing its response.
-        Args:
-            state (dict): The current state of the graph. The input keys will be used
-                to fetch the correct data from the state.
-        Returns:
-            dict: The updated state with the output key containing the generated answer.
-        Raises:
-            KeyError: If the input keys are not found in the state, indicating
-                that the necessary information for generating an answer is missing.
-        """
-
        self.logger.info(f"--- Executing {self.node_name} Node ---")

-        input_keys = self.get_input_keys(state)
+        input_keys = self.get_input_keys(state)
        input_data = [state[key] for key in input_keys]
        user_prompt = input_data[0]
        doc = input_data[1]

        if self.node_config.get("schema", None) is not None:
-
            if isinstance(self.llm_model, (ChatOpenAI, ChatMistralAI)):
                self.llm_model = self.llm_model.with_structured_output(
-                    schema = self.node_config["schema"])
+                    schema=self.node_config["schema"]
+                )
                output_parser = get_structured_output_parser(self.node_config["schema"])
                format_instructions = "NA"
            else:
+                if not isinstance(self.llm_model, ChatBedrock):
                    output_parser = get_pydantic_output_parser(self.node_config["schema"])
                    format_instructions = output_parser.get_format_instructions()
+                else:
+                    output_parser = None
+                    format_instructions = ""
        else:
+            if not isinstance(self.llm_model, ChatBedrock):
                output_parser = JsonOutputParser()
                format_instructions = output_parser.get_format_instructions()
+            else:
+                output_parser = None
+                format_instructions = ""

        if isinstance(self.llm_model, (ChatOpenAI, AzureChatOpenAI)) \
            and not self.script_creator \
            or self.force \
            and not self.script_creator or self.is_md_scraper:

-            template_no_chunks_prompt = TEMPLATE_NO_CHUNKS_MD
-            template_chunks_prompt = TEMPLATE_CHUNKS_MD
-            template_merge_prompt = TEMPLATE_MERGE_MD
+            template_no_chunks_prompt = TEMPLATE_NO_CHUNKS_MD
+            template_chunks_prompt = TEMPLATE_CHUNKS_MD
+            template_merge_prompt = TEMPLATE_MERGE_MD
        else:
-            template_no_chunks_prompt = TEMPLATE_NO_CHUNKS
-            template_chunks_prompt = TEMPLATE_CHUNKS
-            template_merge_prompt = TEMPLATE_MERGE
+            template_no_chunks_prompt = TEMPLATE_NO_CHUNKS
+            template_chunks_prompt = TEMPLATE_CHUNKS
+            template_merge_prompt = TEMPLATE_MERGE

        if self.additional_info is not None:
-            template_no_chunks_prompt = self.additional_info + template_no_chunks_prompt
-            template_chunks_prompt = self.additional_info + template_chunks_prompt
-            template_merge_prompt = self.additional_info + template_merge_prompt
+            template_no_chunks_prompt = self.additional_info + template_no_chunks_prompt
+            template_chunks_prompt = self.additional_info + template_chunks_prompt
+            template_merge_prompt = self.additional_info + template_merge_prompt

        if len(doc) == 1:
            prompt = PromptTemplate(
-                template=template_no_chunks_prompt ,
+                template=template_no_chunks_prompt,
                input_variables=["question"],
-                partial_variables={"context": doc,
-                                   "format_instructions": format_instructions})
-            chain = prompt | self.llm_model | output_parser
+                partial_variables={"context": doc, "format_instructions": format_instructions}
+            )
+            chain = prompt | self.llm_model
+            if output_parser:
+                chain = chain | output_parser
            answer = chain.invoke({"question": user_prompt})

            state.update({self.output[0]: answer})
            return state

        chains_dict = {}
        for i, chunk in enumerate(tqdm(doc, desc="Processing chunks", disable=not self.verbose)):

            prompt = PromptTemplate(
-                template=TEMPLATE_CHUNKS,
+                template=template_chunks_prompt,
                input_variables=["question"],
-                partial_variables={"context": chunk,
-                                   "chunk_id": i + 1,
-                                   "format_instructions": format_instructions})
+                partial_variables={"context": chunk, "chunk_id": i + 1, "format_instructions": format_instructions}
+            )
            chain_name = f"chunk{i+1}"
-            chains_dict[chain_name] = prompt | self.llm_model | output_parser
+            chains_dict[chain_name] = prompt | self.llm_model
+            if output_parser:
+                chains_dict[chain_name] = chains_dict[chain_name] | output_parser

        async_runner = RunnableParallel(**chains_dict)

-        batch_results = async_runner.invoke({"question": user_prompt})
+        batch_results = async_runner.invoke({"question": user_prompt})

        merge_prompt = PromptTemplate(
-            template = template_merge_prompt ,
-            input_variables=["context", "question"],
-            partial_variables={"format_instructions": format_instructions},
-        )
+            template=template_merge_prompt,
+            input_variables=["context", "question"],
+            partial_variables={"format_instructions": format_instructions}
+        )

-        merge_chain = merge_prompt | self.llm_model | output_parser
+        merge_chain = merge_prompt | self.llm_model
+        if output_parser:
+            merge_chain = merge_chain | output_parser
        answer = merge_chain.invoke({"context": batch_results, "question": user_prompt})

        state.update({self.output[0]: answer})
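The key behavioural change above is that the output parser is now optional: when the model is ChatBedrock, no JSON parser or format instructions are attached, and the chain is composed conditionally. Below is a minimal standalone sketch of that composition pattern, assuming only langchain-core; the stand-in model and prompt text are illustrative, not the node's real templates.

from langchain_core.output_parsers import JsonOutputParser
from langchain_core.prompts import PromptTemplate
from langchain_core.runnables import RunnableLambda

# Stand-in for a chat model such as ChatBedrock; it just returns a JSON string.
fake_llm = RunnableLambda(lambda prompt_value: '{"answer": "42"}')

is_bedrock = True  # the node checks isinstance(self.llm_model, ChatBedrock)

# Bedrock path: skip the JSON parser and inject no format instructions.
output_parser = None if is_bedrock else JsonOutputParser()
format_instructions = "" if output_parser is None else output_parser.get_format_instructions()

prompt = PromptTemplate(
    template="Answer the question.\n{format_instructions}\nQuestion: {question}",
    input_variables=["question"],
    partial_variables={"format_instructions": format_instructions},
)

chain = prompt | fake_llm
if output_parser:
    chain = chain | output_parser  # parse only when a parser is configured

print(chain.invoke({"question": "What does the page say?"}))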
2 changes: 1 addition & 1 deletion scrapegraphai/nodes/graph_iterator_node.py
@@ -130,7 +130,7 @@ async def _async_run(graph):
            if url.startswith("http"):
                graph.input_key = "url"
            participants.append(graph)

        futures = [_async_run(graph) for graph in participants]

        answers = await tqdm.gather(
4 changes: 3 additions & 1 deletion scrapegraphai/utils/research_web.py
@@ -60,7 +60,9 @@ def search_on_web(query: str, search_engine: str = "Google",

    elif search_engine.lower() == "searxng":
        url = f"http://localhost:{port}"
-        params = {"q": query, "format": "json"}
+        params = {"q": query,
+                  "format": "json",
+                  "engines": "google,duckduckgo,brave,qwant,bing"}

        response = requests.get(url, params=params)
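For reference, a minimal standalone sketch of the SearxNG request built above; the port and query are assumed values for illustration, and a local SearxNG instance must be running for the call to succeed.

import requests

port = 8080  # assumed local SearxNG port; in the library it comes from the function arguments
query = "example search query"

url = f"http://localhost:{port}"
params = {
    "q": query,
    "format": "json",
    "engines": "google,duckduckgo,brave,qwant,bing",  # engine list added by this change
}

response = requests.get(url, params=params)
results = response.json().get("results", [])  # SearxNG's JSON API returns hits under "results"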
