diff --git a/.github/ISSUE_TEMPLATE.md b/.github/ISSUE_TEMPLATE.md
deleted file mode 100644
index b3b0d0daeed..00000000000
--- a/.github/ISSUE_TEMPLATE.md
+++ /dev/null
@@ -1,57 +0,0 @@
-### Description
-
-
-### Environment
-- AutoGen version:
-- Python version:
-- Operating System:
-
-### Steps to Reproduce (for bugs)
-
-
-1. Step 1
-2. Step 2
-3. ...
-
-### Expected Behavior
-
-
-### Actual Behavior
-
-
-### Screenshots / Logs (if applicable)
-
-
-### Additional Information
-
-
-### Possible Solution (if you have one)
-
-
-### Is this a Bug or Feature Request?
-
-
-### Priority
-
-
-### Difficulty
-
-
-### Any related issues?
-
-
-### Any relevant discussions?
-
-
-### Checklist
-
-- [ ] I have searched for similar issues and didn't find any duplicates.
-- [ ] I have provided a clear and concise description of the issue.
-- [ ] I have included the necessary environment details.
-- [ ] I have outlined the steps to reproduce the issue.
-- [ ] I have included any relevant logs or screenshots.
-- [ ] I have indicated whether this is a bug or a feature request.
-- [ ] I have set the priority and difficulty levels.
-
-### Additional Comments
-
diff --git a/.github/ISSUE_TEMPLATE/bug_report.yml b/.github/ISSUE_TEMPLATE/bug_report.yml
index 434226b3e88..090fa6cc593 100644
--- a/.github/ISSUE_TEMPLATE/bug_report.yml
+++ b/.github/ISSUE_TEMPLATE/bug_report.yml
@@ -1,53 +1,55 @@
name: Bug Report
-description: File a bug report
-title: "[Bug]: "
+description: Report a bug
labels: ["bug"]
body:
- type: textarea
- id: description
attributes:
- label: Describe the bug
- description: A clear and concise description of what the bug is.
- placeholder: What went wrong?
+ label: What happened?
+      description: Please provide as much information as possible; this helps us address the issue.
+ validations:
+ required: true
- type: textarea
- id: reproduce
attributes:
- label: Steps to reproduce
- description: |
- Steps to reproduce the behavior:
-
- 1. Step 1
- 2. Step 2
- 3. ...
- 4. See error
- placeholder: How can we replicate the issue?
+ label: What did you expect to happen?
+ validations:
+ required: true
- type: textarea
- id: modelused
attributes:
- label: Model Used
- description: A description of the model that was used when the error was encountered
+ label: How can we reproduce it (as minimally and precisely as possible)?
+      description: Please provide steps to reproduce. If possible, include code that can be run.
+ validations:
+ required: true
+ - type: input
+ attributes:
+ label: AutoGen version
+      description: What version or commit of the library was used?
+ validations:
+ required: true
+ - type: dropdown
+ attributes:
+      label: Which package was this bug in?
+ options:
+ - Core
+ - AgentChat
+ - Extensions
+ - AutoGen Studio
+ - Magentic One
+ - AutoGen Bench
+ - Other
+ validations:
+ required: true
+ - type: input
+ attributes:
+ label: Model used
+ description: If a model was used, please describe it here, indicating whether it is a local model or a cloud-hosted model
placeholder: gpt-4, mistral-7B etc
- - type: textarea
- id: expected_behavior
+ - type: input
attributes:
- label: Expected Behavior
- description: A clear and concise description of what you expected to happen.
- placeholder: What should have happened?
- - type: textarea
- id: screenshots
+ label: Python version
+ - type: input
attributes:
- label: Screenshots and logs
- description: If applicable, add screenshots and logs to help explain your problem.
- placeholder: Add screenshots here
+ label: Operating system
- type: textarea
- id: additional_information
- attributes:
- label: Additional Information
- description: |
- - AutoGen Version:
- - Operating System:
- - Python Version:
- - Related Issues:
- - Any other relevant information.
- placeholder: Any additional details
+ attributes:
+ label: Any additional info you think would be helpful for fixing this bug
diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml
index 0086358db1e..76afcbcc5f8 100644
--- a/.github/ISSUE_TEMPLATE/config.yml
+++ b/.github/ISSUE_TEMPLATE/config.yml
@@ -1 +1,5 @@
blank_issues_enabled: true
+contact_links:
+ - name: Questions or general help 💬
+ url: https://github.com/microsoft/autogen/discussions
+ about: Please ask and answer questions here.
diff --git a/.github/ISSUE_TEMPLATE/feature_request.yml b/.github/ISSUE_TEMPLATE/feature_request.yml
index e8a63df7a6e..57f360761a7 100644
--- a/.github/ISSUE_TEMPLATE/feature_request.yml
+++ b/.github/ISSUE_TEMPLATE/feature_request.yml
@@ -1,26 +1,18 @@
name: Feature Request
-description: File a feature request
+description: Request a new feature or enhancement
labels: ["enhancement"]
-title: "[Feature Request]: "
body:
- type: textarea
- id: problem_description
attributes:
- label: Is your feature request related to a problem? Please describe.
- description: A clear and concise description of what the problem is. Ex. I'm always frustrated when [...]
- placeholder: What problem are you trying to solve?
+ label: What feature would you like to be added?
+      description: Please describe the desired feature. Be descriptive, provide examples, and, if possible, propose a solution.
+ validations:
+ required: true
- type: textarea
- id: solution_description
attributes:
- label: Describe the solution you'd like
- description: A clear and concise description of what you want to happen.
- placeholder: How do you envision the solution?
-
- - type: textarea
- id: additional_context
- attributes:
- label: Additional context
- description: Add any other context or screenshots about the feature request here.
- placeholder: Any additional information
+ label: Why is this needed?
+ description: Why is it important that this feature is implemented? What problem or need does it solve?
+ validations:
+ required: true
diff --git a/.github/ISSUE_TEMPLATE/general_issue.yml b/.github/ISSUE_TEMPLATE/general_issue.yml
deleted file mode 100644
index b585f4642f4..00000000000
--- a/.github/ISSUE_TEMPLATE/general_issue.yml
+++ /dev/null
@@ -1,41 +0,0 @@
-name: General Issue
-description: File a general issue
-title: "[Issue]: "
-labels: []
-
-body:
- - type: textarea
- id: description
- attributes:
- label: Describe the issue
- description: A clear and concise description of what the issue is.
- placeholder: What went wrong?
- - type: textarea
- id: reproduce
- attributes:
- label: Steps to reproduce
- description: |
- Steps to reproduce the behavior:
-
- 1. Step 1
- 2. Step 2
- 3. ...
- 4. See error
- placeholder: How can we replicate the issue?
- - type: textarea
- id: screenshots
- attributes:
- label: Screenshots and logs
- description: If applicable, add screenshots and logs to help explain your problem.
- placeholder: Add screenshots here
- - type: textarea
- id: additional_information
- attributes:
- label: Additional Information
- description: |
- - AutoGen Version:
- - Operating System:
- - Python Version:
- - Related Issues:
- - Any other relevant information.
- placeholder: Any additional details
diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml
index a92044f15b7..0107e7314fc 100644
--- a/.github/workflows/build.yml
+++ b/.github/workflows/build.yml
@@ -5,15 +5,13 @@ name: Build
on:
push:
- branches: ["main"]
+ branches: ["0.2"]
pull_request:
- branches: ["main"]
- merge_group:
- types: [checks_requested]
+ branches: ["0.2"]
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref }}
- cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
+ cancel-in-progress: ${{ github.ref != 'refs/heads/0.2' }}
permissions: {}
jobs:
paths-filter:
diff --git a/.github/workflows/contrib-openai.yml b/.github/workflows/contrib-openai.yml
index 7e8fb003317..d084e44bd60 100644
--- a/.github/workflows/contrib-openai.yml
+++ b/.github/workflows/contrib-openai.yml
@@ -5,7 +5,7 @@ name: OpenAI4ContribTests
on:
pull_request:
- branches: ["main"]
+ branches: ["0.2"]
paths:
- "autogen/**"
- "test/agentchat/contrib/**"
diff --git a/.github/workflows/contrib-tests.yml b/.github/workflows/contrib-tests.yml
index f14a7f09ebe..7d779bc5fae 100644
--- a/.github/workflows/contrib-tests.yml
+++ b/.github/workflows/contrib-tests.yml
@@ -5,7 +5,7 @@ name: ContribTests
on:
pull_request:
- branches: ["main"]
+ branches: ["0.2"]
paths:
- "autogen/**"
- "test/agentchat/contrib/**"
@@ -16,7 +16,7 @@ on:
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref }}
- cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
+ cancel-in-progress: ${{ github.ref != 'refs/heads/0.2' }}
permissions:
{}
# actions: read
diff --git a/.github/workflows/deploy-website.yml b/.github/workflows/deploy-website.yml
index 2f2ba4d473f..e576ca1b4db 100644
--- a/.github/workflows/deploy-website.yml
+++ b/.github/workflows/deploy-website.yml
@@ -2,20 +2,18 @@ name: docs
on:
pull_request:
- branches: [main]
- path:
+ branches: ["0.2"]
+ paths:
- "autogen/*"
- "website/*"
- ".github/workflows/deploy-website.yml"
push:
- branches: [main]
- path:
+ branches: ["0.2"]
+ paths:
- "autogen/*"
- "website/*"
- ".github/workflows/deploy-website.yml"
workflow_dispatch:
- merge_group:
- types: [checks_requested]
permissions:
id-token: write
pages: write
@@ -67,57 +65,3 @@ jobs:
npm i --legacy-peer-deps
npm run build
fi
- gh-release:
- if: github.event_name != 'pull_request'
- runs-on: ubuntu-latest
- defaults:
- run:
- working-directory: website
- steps:
- - uses: actions/checkout@v4
- with:
- lfs: true
- - uses: actions/setup-node@v4
- with:
- node-version: 18.x
- - name: setup python
- uses: actions/setup-python@v5
- with:
- python-version: "3.8"
- - name: pydoc-markdown install
- run: |
- python -m pip install --upgrade pip
- pip install pydoc-markdown pyyaml termcolor
- # Pin databind packages as version 4.5.0 is not compatible with pydoc-markdown.
- pip install databind.core==4.4.2 databind.json==4.4.2
- - name: pydoc-markdown run
- run: |
- pydoc-markdown
- - name: quarto install
- working-directory: ${{ runner.temp }}
- run: |
- wget -q https://github.com/quarto-dev/quarto-cli/releases/download/v1.5.23/quarto-1.5.23-linux-amd64.tar.gz
- tar -xzf quarto-1.5.23-linux-amd64.tar.gz
- echo "$(pwd)/quarto-1.5.23/bin/" >> $GITHUB_PATH
- - name: Process notebooks
- run: |
- python process_notebooks.py render
- - name: Build website
- run: |
- if [ -e yarn.lock ]; then
- yarn install --frozen-lockfile --ignore-engines
- yarn build
- elif [ -e package-lock.json ]; then
- npm ci
- npm run build
- else
- npm i --legacy-peer-deps
- npm run build
- fi
- - name: Upload artifact
- uses: actions/upload-pages-artifact@v3
- with:
- path: "website/build"
- - name: Deploy to GitHub Pages
- id: deployment
- uses: actions/deploy-pages@v4
diff --git a/.github/workflows/dotnet-build.yml b/.github/workflows/dotnet-build.yml
index 6aac54d3818..b333d9065d6 100644
--- a/.github/workflows/dotnet-build.yml
+++ b/.github/workflows/dotnet-build.yml
@@ -6,15 +6,13 @@ name: dotnet-ci
on:
workflow_dispatch:
pull_request:
- branches: [ "main" ]
+ branches: [ "0.2" ]
push:
- branches: [ "main" ]
- merge_group:
- types: [checks_requested]
+ branches: [ "0.2" ]
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref }}
- cancel-in-progress: ${{ github.ref != 'refs/heads/main' || github.ref != 'refs/heads/dotnet' }}
+ cancel-in-progress: ${{ github.ref != 'refs/heads/0.2' || github.ref != 'refs/heads/dotnet' }}
permissions:
contents: read
@@ -122,7 +120,7 @@ jobs:
defaults:
run:
working-directory: dotnet
- if: success() && (github.ref == 'refs/heads/main')
+ if: success() && (github.ref == 'refs/heads/0.2')
needs: aot-test
steps:
- uses: actions/checkout@v4
@@ -228,4 +226,4 @@ jobs:
env:
MYGET_TOKEN: ${{ secrets.MYGET_TOKEN }}
continue-on-error: true
-
+
diff --git a/.github/workflows/issue-needs-triage.yml b/.github/workflows/issue-needs-triage.yml
new file mode 100644
index 00000000000..59cb3479c80
--- /dev/null
+++ b/.github/workflows/issue-needs-triage.yml
@@ -0,0 +1,18 @@
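+# Automatically add the "needs-triage" label to issues when they are opened or reopened.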
+name: Label issues with needs-triage
+on:
+ issues:
+ types:
+ - reopened
+ - opened
+jobs:
+ label_issues:
+ runs-on: ubuntu-latest
+ permissions:
+ issues: write
+ steps:
+ - run: gh issue edit "$NUMBER" --add-label "$LABELS"
+ env:
+ GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
+ GH_REPO: ${{ github.repository }}
+ NUMBER: ${{ github.event.issue.number }}
+ LABELS: needs-triage
diff --git a/.github/workflows/lfs-check.yml b/.github/workflows/lfs-check.yml
index 4baae925de3..dc5e1678be2 100644
--- a/.github/workflows/lfs-check.yml
+++ b/.github/workflows/lfs-check.yml
@@ -1,6 +1,7 @@
name: "Git LFS Check"
-
-on: pull_request
+on:
+ pull_request:
+ branches: ["0.2"]
permissions: {}
jobs:
lfs-check:
diff --git a/.github/workflows/openai.yml b/.github/workflows/openai.yml
index a9ab8e9e0c5..e29f9d0f1ab 100644
--- a/.github/workflows/openai.yml
+++ b/.github/workflows/openai.yml
@@ -5,7 +5,7 @@ name: OpenAI
on:
pull_request:
- branches: ["main"]
+ branches: ["0.2"]
paths:
- "autogen/**"
- "test/**"
diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml
index 8404de61154..7ec5a4eb104 100644
--- a/.github/workflows/pre-commit.yml
+++ b/.github/workflows/pre-commit.yml
@@ -3,8 +3,7 @@ name: Code formatting
# see: https://help.github.com/en/actions/reference/events-that-trigger-workflows
on: # Trigger the workflow on pull request or merge
pull_request:
- merge_group:
- types: [checks_requested]
+ branches: ["0.2"]
defaults:
run:
diff --git a/.github/workflows/python-package.yml b/.github/workflows/python-package.yml
index b2274e64198..e34678ab003 100644
--- a/.github/workflows/python-package.yml
+++ b/.github/workflows/python-package.yml
@@ -5,14 +5,10 @@
name: python-package
on:
- release:
- types: [published]
- workflow_dispatch:
+ push:
+ tags:
+ - "0.2.*"
permissions: {}
- # actions: read
- # checks: read
- # contents: read
- # deployments: read
jobs:
deploy:
strategy:
@@ -28,26 +24,6 @@ jobs:
steps:
- name: Checkout
uses: actions/checkout@v4
- # - name: Cache conda
- # uses: actions/cache@v4
- # with:
- # path: ~/conda_pkgs_dir
- # key: conda-${{ matrix.os }}-python-${{ matrix.python-version }}-${{ hashFiles('environment.yml') }}
- # - name: Setup Miniconda
- # uses: conda-incubator/setup-miniconda@v2
- # with:
- # auto-update-conda: true
- # auto-activate-base: false
- # activate-environment: hcrystalball
- # python-version: ${{ matrix.python-version }}
- # use-only-tar-bz2: true
- - name: Install from source
- # This is required for the pre-commit tests
- shell: pwsh
- run: pip install .
- # - name: Conda list
- # shell: pwsh
- # run: conda list
- name: Build
shell: pwsh
run: |
diff --git a/.github/workflows/samples-tools-tests.yml b/.github/workflows/samples-tools-tests.yml
index e774e5cb0b1..9452f0e377e 100644
--- a/.github/workflows/samples-tools-tests.yml
+++ b/.github/workflows/samples-tools-tests.yml
@@ -5,7 +5,7 @@ name: SamplesToolsTests
on:
pull_request:
- branches: ["main"]
+ branches: ["0.2"]
paths:
- "autogen/**"
- "samples/tools/**"
@@ -14,7 +14,7 @@ on:
concurrency:
group: ${{ github.workflow }}-${{ github.ref }}-${{ github.head_ref }}
- cancel-in-progress: ${{ github.ref != 'refs/heads/main' }}
+ cancel-in-progress: ${{ github.ref != 'refs/heads/0.2' }}
permissions: {}
jobs:
SamplesToolsFineTuningTests:
diff --git a/.github/workflows/type-check.yml b/.github/workflows/type-check.yml
index c66fb6ad7b1..3afd32ad886 100644
--- a/.github/workflows/type-check.yml
+++ b/.github/workflows/type-check.yml
@@ -2,8 +2,8 @@ name: Type check
# see: https://help.github.com/en/actions/reference/events-that-trigger-workflows
on: # Trigger the workflow on pull request or merge
pull_request:
- merge_group:
- types: [checks_requested]
+ branches: ["0.2"]
+
defaults:
run:
shell: bash
diff --git a/README.md b/README.md
index e92083f0df3..0599300c539 100644
--- a/README.md
+++ b/README.md
@@ -26,7 +26,7 @@ AutoGen is an open-source programming framework for building AI agents and facil
> -_Maintainers (Sept 6th, 2024)_
-![AutoGen Overview](https://github.com/microsoft/autogen/blob/main/website/static/img/autogen_agentchat.png)
+![AutoGen Overview](https://github.com/microsoft/autogen/blob/0.2/website/static/img/autogen_agentchat.png)
- AutoGen enables building next-gen LLM applications based on [multi-agent conversations](https://microsoft.github.io/autogen/docs/Use-Cases/agent_chat) with minimal effort. It simplifies the orchestration, automation, and optimization of a complex LLM workflow. It maximizes the performance of LLM models and overcomes their weaknesses.
- It supports [diverse conversation patterns](https://microsoft.github.io/autogen/docs/Use-Cases/agent_chat#supporting-diverse-conversation-patterns) for complex workflows. With customizable and conversable agents, developers can use AutoGen to build a wide range of conversation patterns concerning conversation autonomy,
@@ -171,7 +171,7 @@ Features of this use case include:
- **Customization**: AutoGen agents can be customized to meet the specific needs of an application. This includes the ability to choose the LLMs to use, the types of human input to allow, and the tools to employ.
- **Human participation**: AutoGen seamlessly allows human participation. This means that humans can provide input and feedback to the agents as needed.
-For [example](https://github.com/microsoft/autogen/blob/main/test/twoagent.py),
+For [example](https://github.com/microsoft/autogen/blob/0.2/test/twoagent.py),
```python
from autogen import AssistantAgent, UserProxyAgent, config_list_from_json
@@ -194,9 +194,9 @@ python test/twoagent.py
After the repo is cloned.
The figure below shows an example conversation flow with AutoGen.
-![Agent Chat Example](https://github.com/microsoft/autogen/blob/main/website/static/img/chat_example.png)
+![Agent Chat Example](https://github.com/microsoft/autogen/blob/0.2/website/static/img/chat_example.png)
-Alternatively, the [sample code](https://github.com/microsoft/autogen/blob/main/samples/simple_chat.py) here allows a user to chat with an AutoGen agent in ChatGPT style.
+Alternatively, the [sample code](https://github.com/microsoft/autogen/blob/0.2/samples/simple_chat.py) here allows a user to chat with an AutoGen agent in ChatGPT style.
Please find more [code examples](https://microsoft.github.io/autogen/docs/Examples#automated-multi-agent-chat) for this feature.
@@ -240,7 +240,7 @@ You can find detailed documentation about AutoGen [here](https://microsoft.githu
In addition, you can find:
-- [Research](https://microsoft.github.io/autogen/docs/Research), [blogposts](https://microsoft.github.io/autogen/blog) around AutoGen, and [Transparency FAQs](https://github.com/microsoft/autogen/blob/main/TRANSPARENCY_FAQS.md)
+- [Research](https://microsoft.github.io/autogen/docs/Research), [blogposts](https://microsoft.github.io/autogen/blog) around AutoGen, and [Transparency FAQs](https://github.com/microsoft/autogen/blob/0.2/TRANSPARENCY_FAQS.md)
- [Contributing guide](https://microsoft.github.io/autogen/docs/Contribute)
diff --git a/autogen/agentchat/groupchat.py b/autogen/agentchat/groupchat.py
index c6355a13b94..b0f0b9520aa 100644
--- a/autogen/agentchat/groupchat.py
+++ b/autogen/agentchat/groupchat.py
@@ -12,9 +12,9 @@
from ..formatting_utils import colored
from ..graph_utils import check_graph_validity, invert_disallowed_to_allowed
from ..io.base import IOStream
+from ..oai.client import ModelClient
from ..runtime_logging import log_new_agent, logging_enabled
from .agent import Agent
-from .chat import ChatResult
from .conversable_agent import ConversableAgent
try:
@@ -105,6 +105,8 @@ def custom_speaker_selection_func(
"clear history" phrase in user prompt. This is experimental feature.
See description of GroupChatManager.clear_agents_history function for more info.
- send_introductions: send a round of introductions at the start of the group chat, so agents know who they can speak to (default: False)
+ - select_speaker_auto_model_client_cls: Custom model client class for the internal speaker select agent used during 'auto' speaker selection (optional)
+ - select_speaker_auto_llm_config: LLM config for the internal speaker select agent used during 'auto' speaker selection (optional)
- role_for_select_speaker_messages: sets the role name for speaker selection when in 'auto' mode, typically 'user' or 'system'. (default: 'system')
"""
@@ -142,6 +144,8 @@ def custom_speaker_selection_func(
Respond with ONLY the name of the speaker and DO NOT provide a reason."""
select_speaker_transform_messages: Optional[Any] = None
select_speaker_auto_verbose: Optional[bool] = False
+ select_speaker_auto_model_client_cls: Optional[Union[ModelClient, List[ModelClient]]] = None
+ select_speaker_auto_llm_config: Optional[Union[Dict, Literal[False]]] = None
role_for_select_speaker_messages: Optional[str] = "system"
_VALID_SPEAKER_SELECTION_METHODS = ["auto", "manual", "random", "round_robin"]
@@ -591,6 +595,79 @@ def _finalize_speaker(self, last_speaker: Agent, final: bool, name: str, agents:
agent = self.agent_by_name(name)
return agent if agent else self.next_agent(last_speaker, agents)
+ def _register_client_from_config(self, agent: Agent, config: Dict):
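+        """Register a custom model client class on the agent if the given config entry specifies a model_client_cls."""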
+ model_client_cls_to_match = config.get("model_client_cls")
+ if model_client_cls_to_match:
+ if not self.select_speaker_auto_model_client_cls:
+ raise ValueError(
+ "A custom model was detected in the config but no 'model_client_cls' "
+ "was supplied for registration in GroupChat."
+ )
+
+ if isinstance(self.select_speaker_auto_model_client_cls, list):
+ # Register the first custom model client class matching the name specified in the config
+ matching_model_cls = [
+ client_cls
+ for client_cls in self.select_speaker_auto_model_client_cls
+ if client_cls.__name__ == model_client_cls_to_match
+ ]
+ if len(set(matching_model_cls)) > 1:
+ raise RuntimeError(
+ f"More than one unique 'model_client_cls' with __name__ '{model_client_cls_to_match}'."
+ )
+ if not matching_model_cls:
+ raise ValueError(
+ "No model's __name__ matches the model client class "
+ f"'{model_client_cls_to_match}' specified in select_speaker_auto_llm_config."
+ )
+ select_speaker_auto_model_client_cls = matching_model_cls[0]
+ else:
+ # Register the only custom model client
+ select_speaker_auto_model_client_cls = self.select_speaker_auto_model_client_cls
+
+ agent.register_model_client(select_speaker_auto_model_client_cls)
+
+ def _register_custom_model_clients(self, agent: ConversableAgent):
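+        """Register any custom model clients required by select_speaker_auto_llm_config with the given agent."""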
+ if not self.select_speaker_auto_llm_config:
+ return
+
+ config_format_is_list = "config_list" in self.select_speaker_auto_llm_config.keys()
+ if config_format_is_list:
+ for config in self.select_speaker_auto_llm_config["config_list"]:
+ self._register_client_from_config(agent, config)
+        else:
+ self._register_client_from_config(agent, self.select_speaker_auto_llm_config)
+
+ def _create_internal_agents(
+ self, agents, max_attempts, messages, validate_speaker_name, selector: Optional[ConversableAgent] = None
+ ):
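+        """Create the checking agent and speaker selection agent used internally for 'auto' speaker selection."""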
+ checking_agent = ConversableAgent("checking_agent", default_auto_reply=max_attempts)
+
+ # Register the speaker validation function with the checking agent
+ checking_agent.register_reply(
+ [ConversableAgent, None],
+ reply_func=validate_speaker_name, # Validate each response
+ remove_other_reply_funcs=True,
+ )
+
+        # Use the GroupChat's select_speaker_auto_llm_config if provided, otherwise fall back to the selector's llm_config
+ speaker_selection_llm_config = self.select_speaker_auto_llm_config or selector.llm_config
+
+ # Agent for selecting a single agent name from the response
+ speaker_selection_agent = ConversableAgent(
+ "speaker_selection_agent",
+ system_message=self.select_speaker_msg(agents),
+ chat_messages={checking_agent: messages},
+ llm_config=speaker_selection_llm_config,
+ human_input_mode="NEVER",
+            # Suppresses some extra terminal output; output is handled by select_speaker_auto_verbose
+ )
+
+ # Register any custom model passed in select_speaker_auto_llm_config with the speaker_selection_agent
+ self._register_custom_model_clients(speaker_selection_agent)
+
+ return checking_agent, speaker_selection_agent
+
def _auto_select_speaker(
self,
last_speaker: Agent,
@@ -644,28 +721,8 @@ def validate_speaker_name(recipient, messages, sender, config) -> Tuple[bool, Un
# Two-agent chat for speaker selection
# Agent for checking the response from the speaker_select_agent
- checking_agent = ConversableAgent("checking_agent", default_auto_reply=max_attempts)
-
- # Register the speaker validation function with the checking agent
- checking_agent.register_reply(
- [ConversableAgent, None],
- reply_func=validate_speaker_name, # Validate each response
- remove_other_reply_funcs=True,
- )
-
- # NOTE: Do we have a speaker prompt (select_speaker_prompt_template is not None)? If we don't, we need to feed in the last message to start the nested chat
-
- # Agent for selecting a single agent name from the response
- speaker_selection_agent = ConversableAgent(
- "speaker_selection_agent",
- system_message=self.select_speaker_msg(agents),
- chat_messages=(
- {checking_agent: messages}
- if self.select_speaker_prompt_template is not None
- else {checking_agent: messages[:-1]}
- ),
- llm_config=selector.llm_config,
- human_input_mode="NEVER", # Suppresses some extra terminal outputs, outputs will be handled by select_speaker_auto_verbose
+ checking_agent, speaker_selection_agent = self._create_internal_agents(
+ agents, max_attempts, messages, validate_speaker_name, selector
)
# Create the starting message
@@ -747,24 +804,8 @@ def validate_speaker_name(recipient, messages, sender, config) -> Tuple[bool, Un
# Two-agent chat for speaker selection
# Agent for checking the response from the speaker_select_agent
- checking_agent = ConversableAgent("checking_agent", default_auto_reply=max_attempts)
-
- # Register the speaker validation function with the checking agent
- checking_agent.register_reply(
- [ConversableAgent, None],
- reply_func=validate_speaker_name, # Validate each response
- remove_other_reply_funcs=True,
- )
-
- # NOTE: Do we have a speaker prompt (select_speaker_prompt_template is not None)? If we don't, we need to feed in the last message to start the nested chat
-
- # Agent for selecting a single agent name from the response
- speaker_selection_agent = ConversableAgent(
- "speaker_selection_agent",
- system_message=self.select_speaker_msg(agents),
- chat_messages={checking_agent: messages},
- llm_config=selector.llm_config,
- human_input_mode="NEVER", # Suppresses some extra terminal outputs, outputs will be handled by select_speaker_auto_verbose
+ checking_agent, speaker_selection_agent = self._create_internal_agents(
+ agents, max_attempts, messages, validate_speaker_name, selector
)
# Create the starting message
diff --git a/autogen/oai/gemini.py b/autogen/oai/gemini.py
index 33790c9851c..d44e41112ac 100644
--- a/autogen/oai/gemini.py
+++ b/autogen/oai/gemini.py
@@ -32,6 +32,8 @@
from __future__ import annotations
import base64
+import copy
+import json
import logging
import os
import random
@@ -39,24 +41,39 @@
import time
import warnings
from io import BytesIO
-from typing import Any, Dict, List, Mapping, Union
+from typing import Any, Dict, List, Union
import google.generativeai as genai
import requests
import vertexai
-from google.ai.generativelanguage import Content, Part
+from google.ai.generativelanguage import Content, FunctionCall, FunctionDeclaration, FunctionResponse, Part, Tool
from google.api_core.exceptions import InternalServerError
from google.auth.credentials import Credentials
-from openai.types.chat import ChatCompletion
+from openai.types.chat import ChatCompletion, ChatCompletionMessageToolCall
from openai.types.chat.chat_completion import ChatCompletionMessage, Choice
+from openai.types.chat.chat_completion_message_tool_call import Function
from openai.types.completion_usage import CompletionUsage
from PIL import Image
-from vertexai.generative_models import Content as VertexAIContent
+from vertexai.generative_models import (
+ Content as VertexAIContent,
+)
+from vertexai.generative_models import (
+ FunctionDeclaration as VertexAIFunctionDeclaration,
+)
+from vertexai.generative_models import (
+ GenerationConfig as VertexAIGenerationConfig,
+)
from vertexai.generative_models import GenerativeModel
from vertexai.generative_models import HarmBlockThreshold as VertexAIHarmBlockThreshold
from vertexai.generative_models import HarmCategory as VertexAIHarmCategory
from vertexai.generative_models import Part as VertexAIPart
from vertexai.generative_models import SafetySetting as VertexAISafetySetting
+from vertexai.generative_models import (
+ Tool as VertexAITool,
+)
+from vertexai.generative_models import (
+ ToolConfig as VertexAIToolConfig,
+)
logger = logging.getLogger(__name__)
@@ -107,7 +124,7 @@ def __init__(self, **kwargs):
Args:
api_key (str): The API key for using Gemini.
- credentials (google.auth.credentials.Credentials): credentials to be used for authentication with vertexai.
+ credentials (google.auth.credentials.Credentials): credentials to be used for authentication with vertexai.
google_application_credentials (str): Path to the JSON service account key file of the service account.
Alternatively, the GOOGLE_APPLICATION_CREDENTIALS environment variable
can also be set instead of using this argument.
@@ -171,6 +188,8 @@ def create(self, params: Dict) -> ChatCompletion:
params.get("api_type", "google") # not used
messages = params.get("messages", [])
+ tools = params.get("tools", [])
+ tool_config = params.get("tool_config", {})
stream = params.get("stream", False)
n_response = params.get("n", 1)
system_instruction = params.get("system_instruction", None)
@@ -183,6 +202,7 @@ def create(self, params: Dict) -> ChatCompletion:
}
if self.use_vertexai:
safety_settings = GeminiClient._to_vertexai_safety_settings(params.get("safety_settings", {}))
+ tool_config = GeminiClient._to_vertexai_tool_config(tool_config, tools)
else:
safety_settings = params.get("safety_settings", {})
@@ -198,12 +218,15 @@ def create(self, params: Dict) -> ChatCompletion:
if "vision" not in model_name:
# A. create and call the chat model.
gemini_messages = self._oai_messages_to_gemini_messages(messages)
+ gemini_tools = self._oai_tools_to_gemini_tools(tools)
if self.use_vertexai:
model = GenerativeModel(
model_name,
generation_config=generation_config,
safety_settings=safety_settings,
system_instruction=system_instruction,
+ tools=gemini_tools,
+ tool_config=tool_config,
)
chat = model.start_chat(history=gemini_messages[:-1], response_validation=response_validation)
else:
@@ -213,12 +236,13 @@ def create(self, params: Dict) -> ChatCompletion:
generation_config=generation_config,
safety_settings=safety_settings,
system_instruction=system_instruction,
+ tools=gemini_tools,
)
genai.configure(api_key=self.api_key)
chat = model.start_chat(history=gemini_messages[:-1])
max_retries = 5
for attempt in range(max_retries):
- ans = None
+ ans: Union[Content, VertexAIContent] = None
try:
response = chat.send_message(
gemini_messages[-1].parts, stream=stream, safety_settings=safety_settings
@@ -234,7 +258,7 @@ def create(self, params: Dict) -> ChatCompletion:
raise RuntimeError(f"Google GenAI exception occurred while calling Gemini API: {e}")
else:
# `ans = response.text` is unstable. Use the following code instead.
- ans: str = chat.history[-1].parts[0].text
+ ans: Union[Content, VertexAIContent] = chat.history[-1]
break
if ans is None:
@@ -262,7 +286,7 @@ def create(self, params: Dict) -> ChatCompletion:
# Gemini's vision model does not support chat history yet
# chat = model.start_chat(history=gemini_messages[:-1])
# response = chat.send_message(gemini_messages[-1].parts)
- user_message = self._oai_content_to_gemini_content(messages[-1]["content"])
+ user_message = self._oai_content_to_gemini_content(messages[-1])
if len(messages) > 2:
warnings.warn(
"Warning: Gemini's vision model does not support chat history yet.",
@@ -273,16 +297,14 @@ def create(self, params: Dict) -> ChatCompletion:
response = model.generate_content(user_message, stream=stream)
# ans = response.text
if self.use_vertexai:
- ans: str = response.candidates[0].content.parts[0].text
+ ans: VertexAIContent = response.candidates[0].content
else:
- ans: str = response._result.candidates[0].content.parts[0].text
+ ans: Content = response._result.candidates[0].content
prompt_tokens = model.count_tokens(user_message).total_tokens
- completion_tokens = model.count_tokens(ans).total_tokens
+ completion_tokens = model.count_tokens(ans.parts[0].text).total_tokens
- # 3. convert output
- message = ChatCompletionMessage(role="assistant", content=ans, function_call=None, tool_calls=None)
- choices = [Choice(finish_reason="stop", index=0, message=message)]
+ choices = self._gemini_content_to_oai_choices(ans)
response_oai = ChatCompletion(
id=str(random.randint(0, 1000)),
@@ -295,31 +317,87 @@ def create(self, params: Dict) -> ChatCompletion:
completion_tokens=completion_tokens,
total_tokens=prompt_tokens + completion_tokens,
),
- cost=calculate_gemini_cost(prompt_tokens, completion_tokens, model_name),
+ cost=self._calculate_gemini_cost(prompt_tokens, completion_tokens, model_name),
)
return response_oai
- def _oai_content_to_gemini_content(self, content: Union[str, List]) -> List:
+    # If the input is not a valid JSON string, return it unchanged
+ def _to_json(self, str) -> dict:
+ try:
+ return json.loads(str)
+ except ValueError:
+ return str
+
+ def _oai_content_to_gemini_content(self, message: Dict[str, Any]) -> List:
"""Convert content from OAI format to Gemini format"""
rst = []
- if isinstance(content, str):
- if content == "":
- content = "empty" # Empty content is not allowed.
+ if isinstance(message["content"], str):
+ if message["content"] == "":
+ message["content"] = "empty" # Empty content is not allowed.
+ if self.use_vertexai:
+ rst.append(VertexAIPart.from_text(message["content"]))
+ else:
+ rst.append(Part(text=message["content"]))
+ return rst
+
+ if "tool_calls" in message:
+ if self.use_vertexai:
+ for tool_call in message["tool_calls"]:
+ rst.append(
+ VertexAIPart.from_dict(
+ {
+ "functionCall": {
+ "name": tool_call["function"]["name"],
+ "args": json.loads(tool_call["function"]["arguments"]),
+ }
+ }
+ )
+ )
+ else:
+ for tool_call in message["tool_calls"]:
+ rst.append(
+ Part(
+ function_call=FunctionCall(
+ name=tool_call["function"]["name"],
+ args=json.loads(tool_call["function"]["arguments"]),
+ )
+ )
+ )
+ return rst
+
+ if message["role"] == "tool":
+ if self.use_vertexai:
+ rst.append(
+ VertexAIPart.from_function_response(
+ name=message["name"], response={"result": self._to_json(message["content"])}
+ )
+ )
+ else:
+ rst.append(
+ Part(
+ function_response=FunctionResponse(
+ name=message["name"], response={"result": self._to_json(message["content"])}
+ )
+ )
+ )
+ return rst
+
+ if isinstance(message["content"], str):
if self.use_vertexai:
- rst.append(VertexAIPart.from_text(content))
+ rst.append(VertexAIPart.from_text(message["content"]))
else:
- rst.append(Part(text=content))
+ rst.append(Part(text=message["content"]))
return rst
- assert isinstance(content, list)
+ assert isinstance(message["content"], list)
- for msg in content:
+ for msg in message["content"]:
if isinstance(msg, dict):
assert "type" in msg, f"Missing 'type' field in message: {msg}"
if msg["type"] == "text":
if self.use_vertexai:
- rst.append(VertexAIPart.from_text(text=msg["text"]))
+ rst.append(VertexAIPart.from_text(msg["text"]))
else:
rst.append(Part(text=msg["text"]))
elif msg["type"] == "image_url":
@@ -340,34 +418,32 @@ def _oai_content_to_gemini_content(self, content: Union[str, List]) -> List:
raise ValueError(f"Unsupported message type: {type(msg)}")
return rst
- def _concat_parts(self, parts: List[Part]) -> List:
- """Concatenate parts with the same type.
- If two adjacent parts both have the "text" attribute, then it will be joined into one part.
- """
- if not parts:
- return []
-
- concatenated_parts = []
- previous_part = parts[0]
-
- for current_part in parts[1:]:
- if previous_part.text != "":
- if self.use_vertexai:
- previous_part = VertexAIPart.from_text(previous_part.text + current_part.text)
- else:
- previous_part.text += current_part.text
- else:
- concatenated_parts.append(previous_part)
- previous_part = current_part
-
- if previous_part.text == "":
- if self.use_vertexai:
- previous_part = VertexAIPart.from_text("empty")
- else:
- previous_part.text = "empty" # Empty content is not allowed.
- concatenated_parts.append(previous_part)
+ def _calculate_gemini_cost(self, input_tokens: int, output_tokens: int, model_name: str) -> float:
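+        """Estimate the cost in USD of a Gemini call from prompt and completion token counts."""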
+ if "1.5-pro" in model_name:
+ if (input_tokens + output_tokens) <= 128000:
+ # "gemini-1.5-pro"
+                # Up to 128K total tokens, cost is $3.5 per million input tokens and $10.5 per million output tokens
+ return 3.5 * input_tokens / 1e6 + 10.5 * output_tokens / 1e6
+ # "gemini-1.5-pro"
+            # Above 128K total tokens, cost is $7 per million input tokens and $21 per million output tokens
+ return 7.0 * input_tokens / 1e6 + 21.0 * output_tokens / 1e6
+
+ if "1.5-flash" in model_name:
+ if (input_tokens + output_tokens) <= 128000:
+ # "gemini-1.5-flash"
+                # Up to 128K total tokens, cost is $0.35 per million input tokens and $1.05 per million output tokens
+ return 0.35 * input_tokens / 1e6 + 1.05 * output_tokens / 1e6
+ # "gemini-1.5-flash"
+            # Above 128K total tokens, cost is $0.70 per million input tokens and $2.10 per million output tokens
+ return 0.70 * input_tokens / 1e6 + 2.10 * output_tokens / 1e6
+
+ if "gemini-pro" not in model_name and "gemini-1.0-pro" not in model_name:
+ warnings.warn(
+ f"Cost calculation is not implemented for model {model_name}. Using Gemini-1.0-Pro.", UserWarning
+ )
- return concatenated_parts
+ # Cost is $0.5 per million input tokens and $1.5 per million output tokens
+ return 0.5 * input_tokens / 1e6 + 1.5 * output_tokens / 1e6
def _oai_messages_to_gemini_messages(self, messages: list[Dict[str, Any]]) -> list[dict[str, Any]]:
"""Convert messages from OAI format to Gemini format.
@@ -376,38 +452,154 @@ def _oai_messages_to_gemini_messages(self, messages: list[Dict[str, Any]]) -> li
"""
prev_role = None
rst = []
- curr_parts = []
+
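+        # Helper functions wrapping the VertexAI / Google GenAI message types so the merging logic below works with either backend.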
+ def append_parts(parts, role):
+ if self.use_vertexai:
+ rst.append(VertexAIContent(parts=parts, role=role))
+ else:
+ rst.append(Content(parts=parts, role=role))
+
+ def append_text_to_last(text):
+ if self.use_vertexai:
+ rst[-1] = VertexAIContent(parts=[*rst[-1].parts, VertexAIPart.from_text(text)], role=rst[-1].role)
+ else:
+ rst[-1] = Content(parts=[*rst[-1].parts, Part(text=text)], role=rst[-1].role)
+
+ def is_function_call(parts):
+ return self.use_vertexai and parts[0].function_call or not self.use_vertexai and "function_call" in parts[0]
+
for i, message in enumerate(messages):
- parts = self._oai_content_to_gemini_content(message["content"])
+
+            # A tool response message does not carry a "name" field, so look it up from the corresponding tool call.
+ if message["role"] == "tool":
+ message["name"] = [
+ m["tool_calls"][i]["function"]["name"]
+ for m in messages
+ if "tool_calls" in m
+ for i, tc in enumerate(m["tool_calls"])
+ if tc["id"] == message["tool_call_id"]
+ ][0]
+
+ parts = self._oai_content_to_gemini_content(message)
role = "user" if message["role"] in ["user", "system"] else "model"
- if (prev_role is None) or (role == prev_role):
- curr_parts += parts
- elif role != prev_role:
- if self.use_vertexai:
- rst.append(VertexAIContent(parts=curr_parts, role=prev_role))
- else:
- rst.append(Content(parts=curr_parts, role=prev_role))
- curr_parts = parts
- prev_role = role
- # handle the last message
- if self.use_vertexai:
- rst.append(VertexAIContent(parts=curr_parts, role=role))
- else:
- rst.append(Content(parts=curr_parts, role=role))
+            # In Gemini, a function call message must not directly follow another model message.
+ if is_function_call(parts):
+ # If the previous message is a model message then add a dummy "continue" user message before the function call
+ if prev_role == "model":
+ append_parts(self._oai_content_to_gemini_content({"content": "continue"}), "user")
+ append_parts(parts, role)
+            # In Gemini, a function response must be followed by a model message.
+ elif role == "function":
+ append_parts(parts, "function")
+ # If the next message is not a model message then add a dummy "continue" model message after the function response
+ if len(messages) > (i + 1) and messages[i + 1]["role"] in ["user", "system"]:
+ append_parts(self._oai_content_to_gemini_content({"content": "continue"}), "model")
+ # If the role is the same as the previous role and both are text messages then concatenate the text
+ elif role == prev_role:
+ append_text_to_last(parts[0].text)
+            # If this is the first message, or the role differs from the previous one, append the parts
+ else:
+ # If the previous text message is empty then update the text to "empty" as Gemini does not support empty messages
+ if (
+ (len(rst) > 0)
+ and hasattr(rst[-1].parts[0], "_raw_part")
+ and hasattr(rst[-1].parts[0]._raw_part, "text")
+ and (rst[-1].parts[0]._raw_part.text == "")
+ ):
+ append_text_to_last("empty")
+ append_parts(parts, role)
+
+ prev_role = role
# The Gemini is restrict on order of roles, such that
# 1. The messages should be interleaved between user and model.
# 2. The last message must be from the user role.
# We add a dummy message "continue" if the last role is not the user.
- if rst[-1].role != "user":
+ if rst[-1].role != "user" and rst[-1].role != "function":
if self.use_vertexai:
- rst.append(VertexAIContent(parts=self._oai_content_to_gemini_content("continue"), role="user"))
+ rst.append(
+ VertexAIContent(parts=self._oai_content_to_gemini_content({"content": "continue"}), role="user")
+ )
else:
- rst.append(Content(parts=self._oai_content_to_gemini_content("continue"), role="user"))
-
+ rst.append(Content(parts=self._oai_content_to_gemini_content({"content": "continue"}), role="user"))
return rst
+ def _oai_tools_to_gemini_tools(self, tools: List[Dict[str, Any]]) -> List[Tool]:
+ """Convert tools from OAI format to Gemini format."""
+ if len(tools) == 0:
+ return None
+ function_declarations = []
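+        # Build one function declaration per OAI tool; VertexAI accepts the OAI parameter schema directly, while the GenAI API needs the converted parameter format.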
+ for tool in tools:
+ if self.use_vertexai:
+ function_declaration = VertexAIFunctionDeclaration(
+ name=tool["function"]["name"],
+ description=tool["function"]["description"],
+ parameters=tool["function"]["parameters"],
+ )
+ else:
+ function_declaration = FunctionDeclaration(
+ name=tool["function"]["name"],
+ description=tool["function"]["description"],
+ parameters=self._oai_function_parameters_to_gemini_function_parameters(
+ copy.deepcopy(tool["function"]["parameters"])
+ ),
+ )
+ function_declarations.append(function_declaration)
+ if self.use_vertexai:
+ return [VertexAITool(function_declarations=function_declarations)]
+ else:
+ return [Tool(function_declarations=function_declarations)]
+
+ def _oai_function_parameters_to_gemini_function_parameters(
+        self, function_definition: Dict[str, Any]
+    ) -> Dict[str, Any]:
+ """
+        Convert an OpenAPI function parameters definition to the Gemini function parameters format.
+        The "type" key is renamed to "type_" and its value is uppercased.
+ """
+ assert "anyOf" not in function_definition, "Union types are not supported for function parameter in Gemini."
+ # Delete the default key as it is not supported in Gemini
+ if "default" in function_definition:
+ del function_definition["default"]
+
+ function_definition["type_"] = function_definition["type"].upper()
+ del function_definition["type"]
+ if "properties" in function_definition:
+ for key in function_definition["properties"]:
+ function_definition["properties"][key] = self._oai_function_parameters_to_gemini_function_parameters(
+ function_definition["properties"][key]
+ )
+ if "items" in function_definition:
+ function_definition["items"] = self._oai_function_parameters_to_gemini_function_parameters(
+ function_definition["items"]
+ )
+ return function_definition
+
+ def _gemini_content_to_oai_choices(self, response: Union[Content, VertexAIContent]) -> List[Choice]:
+ """Convert response from Gemini format to OAI format."""
+ text = None
+ tool_calls = []
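+        # A Gemini response can interleave text parts and function-call parts; collect them into OAI content and tool_calls.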
+ for part in response.parts:
+ if part.function_call:
+ if self.use_vertexai:
+ arguments = VertexAIPart.to_dict(part)["function_call"]["args"]
+ else:
+ arguments = Part.to_dict(part)["function_call"]["args"]
+ tool_calls.append(
+ ChatCompletionMessageToolCall(
+ id=str(random.randint(0, 1000)),
+ type="function",
+ function=Function(name=part.function_call.name, arguments=json.dumps(arguments)),
+ )
+ )
+ elif part.text:
+ text = part.text
+ message = ChatCompletionMessage(
+ role="assistant", content=text, function_call=None, tool_calls=tool_calls if len(tool_calls) > 0 else None
+ )
+ return [Choice(finish_reason="tool_calls" if tool_calls else "stop", index=0, message=message)]
+
@staticmethod
def _to_vertexai_safety_settings(safety_settings):
"""Convert safety settings to VertexAI format if needed,
@@ -437,6 +629,49 @@ def _to_vertexai_safety_settings(safety_settings):
else:
return safety_settings
+ @staticmethod
+ def _to_vertexai_tool_config(tool_config, tools):
+ """Convert tool config to VertexAI format,
+ like when specifying them in the OAI_CONFIG_LIST
+ """
+ if (
+ isinstance(tool_config, dict)
+ and (len(tool_config) > 0)
+ and all([isinstance(tool_config[tool_config_entry], dict) for tool_config_entry in tool_config])
+ ):
+ if (
+ tool_config["function_calling_config"]["mode"]
+ not in VertexAIToolConfig.FunctionCallingConfig.Mode.__members__
+ ):
+ invalid_mode = tool_config["function_calling_config"]
+ logger.error(f"Function calling mode {invalid_mode} is invalid")
+ return None
+ else:
+ # Currently, there is only function calling config
+ func_calling_config_params = {}
+ func_calling_config_params["mode"] = VertexAIToolConfig.FunctionCallingConfig.Mode[
+ tool_config["function_calling_config"]["mode"]
+ ]
+ if (
+ (func_calling_config_params["mode"] == VertexAIToolConfig.FunctionCallingConfig.Mode.ANY)
+ and (len(tools) > 0)
+ and all(["function_name" in tool for tool in tools])
+ ):
+ # The function names are not yet known when parsing the OAI_CONFIG_LIST
+ func_calling_config_params["allowed_function_names"] = [tool["function_name"] for tool in tools]
+ vertexai_tool_config = VertexAIToolConfig(
+ function_calling_config=VertexAIToolConfig.FunctionCallingConfig(**func_calling_config_params)
+ )
+ return vertexai_tool_config
+ elif isinstance(tool_config, VertexAIToolConfig):
+ return tool_config
+ elif len(tool_config) == 0 and len(tools) == 0:
+ logger.debug("VertexAI tool config is empty!")
+ return None
+ else:
+ logger.error("Invalid VertexAI tool config!")
+ return None
+
def _to_pil(data: str) -> Image.Image:
"""
@@ -470,16 +705,3 @@ def get_image_data(image_file: str, use_b64=True) -> bytes:
return base64.b64encode(content).decode("utf-8")
else:
return content
-
-
-def calculate_gemini_cost(input_tokens: int, output_tokens: int, model_name: str) -> float:
- if "1.5" in model_name or "gemini-experimental" in model_name:
- # "gemini-1.5-pro-preview-0409"
- # Cost is $7 per million input tokens and $21 per million output tokens
- return 7.0 * input_tokens / 1e6 + 21.0 * output_tokens / 1e6
-
- if "gemini-pro" not in model_name and "gemini-1.0-pro" not in model_name:
- warnings.warn(f"Cost calculation is not implemented for model {model_name}. Using Gemini-1.0-Pro.", UserWarning)
-
- # Cost is $0.5 per million input tokens and $1.5 per million output tokens
- return 0.5 * input_tokens / 1e6 + 1.5 * output_tokens / 1e6
diff --git a/autogen/oai/openai_utils.py b/autogen/oai/openai_utils.py
index 3844795c24f..ceb7ef90c93 100644
--- a/autogen/oai/openai_utils.py
+++ b/autogen/oai/openai_utils.py
@@ -21,6 +21,7 @@
"azure_ad_token",
"azure_ad_token_provider",
"credentials",
+ "tool_config",
]
DEFAULT_AZURE_API_VERSION = "2024-02-01"
OAI_PRICE1K = {
diff --git a/notebook/agentchat_MathChat.ipynb b/notebook/agentchat_MathChat.ipynb
index 69c38031b2f..bb6f713eabc 100644
--- a/notebook/agentchat_MathChat.ipynb
+++ b/notebook/agentchat_MathChat.ipynb
@@ -17,7 +17,7 @@
"\n",
"AutoGen offers conversable agents powered by LLM, tool or human, which can be used to perform tasks collectively via automated chat. This framework allows tool use and human participation through multi-agent conversation. Please find documentation about this feature [here](https://microsoft.github.io/autogen/docs/Use-Cases/agent_chat).\n",
"\n",
- "MathChat is an experimental conversational framework for math problem solving. In this notebook, we demonstrate how to use MathChat to solve math problems. MathChat uses the `AssistantAgent` and `MathUserProxyAgent`, which is similar to the usage of `AssistantAgent` and `UserProxyAgent` in other notebooks (e.g., [Automated Task Solving with Code Generation, Execution & Debugging](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_auto_feedback_from_code_execution.ipynb)). Essentially, `MathUserProxyAgent` implements a different auto reply mechanism corresponding to the MathChat prompts. You can find more details in the paper [An Empirical Study on Challenging Math Problem Solving with GPT-4](https://arxiv.org/abs/2306.01337) or the [blogpost](https://microsoft.github.io/autogen/blog/2023/06/28/MathChat).\n",
+ "MathChat is an experimental conversational framework for math problem solving. In this notebook, we demonstrate how to use MathChat to solve math problems. MathChat uses the `AssistantAgent` and `MathUserProxyAgent`, which is similar to the usage of `AssistantAgent` and `UserProxyAgent` in other notebooks (e.g., [Automated Task Solving with Code Generation, Execution & Debugging](https://github.com/microsoft/autogen/blob/0.2/notebook/agentchat_auto_feedback_from_code_execution.ipynb)). Essentially, `MathUserProxyAgent` implements a different auto reply mechanism corresponding to the MathChat prompts. You can find more details in the paper [An Empirical Study on Challenging Math Problem Solving with GPT-4](https://arxiv.org/abs/2306.01337) or the [blogpost](https://microsoft.github.io/autogen/blog/2023/06/28/MathChat).\n",
"\n",
"````{=mdx}\n",
":::info Requirements\n",
diff --git a/notebook/agentchat_RetrieveChat.ipynb b/notebook/agentchat_RetrieveChat.ipynb
index 6ca2d1ac512..6debb2450df 100644
--- a/notebook/agentchat_RetrieveChat.ipynb
+++ b/notebook/agentchat_RetrieveChat.ipynb
@@ -10,7 +10,7 @@
"AutoGen offers conversable agents powered by LLM, tool or human, which can be used to perform tasks collectively via automated chat. This framework allows tool use and human participation through multi-agent conversation.\n",
"Please find documentation about this feature [here](https://microsoft.github.io/autogen/docs/Use-Cases/agent_chat).\n",
"\n",
- "RetrieveChat is a conversational system for retrieval-augmented code generation and question answering. In this notebook, we demonstrate how to utilize RetrieveChat to generate code and answer questions based on customized documentations that are not present in the LLM's training dataset. RetrieveChat uses the `AssistantAgent` and `RetrieveUserProxyAgent`, which is similar to the usage of `AssistantAgent` and `UserProxyAgent` in other notebooks (e.g., [Automated Task Solving with Code Generation, Execution & Debugging](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_auto_feedback_from_code_execution.ipynb)). Essentially, `RetrieveUserProxyAgent` implement a different auto-reply mechanism corresponding to the RetrieveChat prompts.\n",
+ "RetrieveChat is a conversational system for retrieval-augmented code generation and question answering. In this notebook, we demonstrate how to utilize RetrieveChat to generate code and answer questions based on customized documentations that are not present in the LLM's training dataset. RetrieveChat uses the `AssistantAgent` and `RetrieveUserProxyAgent`, which is similar to the usage of `AssistantAgent` and `UserProxyAgent` in other notebooks (e.g., [Automated Task Solving with Code Generation, Execution & Debugging](https://github.com/microsoft/autogen/blob/0.2/notebook/agentchat_auto_feedback_from_code_execution.ipynb)). Essentially, `RetrieveUserProxyAgent` implement a different auto-reply mechanism corresponding to the RetrieveChat prompts.\n",
"\n",
"## Table of Contents\n",
"We'll demonstrate six examples of using RetrieveChat for code generation and question answering:\n",
diff --git a/notebook/agentchat_RetrieveChat_mongodb.ipynb b/notebook/agentchat_RetrieveChat_mongodb.ipynb
index f1f85f65a80..30508ad6c47 100644
--- a/notebook/agentchat_RetrieveChat_mongodb.ipynb
+++ b/notebook/agentchat_RetrieveChat_mongodb.ipynb
@@ -10,7 +10,7 @@
"AutoGen offers conversable agents powered by LLM, tool or human, which can be used to perform tasks collectively via automated chat. This framework allows tool use and human participation through multi-agent conversation.\n",
"Please find documentation about this feature [here](https://microsoft.github.io/autogen/docs/Use-Cases/agent_chat).\n",
"\n",
- "RetrieveChat is a conversational system for retrieval-augmented code generation and question answering. In this notebook, we demonstrate how to utilize RetrieveChat to generate code and answer questions based on customized documentations that are not present in the LLM's training dataset. RetrieveChat uses the `AssistantAgent` and `RetrieveUserProxyAgent`, which is similar to the usage of `AssistantAgent` and `UserProxyAgent` in other notebooks (e.g., [Automated Task Solving with Code Generation, Execution & Debugging](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_auto_feedback_from_code_execution.ipynb)). Essentially, `RetrieveUserProxyAgent` implement a different auto-reply mechanism corresponding to the RetrieveChat prompts.\n",
+ "RetrieveChat is a conversational system for retrieval-augmented code generation and question answering. In this notebook, we demonstrate how to utilize RetrieveChat to generate code and answer questions based on customized documentations that are not present in the LLM's training dataset. RetrieveChat uses the `AssistantAgent` and `RetrieveUserProxyAgent`, which is similar to the usage of `AssistantAgent` and `UserProxyAgent` in other notebooks (e.g., [Automated Task Solving with Code Generation, Execution & Debugging](https://github.com/microsoft/autogen/blob/0.2/notebook/agentchat_auto_feedback_from_code_execution.ipynb)). Essentially, `RetrieveUserProxyAgent` implement a different auto-reply mechanism corresponding to the RetrieveChat prompts.\n",
"\n",
"## Table of Contents\n",
"We'll demonstrate six examples of using RetrieveChat for code generation and question answering:\n",
diff --git a/notebook/agentchat_RetrieveChat_pgvector.ipynb b/notebook/agentchat_RetrieveChat_pgvector.ipynb
index 022b1347a2d..18f3c5199e8 100644
--- a/notebook/agentchat_RetrieveChat_pgvector.ipynb
+++ b/notebook/agentchat_RetrieveChat_pgvector.ipynb
@@ -10,7 +10,7 @@
"AutoGen offers conversable agents powered by LLM, tool or human, which can be used to perform tasks collectively via automated chat. This framework allows tool use and human participation through multi-agent conversation.\n",
"Please find documentation about this feature [here](https://microsoft.github.io/autogen/docs/Use-Cases/agent_chat).\n",
"\n",
- "RetrieveChat is a conversational system for retrieval-augmented code generation and question answering. In this notebook, we demonstrate how to utilize RetrieveChat to generate code and answer questions based on customized documentations that are not present in the LLM's training dataset. RetrieveChat uses the `AssistantAgent` and `RetrieveUserProxyAgent`, which is similar to the usage of `AssistantAgent` and `UserProxyAgent` in other notebooks (e.g., [Automated Task Solving with Code Generation, Execution & Debugging](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_auto_feedback_from_code_execution.ipynb)). Essentially, `RetrieveUserProxyAgent` implement a different auto-reply mechanism corresponding to the RetrieveChat prompts.\n",
+ "RetrieveChat is a conversational system for retrieval-augmented code generation and question answering. In this notebook, we demonstrate how to utilize RetrieveChat to generate code and answer questions based on customized documentations that are not present in the LLM's training dataset. RetrieveChat uses the `AssistantAgent` and `RetrieveUserProxyAgent`, which is similar to the usage of `AssistantAgent` and `UserProxyAgent` in other notebooks (e.g., [Automated Task Solving with Code Generation, Execution & Debugging](https://github.com/microsoft/autogen/blob/0.2/notebook/agentchat_auto_feedback_from_code_execution.ipynb)). Essentially, `RetrieveUserProxyAgent` implement a different auto-reply mechanism corresponding to the RetrieveChat prompts.\n",
"\n",
"## Table of Contents\n",
"We'll demonstrate six examples of using RetrieveChat for code generation and question answering:\n",
diff --git a/notebook/agentchat_RetrieveChat_qdrant.ipynb b/notebook/agentchat_RetrieveChat_qdrant.ipynb
index 9be4cbfe528..e56bc00e4ab 100644
--- a/notebook/agentchat_RetrieveChat_qdrant.ipynb
+++ b/notebook/agentchat_RetrieveChat_qdrant.ipynb
@@ -12,7 +12,7 @@
"This notebook demonstrates the usage of Qdrant for RAG, based on [agentchat_RetrieveChat.ipynb](https://colab.research.google.com/github/microsoft/autogen/blob/main/notebook/agentchat_RetrieveChat.ipynb).\n",
"\n",
"\n",
- "RetrieveChat is a conversational system for retrieve augmented code generation and question answering. In this notebook, we demonstrate how to utilize RetrieveChat to generate code and answer questions based on customized documentations that are not present in the LLM's training dataset. RetrieveChat uses the `AssistantAgent` and `RetrieveUserProxyAgent`, which is similar to the usage of `AssistantAgent` and `UserProxyAgent` in other notebooks (e.g., [Automated Task Solving with Code Generation, Execution & Debugging](https://github.com/microsoft/autogen/blob/main/notebook/agentchat_auto_feedback_from_code_execution.ipynb)).\n",
+    "RetrieveChat is a conversational system for retrieval-augmented code generation and question answering. In this notebook, we demonstrate how to utilize RetrieveChat to generate code and answer questions based on customized documentation that is not present in the LLM's training dataset. RetrieveChat uses the `AssistantAgent` and `RetrieveUserProxyAgent`, which is similar to the usage of `AssistantAgent` and `UserProxyAgent` in other notebooks (e.g., [Automated Task Solving with Code Generation, Execution & Debugging](https://github.com/microsoft/autogen/blob/0.2/notebook/agentchat_auto_feedback_from_code_execution.ipynb)).\n",
"\n",
"We'll demonstrate usage of RetrieveChat with Qdrant for code generation and question answering w/ human feedback.\n",
"\n",
diff --git a/notebook/agentchat_agentoptimizer.ipynb b/notebook/agentchat_agentoptimizer.ipynb
index 7de418b5ee7..ea3a0d88e65 100644
--- a/notebook/agentchat_agentoptimizer.ipynb
+++ b/notebook/agentchat_agentoptimizer.ipynb
@@ -53,7 +53,7 @@
"source": [
"# MathUserProxy with function_call\n",
"\n",
- "This agent is a customized MathUserProxy inherits from its [parent class](https://github.com/microsoft/autogen/blob/main/autogen/agentchat/contrib/math_user_proxy_agent.py).\n",
+    "This agent is a customized MathUserProxy that inherits from its [parent class](https://github.com/microsoft/autogen/blob/0.2/autogen/agentchat/contrib/math_user_proxy_agent.py).\n",
"\n",
"It supports using both function_call and python to solve math problems.\n"
]
diff --git a/notebook/agentchat_cost_token_tracking.ipynb b/notebook/agentchat_cost_token_tracking.ipynb
index 17106e7c938..0d456daa46b 100644
--- a/notebook/agentchat_cost_token_tracking.ipynb
+++ b/notebook/agentchat_cost_token_tracking.ipynb
@@ -109,7 +109,7 @@
"]\n",
"```\n",
"\n",
- "You can set the value of config_list in any way you prefer. Please refer to this [notebook](https://github.com/microsoft/autogen/blob/main/website/docs/topics/llm_configuration.ipynb) for full code examples of the different methods."
+ "You can set the value of config_list in any way you prefer. Please refer to this [notebook](https://github.com/microsoft/autogen/blob/0.2/website/docs/topics/llm_configuration.ipynb) for full code examples of the different methods."
]
},
{
diff --git a/notebook/agentchat_custom_model.ipynb b/notebook/agentchat_custom_model.ipynb
index 773247ee0b9..364d81517a1 100644
--- a/notebook/agentchat_custom_model.ipynb
+++ b/notebook/agentchat_custom_model.ipynb
@@ -238,7 +238,7 @@
"]\n",
"```\n",
"\n",
- "You can set the value of config_list in any way you prefer. Please refer to this [notebook](https://github.com/microsoft/autogen/blob/main/notebook/oai_openai_utils.ipynb) for full code examples of the different methods."
+ "You can set the value of config_list in any way you prefer. Please refer to this [notebook](https://github.com/microsoft/autogen/blob/0.2/notebook/oai_openai_utils.ipynb) for full code examples of the different methods."
]
},
{
diff --git a/notebook/agentchat_function_call.ipynb b/notebook/agentchat_function_call.ipynb
index ff94c0d4fb0..e341aecaead 100644
--- a/notebook/agentchat_function_call.ipynb
+++ b/notebook/agentchat_function_call.ipynb
@@ -104,7 +104,7 @@
"]\n",
"```\n",
"\n",
- "You can set the value of config_list in any way you prefer. Please refer to this [notebook](https://github.com/microsoft/autogen/blob/main/website/docs/topics/llm_configuration.ipynb) for full code examples of the different methods."
+ "You can set the value of config_list in any way you prefer. Please refer to this [notebook](https://github.com/microsoft/autogen/blob/0.2/website/docs/topics/llm_configuration.ipynb) for full code examples of the different methods."
]
},
{
diff --git a/notebook/agentchat_function_call_currency_calculator.ipynb b/notebook/agentchat_function_call_currency_calculator.ipynb
index 36ef81d5edb..d6d9e05826f 100644
--- a/notebook/agentchat_function_call_currency_calculator.ipynb
+++ b/notebook/agentchat_function_call_currency_calculator.ipynb
@@ -104,7 +104,7 @@
"]\n",
"```\n",
"\n",
- "You can set the value of config_list in any way you prefer. Please refer to this [notebook](https://github.com/microsoft/autogen/blob/main/website/docs/topics/llm_configuration.ipynb) for full code examples of the different methods."
+ "You can set the value of config_list in any way you prefer. Please refer to this [notebook](https://github.com/microsoft/autogen/blob/0.2/website/docs/topics/llm_configuration.ipynb) for full code examples of the different methods."
]
},
{
diff --git a/notebook/agentchat_gemini.ipynb b/notebook/agentchat_gemini.ipynb
new file mode 100644
index 00000000000..699d9dc0235
--- /dev/null
+++ b/notebook/agentchat_gemini.ipynb
@@ -0,0 +1,809 @@
+{
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Assistants with Google Gemini"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+    "Gemini is a family of generative AI models built by Google. It supports a context window of up to 1 million tokens as of now. Gemini is now natively supported in AutoGen.\n",
+    "\n",
+    "This notebook demonstrates a few examples of using AutoGen with Gemini models.\n",
+ "\n",
+ "## Requirements\n",
+ "\n",
+    "You must have an [API key](https://aistudio.google.com/app/apikey) from Google AI.\n",
+ "\n",
+ "## Setup Gemini config list\n",
+ "\n",
+    "Here is a config list with the supported Gemini models, along with OpenAI's gpt-4o:\n",
+ "\n",
+ "```python\n",
+ "config_list = [\n",
+ " {\n",
+ " 'model': 'gpt-4o',\n",
+ " 'api_key': '',\n",
+ " 'tags': ['tool', 'gpt-4'],\n",
+ " },\n",
+ " {\n",
+ " 'model': 'gemini-1.5-pro',\n",
+ " 'api_key': '',\n",
+ " 'api_type': 'google',\n",
+ " 'tags': ['tool', 'gemini'],\n",
+ " },\n",
+ " {\n",
+ " 'model': 'gemini-1.5-flash',\n",
+ " 'api_key': '',\n",
+ " 'api_type': 'google',\n",
+ " 'tags': ['tool', 'gemini'],\n",
+ " },\n",
+ " {\n",
+ " 'model': 'gemini-1.0-pro',\n",
+ " 'api_key': '',\n",
+ " 'api_type': 'google',\n",
+ " 'tags': ['gemini'],\n",
+ " }\n",
+ "]\n",
+    "```\n",
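+    "\n",
+    "These entries are assumed to live in an `OAI_CONFIG_LIST` file (or an environment variable of the same name), from which they can be loaded and filtered by tag, for example:\n",
+    "\n",
+    "```python\n",
+    "import autogen\n",
+    "\n",
+    "# Keep only the Gemini entries from the config list above\n",
+    "config_list = autogen.config_list_from_json(\"OAI_CONFIG_LIST\", filter_dict={\"tags\": [\"gemini\"]})\n",
+    "```"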
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Hello World Example\n",
+ "\n",
+ "Our first example will be with a simple `UserProxyAgent` asking a question to an `AssistantAgent`. This is based on the tutorial demo [here](https://microsoft.github.io/autogen/docs/tutorial/introduction).\n",
+ "\n",
+ "After sending the question and seeing a response, you can type `exit` to end the chat or continue to converse."
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\u001b[33muser\u001b[0m (to assistant):\n",
+ "\n",
+ "Hi, what is a LLM ?\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[33massistant\u001b[0m (to user):\n",
+ "\n",
+ "A large language model (LLM) is a type of artificial intelligence (AI) system that excels at natural language processing (NLP) tasks. These models are trained on massive text datasets, enabling them to understand, interpret, and generate human-like text in response to a wide range of prompts and questions. \n",
+ "\n",
+ "Here are some key characteristics and capabilities of LLMs:\n",
+ "\n",
+ "* **Text Generation:** LLMs can generate coherent and contextually relevant text, including stories, articles, summaries, and conversations.\n",
+ "* **Language Translation:** They can translate text between multiple languages with impressive accuracy.\n",
+ "* **Question Answering:** LLMs can comprehend questions and provide relevant answers based on their training data.\n",
+ "* **Summarization:** They can condense large amounts of text into concise summaries while preserving key information.\n",
+ "* **Sentiment Analysis:** LLMs can analyze text to determine the emotional tone or sentiment expressed.\n",
+ "\n",
+ "Essentially, LLMs are powerful tools that can understand and process human language in a sophisticated manner, opening up a world of possibilities in various fields, such as:\n",
+ "\n",
+ "* **Chatbots and Virtual Assistants:** Providing more natural and engaging conversational experiences.\n",
+ "* **Content Creation:** Automating content generation for articles, marketing materials, and more.\n",
+ "* **Customer Service:** Enhancing support interactions through automated responses and sentiment analysis.\n",
+ "* **Education:** Personalizing learning experiences and providing on-demand tutoring.\n",
+ "\n",
+ "Overall, LLMs represent a significant advancement in AI, enabling machines to communicate and interact with humans more effectively than ever before. As research and development continue, we can expect even more impressive applications of these models in the future.\n",
+ "\n",
+ "TERMINATE\n",
+ "\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n"
+ ]
+ }
+ ],
+ "source": [
+ "import autogen\n",
+ "\n",
+ "config_list = autogen.config_list_from_json(\"OAI_CONFIG_LIST\", filter_dict={\"tags\": [\"gemini\"]})\n",
+ "\n",
+ "llm_config = {\"config_list\": config_list, \"timeout\": 120}\n",
+ "\n",
+ "# Create Assistant and User\n",
+ "assistant = autogen.AssistantAgent(name=\"assistant\", llm_config=llm_config)\n",
+ "\n",
+ "user_proxy = autogen.UserProxyAgent(name=\"user\", code_execution_config=False)\n",
+ "\n",
+ "# Initiate chat from user_proxy side\n",
+ "chat_result = user_proxy.initiate_chat(assistant, message=\"Hi, what is a LLM ?\")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## In a Group Chat with OpenAI\n",
+ "\n",
+    "Here is an example of Gemini participating in a group chat with a GPT-4 based agent.\n",
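+    "\n",
+    "As a minimal sketch of how such a group chat can be set up with the standard `GroupChat` / `GroupChatManager` API (the actual code cell follows below), consider the following; the agent names, system message, and `tags` filters here are illustrative assumptions:\n",
+    "\n",
+    "```python\n",
+    "import autogen\n",
+    "\n",
+    "# Assumes OAI_CONFIG_LIST contains entries tagged \"gemini\" and \"gpt-4\" (see the config list above)\n",
+    "gemini_config_list = autogen.config_list_from_json(\"OAI_CONFIG_LIST\", filter_dict={\"tags\": [\"gemini\"]})\n",
+    "gpt4_config_list = autogen.config_list_from_json(\"OAI_CONFIG_LIST\", filter_dict={\"tags\": [\"gpt-4\"]})\n",
+    "\n",
+    "# GPT-4 acts as the coder, Gemini as the product manager, and a user proxy executes the generated code\n",
+    "coder = autogen.AssistantAgent(name=\"Coder\", llm_config={\"config_list\": gpt4_config_list})\n",
+    "pm = autogen.AssistantAgent(\n",
+    "    name=\"Product_manager\",\n",
+    "    system_message=\"Creative in software product ideas.\",\n",
+    "    llm_config={\"config_list\": gemini_config_list},\n",
+    ")\n",
+    "user_proxy = autogen.UserProxyAgent(\n",
+    "    name=\"User_proxy\",\n",
+    "    human_input_mode=\"NEVER\",\n",
+    "    code_execution_config={\"work_dir\": \"coding\", \"use_docker\": False},\n",
+    ")\n",
+    "\n",
+    "groupchat = autogen.GroupChat(agents=[user_proxy, coder, pm], messages=[], max_round=12)\n",
+    "manager = autogen.GroupChatManager(groupchat=groupchat, llm_config={\"config_list\": gpt4_config_list})\n",
+    "\n",
+    "user_proxy.initiate_chat(\n",
+    "    manager, message=\"Find a latest paper about gpt-4 on arxiv and find its potential applications in software.\"\n",
+    ")\n",
+    "```"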
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "c:\\Users\\arjun\\anaconda3\\envs\\autogen\\Lib\\site-packages\\tqdm\\auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
+ " from .autonotebook import tqdm as notebook_tqdm\n"
+ ]
+ },
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\u001b[33mUser_proxy\u001b[0m (to chat_manager):\n",
+ "\n",
+ "Find a latest paper about gpt-4 on arxiv and find its potential applications in software.\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[32m\n",
+ "Next speaker: User_proxy\n",
+ "\u001b[0m\n",
+ "\u001b[31m\n",
+ ">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
+ "\u001b[33mUser_proxy\u001b[0m (to chat_manager):\n",
+ "\n",
+ "\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[32m\n",
+ "Next speaker: User_proxy\n",
+ "\u001b[0m\n",
+ "\u001b[31m\n",
+ ">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
+ "\u001b[33mUser_proxy\u001b[0m (to chat_manager):\n",
+ "\n",
+ "\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[32m\n",
+ "Next speaker: Coder\n",
+ "\u001b[0m\n",
+ "\u001b[33mCoder\u001b[0m (to chat_manager):\n",
+ "\n",
+ "Let's start by searching for the latest paper about GPT-4 on arXiv. We can use the arXiv API to fetch the latest paper related to GPT-4.\n",
+ "\n",
+ "I'll provide a Python script to search arXiv for the latest papers related to \"GPT-4.\"\n",
+ "\n",
+ "```python\n",
+ "import requests\n",
+ "from xml.etree import ElementTree\n",
+ "\n",
+ "# arXiv API url for querying papers related to GPT-4\n",
+ "url = \"http://export.arxiv.org/api/query?search_query=ti:GPT-4&start=0&max_results=1&sortBy=submittedDate&sortOrder=descending\"\n",
+ "\n",
+ "response = requests.get(url)\n",
+ "if response.status_code == 200:\n",
+ " root = ElementTree.fromstring(response.content)\n",
+ " entry = root.find(\"{http://www.w3.org/2005/Atom}entry\")\n",
+ " if entry is not None:\n",
+ " title = entry.find(\"{http://www.w3.org/2005/Atom}title\").text\n",
+ " summary = entry.find(\"{http://www.w3.org/2005/Atom}summary\").text\n",
+ " link = entry.find(\"{http://www.w3.org/2005/Atom}id\").text\n",
+ " \n",
+ " print(f\"Title: {title}\")\n",
+ " print(f\"Summary: {summary}\")\n",
+ " print(f\"Link: {link}\")\n",
+ " else:\n",
+ " print(\"No entries found.\")\n",
+ "else:\n",
+ " print(f\"Failed to fetch data from arXiv API. Status code: {response.status_code}\")\n",
+ "```\n",
+ "\n",
+ "Please execute this script. It will output the title, summary, and link of the latest paper about \"GPT-4\" on arXiv. After obtaining the relevant paper, I will analyze its potential applications in software.\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[32m\n",
+ "Next speaker: User_proxy\n",
+ "\u001b[0m\n",
+ "\u001b[31m\n",
+ ">>>>>>>> USING AUTO REPLY...\u001b[0m\n",
+ "\u001b[31m\n",
+ ">>>>>>>> EXECUTING CODE BLOCK 0 (inferred language is python)...\u001b[0m\n",
+ "\u001b[33mUser_proxy\u001b[0m (to chat_manager):\n",
+ "\n",
+ "exitcode: 0 (execution succeeded)\n",
+ "Code output: \n",
+ "Title: I See You: Teacher Analytics with GPT-4 Vision-Powered Observational\n",
+ " Assessment\n",
+ "Summary: This preliminary study explores the integration of GPT-4 Vision (GPT-4V)\n",
+ "technology into teacher analytics, focusing on its applicability in\n",
+ "observational assessment to enhance reflective teaching practice. This research\n",
+ "is grounded in developing a Video-based Automatic Assessment System (VidAAS)\n",
+ "empowered by GPT-4V. Our approach aims to revolutionize teachers' assessment of\n",
+ "students' practices by leveraging Generative Artificial Intelligence (GenAI) to\n",
+ "offer detailed insights into classroom dynamics. Our research methodology\n",
+ "encompasses a comprehensive literature review, prototype development of the\n",
+ "VidAAS, and usability testing with in-service teachers. The study findings\n",
+ "provide future research avenues for VidAAS design, implementation, and\n",
+ "integration in teacher analytics, underscoring the potential of GPT-4V to\n",
+ "provide real-time, scalable feedback and a deeper understanding of the\n",
+ "classroom.\n",
+ "\n",
+ "Link: http://arxiv.org/abs/2405.18623v2\n",
+ "\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[32m\n",
+ "Next speaker: Product_manager\n",
+ "\u001b[0m\n",
+ "\u001b[33mProduct_manager\u001b[0m (to chat_manager):\n",
+ "\n",
+ "This is exciting! The paper you found, \"I See You: Teacher Analytics with GPT-4 Vision-Powered Observational Assessment\", explores an innovative application of GPT-4 Vision. Let's break down the potential software applications based on this:\n",
+ "\n",
+ "**Software Applications Inspired by \"I See You\":**\n",
+ "\n",
+ "* **Video-Based Educational Analytics Platforms:** This paper lays the groundwork for a new breed of educational software. Imagine platforms that analyze classroom recordings to provide automated feedback to teachers. These platforms could:\n",
+ " * **Track student engagement:** Identify students who seem disengaged or confused.\n",
+ " * **Analyze classroom interactions:** Quantify the quality and nature of teacher-student interactions.\n",
+ " * **Assess student understanding:** Potentially even gauge student comprehension through facial expressions and body language.\n",
+ "* **Real-time Teacher Assistance Tools:** GPT-4 Vision could power real-time feedback tools for teachers during live lessons. Imagine:\n",
+ " * **Subtle alerts:** Discretely notifying a teacher if a student appears to be struggling.\n",
+ " * **Personalized suggestions:** Providing on-the-fly recommendations for teaching strategies based on real-time classroom dynamics. \n",
+ "* **Teacher Training and Professional Development:** \n",
+ " * **Simulation Training:** GPT-4 Vision could create realistic virtual classroom simulations for teacher training, allowing educators to practice techniques and receive AI-powered feedback.\n",
+ " * **Reflective Practice:** Video analysis tools could help teachers reflect on their own teaching styles and identify areas for improvement. \n",
+ "\n",
+ "**Beyond Education:**\n",
+ "\n",
+ "While this paper focuses on education, the core technology has broader implications:\n",
+ "\n",
+ "* **Meeting Analytics:** Imagine software that analyzes video conferences to track engagement, identify key discussion points, or even assess team dynamics.\n",
+ "* **Healthcare Training:** Similar applications could revolutionize healthcare training by providing automated feedback during simulated patient interactions. \n",
+ "* **Retail Analytics:** GPT-4 Vision could analyze customer behavior in retail environments, providing insights into product placement, customer service interactions, and more.\n",
+ "\n",
+ "**Challenges and Ethical Considerations:**\n",
+ "\n",
+ "* **Bias in AI:** Ensuring that the AI models are trained on diverse datasets to avoid perpetuating existing biases in education or other fields. \n",
+ "* **Privacy Concerns:** Video analysis raises significant privacy concerns. Clear guidelines and user consent are essential.\n",
+ "* **Teacher Autonomy:** It's crucial to design these systems as tools to *augment* teacher expertise, not replace it.\n",
+ "\n",
+ "**In conclusion,** the paper you found highlights the immense potential of GPT-4 Vision to revolutionize software in education and beyond. It's an exciting area of exploration with the potential to create powerful tools for learning, analysis, and understanding human behavior. \n",
+ "\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[32m\n",
+ "Next speaker: Coder\n",
+ "\u001b[0m\n",
+ "\u001b[33mCoder\u001b[0m (to chat_manager):\n",
+ "\n",
+ "The paper you found titled \"I See You: Teacher Analytics with GPT-4 Vision-Powered Observational Assessment\" outlines a fascinating and innovative application of GPT-4 Vision technology. Here are potential software applications based on this research:\n",
+ "\n",
+ "### 1. Video-Based Educational Analytics Platforms\n",
+ "These platforms can analyze classroom recordings to automate feedback for teachers. They could:\n",
+ "- **Track Student Engagement:** Identify students who seem disengaged or confused.\n",
+ "- **Analyze Teacher-Student Interactions:** Quantify the quality and nature of interactions.\n",
+ "- **Assess Understanding:** Gauge student comprehension through facial expressions and body language.\n",
+ "\n",
+ "### 2. Real-Time Teacher Assistance Tools\n",
+ "GPT-4 Vision could power tools that provide real-time feedback during live lessons by:\n",
+ "- **Alerting Teachers Subtly:** Notifying if a student is struggling.\n",
+ "- **Offering Personalized Suggestions:** Providing on-the-fly recommendations for teaching strategies.\n",
+ "\n",
+ "### 3. Teacher Training and Professional Development\n",
+ "- **Simulation Training:** Enable realistic virtual classroom simulations for practice and AI-powered feedback.\n",
+ "- **Reflective Practice:** Allow teachers to reflect on their own teaching and identify areas for improvement through video analysis tools.\n",
+ "\n",
+ "### Applications Beyond Education\n",
+ "While this paper focuses on education, the underlying technology has broader implications across various domains:\n",
+ "#### Meeting Analytics\n",
+ "Software could analyze video conferences to:\n",
+ "- Track engagement.\n",
+ "- Identify key discussion points.\n",
+ "- Assess team dynamics.\n",
+ "\n",
+ "#### Healthcare Training\n",
+ "Tools could provide automated feedback during simulated patient interactions.\n",
+ "\n",
+ "#### Retail Analytics\n",
+ "GPT-4 Vision could analyze customer behavior in retail environments, offering insights into:\n",
+ "- Product placements.\n",
+ "- Customer service interactions.\n",
+ "- Sales strategies.\n",
+ "\n",
+ "### Challenges and Ethical Considerations\n",
+ "- **Bias in AI:** Ensuring diverse datasets to avoid perpetuating biases.\n",
+ "- **Privacy Concerns:** Addressing significant privacy concerns with clear guidelines and user consent.\n",
+ "- **Teacher Autonomy:** Designing systems to augment rather than replace teacher expertise.\n",
+ "\n",
+ "**Conclusion:**\n",
+ "The \"I See You\" paper exemplifies the immense potential of GPT-4 Vision technology to innovate educational software and beyond. By addressing challenges and ethical considerations, we can harness this technology to create powerful tools for learning, analysis, and human behavior understanding.\n",
+ "\n",
+ "TERMINATE\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[32m\n",
+ "Next speaker: Product_manager\n",
+ "\u001b[0m\n",
+ "\u001b[33mProduct_manager\u001b[0m (to chat_manager):\n",
+ "\n",
+ "That's an excellent summary and expansion of the potential applications! You've clearly grasped the key points of the paper and extrapolated them into a range of innovative software ideas. \n",
+ "\n",
+ "I especially appreciate your inclusion of:\n",
+ "\n",
+ "* **Specific examples** within each application category (like tracking engagement in meeting analytics or analyzing customer service interactions in retail). \n",
+ "* **The broader applications** beyond education, demonstrating the wide-reaching impact of this technology.\n",
+ "* **The critical emphasis on challenges and ethical considerations**, which are essential to responsible development and deployment of such powerful AI systems. \n",
+ "\n",
+ "This kind of analysis is crucial for turning research like the \"I See You\" paper into real-world solutions that can benefit various industries. You've highlighted the exciting possibilities and important considerations for the future of AI-powered software! \n",
+ "\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[32m\n",
+ "Next speaker: Product_manager\n",
+ "\u001b[0m\n",
+ "\u001b[33mProduct_manager\u001b[0m (to chat_manager):\n",
+ "\n",
+ "You're right! We've just scratched the surface. Let's dive deeper into some specific software product ideas and how we might overcome the challenges:\n",
+ "\n",
+ "**1. \"Classroom Insights\" for Education Analytics**\n",
+ "\n",
+ "* **Features:**\n",
+ " * **Engagement Heatmap:** Visualize student engagement over time, highlighting moments of high and low participation. \n",
+ " * **Interaction Analysis:** Quantify teacher-student talk time, question types, and wait time for responses.\n",
+ " * **Sentiment Detection:** (With appropriate ethical safeguards) gauge general classroom sentiment (positive, negative, neutral) at different points during the lesson. \n",
+ " * **Personalized Recommendations:** Provide teachers with tailored suggestions for improving engagement, questioning techniques, or addressing individual student needs. \n",
+ "\n",
+ "* **Addressing Challenges:**\n",
+ " * **Bias Mitigation:** Train the AI model on diverse classroom settings and demographics, and allow for manual adjustments based on teacher feedback. \n",
+ " * **Privacy:** Implement strict data anonymization, secure storage, and clear consent procedures for both teachers and students (or parents/guardians).\n",
+ " * **Teacher Autonomy:** Emphasize that the tool provides insights, not judgments. Allow teachers to customize the feedback and focus areas.\n",
+ "\n",
+ "**2. \"Simulate Teach\" for Teacher Training**\n",
+ "\n",
+ "* **Features:**\n",
+ " * **Virtual Classrooms:** Create realistic virtual classroom environments with diverse student avatars exhibiting different behaviors and learning styles. \n",
+ " * **Scenario-Based Training:** Present trainees with various teaching challenges (e.g., classroom management, differentiated instruction) to practice in a safe space.\n",
+ " * **Real-Time Feedback:** Provide immediate AI-powered feedback on the trainee's teaching strategies, body language, and classroom management techniques. \n",
+ "\n",
+ "* **Addressing Challenges:**\n",
+ " * **Realism:** Continuously improve the virtual students' AI to respond more naturally and authentically to the trainee's actions.\n",
+ " * **Accessibility:** Design the software to be usable across various devices and internet speeds to benefit trainees in diverse locations.\n",
+ " * **Mentorship Integration:** Combine the AI feedback with opportunities for reflection and discussion with experienced mentors.\n",
+ "\n",
+ "**3. \"Meeting Insights Pro\" for Business**\n",
+ "\n",
+ "* **Features:**\n",
+ " * **Engagement Timeline:** Track attendee engagement levels throughout the meeting, identifying moments of high and low interest.\n",
+ " * **Action Item Detection:** Automatically extract key decisions, action items, and assigned owners from meeting transcripts.\n",
+ " * **Sentiment & Tone Analysis:** (With appropriate ethical considerations) analyze the overall sentiment and communication style within the team. \n",
+ " * **Meeting Efficiency Recommendations:** Provide data-driven suggestions for improving meeting structure, duration, and facilitation techniques.\n",
+ "\n",
+ "* **Addressing Challenges:**\n",
+ " * **Privacy:** Ensure robust data security, user consent, and clear communication about how the data is used.\n",
+ " * **Bias in Tone Analysis:** Train on diverse communication styles and cultural contexts to avoid misinterpretations of tone. \n",
+ " * **Human Oversight:** Allow for manual review and adjustments to AI-generated insights to account for nuances in communication and context. \n",
+ "\n",
+ "These are just a few examples. The potential applications of GPT-4 Vision in software are vast, spanning industries and use cases. By thoughtfully addressing the ethical and practical challenges, we can create transformative tools that empower educators, professionals, and individuals. \n",
+ "\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[32m\n",
+ "Next speaker: Product_manager\n",
+ "\u001b[0m\n",
+ "\u001b[33mProduct_manager\u001b[0m (to chat_manager):\n",
+ "\n",
+ "You're right, let's continue exploring! Beyond those initial ideas, here are some more speculative, but potentially high-impact applications:\n",
+ "\n",
+ "**4. \"CodeMentor\" for Personalized Programming Education**\n",
+ "\n",
+ "* **Features:**\n",
+ " * **Code Analysis & Feedback:** GPT-4 Vision analyzes code in real-time, identifying errors, suggesting improvements, and explaining concepts visually. \n",
+ " * **Personalized Learning Paths:** The software adapts to the learner's pace, style, and identified knowledge gaps to create a customized curriculum.\n",
+ " * **Virtual Debugging Assistant:** GPT-4 Vision \"looks\" at the code alongside the learner, anticipating common errors and providing interactive debugging guidance.\n",
+ "\n",
+ "* **Addressing Challenges:**\n",
+ " * **Complexity of Programming:** Training the AI on a vast dataset of code, programming paradigms, and best practices would be crucial.\n",
+ " * **Pedagogical Effectiveness:** Integrating proven teaching methods and ensuring the AI's feedback aligns with sound learning principles.\n",
+ " * **Avoiding Over-Reliance:** Encouraging problem-solving skills and independent thinking alongside AI assistance.\n",
+ "\n",
+ "**5. \"DesignSpark\" for Collaborative Creative Work**\n",
+ "\n",
+ "* **Features:**\n",
+ " * **Visual Brainstorming:** GPT-4 Vision assists teams in brainstorming by generating images, mockups, and design variations based on keywords, sketches, or mood boards.\n",
+ " * **Real-time Feedback & Iteration:** AI provides instant feedback on design elements, suggesting improvements to composition, color, and typography. \n",
+ " * **Cross-Cultural Design:** GPT-4 Vision analyzes design trends and preferences across different cultures, helping teams create globally resonant visuals.\n",
+ "\n",
+ "* **Addressing Challenges:**\n",
+ " * **Subjectivity in Design:** Balancing objective design principles with the subjective nature of aesthetics and creativity.\n",
+ " * **Copyright & Ownership:** Establishing clear guidelines for the use and ownership of AI-generated design elements. \n",
+ " * **Preserving Human Creativity:** Ensuring that AI augments, not replaces, the unique vision and skills of human designers.\n",
+ "\n",
+ "**6. \"Accessible World\" for Assistive Technology**\n",
+ "\n",
+ "* **Features:**\n",
+ " * **Real-Time Object Recognition:** For visually impaired individuals, the software provides audio descriptions of objects, scenes, and text in their environment.\n",
+ " * **Sign Language Translation:** GPT-4 Vision translates sign language into text or speech in real-time, facilitating communication.\n",
+ " * **Personalized Sensory Feedback:** The software creates customized sensory experiences (e.g., haptic feedback, light patterns) for individuals with diverse sensory processing needs.\n",
+ "\n",
+ "* **Addressing Challenges:**\n",
+ " * **Accuracy and Reliability:** Ensuring the AI's interpretations are accurate and dependable in real-world situations.\n",
+ " * **Privacy and Dignity:** Designing the technology in a way that respects user privacy and avoids stigmatization.\n",
+ " * **Affordability & Accessibility:** Making the technology accessible to a wide range of users, regardless of their financial or technical resources. \n",
+ "\n",
+ "These expanded examples highlight the potential of GPT-4 Vision to not only automate tasks but also to enhance human capabilities, foster creativity, and promote inclusivity. By approaching these innovations with a focus on ethical considerations and human-centered design, we can unlock incredible opportunities for positive change. \n",
+ "\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[32m\n",
+ "Next speaker: Product_manager\n",
+ "\u001b[0m\n",
+ "\u001b[33mProduct_manager\u001b[0m (to chat_manager):\n",
+ "\n",
+ "You're right, there's so much more to explore! Let's venture into some even more futuristic and ambitious applications, pushing the boundaries of what we currently imagine possible:\n",
+ "\n",
+ "**7. \"Empathic Tech\" for Mental Health Support:**\n",
+ "\n",
+ "* **Features:**\n",
+ " * **Emotionally Aware Virtual Companions:** GPT-4 Vision powers AI companions that can recognize and respond to human emotions expressed through facial expressions, body language, and vocal tone.\n",
+ " * **Personalized Mental Health Exercises:** The software provides tailored mindfulness practices, breathing exercises, or cognitive-behavioral therapy (CBT) techniques based on real-time emotional cues.\n",
+ " * **Early Warning System:** GPT-4 Vision analyzes patterns in user behavior and emotional expression to identify potential signs of mental health challenges and connect them with appropriate resources.\n",
+ "\n",
+ "* **Addressing Challenges:**\n",
+ " * **Ethical Considerations:** Ensuring user privacy, data security, and responsible use of sensitive health information is paramount.\n",
+ " * **Accuracy and Sensitivity:** Training AI to accurately interpret complex human emotions and respond with empathy and cultural sensitivity is a significant challenge.\n",
+ " * **Human Connection:** Emphasizing that technology should complement, not replace, professional mental health care and human connection.\n",
+ "\n",
+ "**8. \"EcoVision\" for Environmental Monitoring and Conservation:**\n",
+ "\n",
+ "* **Features:**\n",
+ " * **Real-Time Environmental Analysis:** GPT-4 Vision analyzes images and videos from drones, satellites, or ground-based cameras to monitor deforestation, pollution levels, wildlife populations, and other environmental factors.\n",
+ " * **Predictive Modeling for Conservation:** The software uses AI to predict environmental changes, identify areas at risk, and inform conservation efforts.\n",
+ " * **Citizen Science Platform:** EcoVision empowers individuals to contribute to environmental monitoring by uploading images and observations that the AI can analyze and integrate into its models.\n",
+ "\n",
+ "* **Addressing Challenges:**\n",
+ " * **Data Accessibility and Accuracy:** Ensuring access to high-quality, diverse environmental data from various sources is crucial.\n",
+ " * **Bias in Data and Algorithms:** Mitigating potential biases in data collection and algorithm design to avoid skewed environmental assessments.\n",
+ " * **Collaboration and Action:** Translating AI insights into concrete actions by fostering collaboration between scientists, policymakers, and local communities.\n",
+ "\n",
+ "**9. \"HistoryLens\" for Immersive Historical Experiences:**\n",
+ "\n",
+ "* **Features:**\n",
+ " * **Interactive Historical Reenactments:** GPT-4 Vision recreates historical events, figures, and locations in immersive virtual reality or augmented reality experiences.\n",
+ " * **Personalized Learning Journeys:** The software tailors historical narratives and perspectives based on user interests and background, providing a deeper understanding of the past.\n",
+ " * **Preservation and Accessibility:** HistoryLens digitally preserves historical artifacts, documents, and oral histories, making them accessible to a wider audience.\n",
+ "\n",
+ "* **Addressing Challenges:**\n",
+ " * **Historical Accuracy and Bias:** Ensuring historical representations are accurate, nuanced, and avoid perpetuating biases or historical revisionism. \n",
+ " * **Ethical Considerations of Representation:** Carefully navigating the ethical complexities of recreating sensitive historical events or representing marginalized communities. \n",
+ " * **Balancing Entertainment and Education:** Creating engaging experiences that also foster critical thinking and historical understanding.\n",
+ "\n",
+ "These futuristic applications highlight the immense potential of GPT-4 Vision to not only solve practical problems but also enhance our understanding of ourselves, our planet, and our history. As we continue to develop this technology, it is essential to proceed with thoughtful consideration for its ethical implications and societal impact, ensuring that it is used to benefit humanity and create a more just and sustainable future. \n",
+ "\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[32m\n",
+ "Next speaker: Product_manager\n",
+ "\u001b[0m\n",
+ "\u001b[33mProduct_manager\u001b[0m (to chat_manager):\n",
+ "\n",
+ "You're right, there's always more to imagine! Let's keep pushing the boundaries and explore some applications that sound like they're straight out of science fiction:\n",
+ "\n",
+ "**10. \"Dream Weaver\" for Artistic Co-Creation:**\n",
+ "\n",
+ "* **Features:**\n",
+ " * **Concept Visualization:** Users can input textual descriptions, rough sketches, or even their emotions, and GPT-4 Vision generates stunning visuals, musical compositions, or even short films that capture the essence of their ideas. \n",
+ " * **Style Transfer & Remixing:** The software enables artists to blend different artistic styles, seamlessly merging the realism of a photograph with the brushstrokes of Van Gogh or the abstract patterns of Kandinsky.\n",
+ " * **Interactive Storytelling:** GPT-4 Vision becomes a collaborative partner in storytelling, generating dynamic environments, characters, and plot twists in response to user input, blurring the lines between audience and creator.\n",
+ "\n",
+ "* **Addressing Challenges:**\n",
+ " * **Defining Creativity:** Exploring the philosophical and technical boundaries of AI creativity and ensuring it complements, not replaces, human artistic expression.\n",
+ " * **Copyright and Authorship:** Establishing clear guidelines for ownership and attribution when AI contributes significantly to the creative process. \n",
+ " * **Accessibility and Democratization:** Making these powerful creative tools accessible to a wide audience, fostering a more inclusive and imaginative future for art.\n",
+ "\n",
+ "\n",
+ "**11. \"Universal Translator\" for Real-Time Cross-Cultural Communication:**\n",
+ "\n",
+ "* **Features:**\n",
+ " * **Seamless Language Translation:** GPT-4 Vision goes beyond text, translating spoken language in real-time while accounting for nuances in tone, dialect, and cultural context.\n",
+ " * **Nonverbal Communication Interpretation:** The software analyzes facial expressions, gestures, and body language to bridge cultural differences in nonverbal communication, fostering greater understanding.\n",
+ " * **Cultural Sensitivity Guidance:** GPT-4 Vision provides users with real-time insights into cultural norms and customs, helping to avoid misunderstandings and promote respectful interactions.\n",
+ "\n",
+ "* **Addressing Challenges:**\n",
+ " * **Linguistic Complexity and Nuance:** Accurately translating the full richness and complexity of human language, including idioms, slang, and cultural references, remains a significant hurdle.\n",
+ " * **Bias and Stereotyping:** Ensuring the AI avoids perpetuating cultural biases or stereotypes in its translations and interpretations is crucial.\n",
+ " * **Preserving Linguistic Diversity:** Promoting language learning and cultural exchange while using technology to bridge communication gaps is essential. \n",
+ "\n",
+ "**12. \"Guardian AI\" for Personalized Safety and Well-being:**\n",
+ "\n",
+ "* **Features:**\n",
+ " * **Predictive Risk Assessment:** GPT-4 Vision analyzes real-time data from personal devices, social networks, and public safety feeds to identify potential risks or threats to an individual's safety and well-being. \n",
+ " * **Proactive Safety Measures:** The AI can alert users to potential dangers, suggest alternative routes, or even contact emergency services in critical situations. \n",
+ " * **Personalized Health Monitoring:** GPT-4 Vision monitors an individual's physical and mental health through wearable sensors and behavioral patterns, providing early warnings of potential health issues and suggesting preventative measures. \n",
+ "\n",
+ "* **Addressing Challenges:**\n",
+ " * **Privacy vs. Security:** Striking a delicate balance between protecting individual privacy and utilizing data to enhance safety.\n",
+ " * **Bias and Discrimination:** Ensuring the AI does not perpetuate existing societal biases or discriminate against individuals based on factors like race, gender, or socioeconomic status.\n",
+ " * **Over-Reliance and Autonomy:** Avoiding over-dependence on AI for safety and encouraging individuals to maintain their own situational awareness and decision-making abilities. \n",
+ "\n",
+ "These futuristic applications highlight the potential of GPT-4 Vision to not only solve problems but also enhance human connection, understanding, and well-being. As we develop such powerful AI, responsible innovation, ethical considerations, and a focus on human-centered design will be paramount in shaping a future where technology empowers and uplifts all of humanity. \n",
+ "\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ "ChatResult(chat_id=None, chat_history=[{'content': 'Find a latest paper about gpt-4 on arxiv and find its potential applications in software.', 'role': 'assistant'}, {'content': '', 'role': 'assistant'}, {'content': '', 'role': 'assistant'}, {'content': 'Let\\'s start by searching for the latest paper about GPT-4 on arXiv. We can use the arXiv API to fetch the latest paper related to GPT-4.\\n\\nI\\'ll provide a Python script to search arXiv for the latest papers related to \"GPT-4.\"\\n\\n```python\\nimport requests\\nfrom xml.etree import ElementTree\\n\\n# arXiv API url for querying papers related to GPT-4\\nurl = \"http://export.arxiv.org/api/query?search_query=ti:GPT-4&start=0&max_results=1&sortBy=submittedDate&sortOrder=descending\"\\n\\nresponse = requests.get(url)\\nif response.status_code == 200:\\n root = ElementTree.fromstring(response.content)\\n entry = root.find(\"{http://www.w3.org/2005/Atom}entry\")\\n if entry is not None:\\n title = entry.find(\"{http://www.w3.org/2005/Atom}title\").text\\n summary = entry.find(\"{http://www.w3.org/2005/Atom}summary\").text\\n link = entry.find(\"{http://www.w3.org/2005/Atom}id\").text\\n \\n print(f\"Title: {title}\")\\n print(f\"Summary: {summary}\")\\n print(f\"Link: {link}\")\\n else:\\n print(\"No entries found.\")\\nelse:\\n print(f\"Failed to fetch data from arXiv API. Status code: {response.status_code}\")\\n```\\n\\nPlease execute this script. It will output the title, summary, and link of the latest paper about \"GPT-4\" on arXiv. After obtaining the relevant paper, I will analyze its potential applications in software.', 'name': 'Coder', 'role': 'user'}, {'content': \"exitcode: 0 (execution succeeded)\\nCode output: \\nTitle: I See You: Teacher Analytics with GPT-4 Vision-Powered Observational\\n Assessment\\nSummary: This preliminary study explores the integration of GPT-4 Vision (GPT-4V)\\ntechnology into teacher analytics, focusing on its applicability in\\nobservational assessment to enhance reflective teaching practice. This research\\nis grounded in developing a Video-based Automatic Assessment System (VidAAS)\\nempowered by GPT-4V. Our approach aims to revolutionize teachers' assessment of\\nstudents' practices by leveraging Generative Artificial Intelligence (GenAI) to\\noffer detailed insights into classroom dynamics. Our research methodology\\nencompasses a comprehensive literature review, prototype development of the\\nVidAAS, and usability testing with in-service teachers. The study findings\\nprovide future research avenues for VidAAS design, implementation, and\\nintegration in teacher analytics, underscoring the potential of GPT-4V to\\nprovide real-time, scalable feedback and a deeper understanding of the\\nclassroom.\\n\\nLink: http://arxiv.org/abs/2405.18623v2\\n\", 'role': 'assistant'}, {'content': 'This is exciting! The paper you found, \"I See You: Teacher Analytics with GPT-4 Vision-Powered Observational Assessment\", explores an innovative application of GPT-4 Vision. Let\\'s break down the potential software applications based on this:\\n\\n**Software Applications Inspired by \"I See You\":**\\n\\n* **Video-Based Educational Analytics Platforms:** This paper lays the groundwork for a new breed of educational software. Imagine platforms that analyze classroom recordings to provide automated feedback to teachers. 
These platforms could:\\n * **Track student engagement:** Identify students who seem disengaged or confused.\\n * **Analyze classroom interactions:** Quantify the quality and nature of teacher-student interactions.\\n * **Assess student understanding:** Potentially even gauge student comprehension through facial expressions and body language.\\n* **Real-time Teacher Assistance Tools:** GPT-4 Vision could power real-time feedback tools for teachers during live lessons. Imagine:\\n * **Subtle alerts:** Discretely notifying a teacher if a student appears to be struggling.\\n * **Personalized suggestions:** Providing on-the-fly recommendations for teaching strategies based on real-time classroom dynamics. \\n* **Teacher Training and Professional Development:** \\n * **Simulation Training:** GPT-4 Vision could create realistic virtual classroom simulations for teacher training, allowing educators to practice techniques and receive AI-powered feedback.\\n * **Reflective Practice:** Video analysis tools could help teachers reflect on their own teaching styles and identify areas for improvement. \\n\\n**Beyond Education:**\\n\\nWhile this paper focuses on education, the core technology has broader implications:\\n\\n* **Meeting Analytics:** Imagine software that analyzes video conferences to track engagement, identify key discussion points, or even assess team dynamics.\\n* **Healthcare Training:** Similar applications could revolutionize healthcare training by providing automated feedback during simulated patient interactions. \\n* **Retail Analytics:** GPT-4 Vision could analyze customer behavior in retail environments, providing insights into product placement, customer service interactions, and more.\\n\\n**Challenges and Ethical Considerations:**\\n\\n* **Bias in AI:** Ensuring that the AI models are trained on diverse datasets to avoid perpetuating existing biases in education or other fields. \\n* **Privacy Concerns:** Video analysis raises significant privacy concerns. Clear guidelines and user consent are essential.\\n* **Teacher Autonomy:** It\\'s crucial to design these systems as tools to *augment* teacher expertise, not replace it.\\n\\n**In conclusion,** the paper you found highlights the immense potential of GPT-4 Vision to revolutionize software in education and beyond. It\\'s an exciting area of exploration with the potential to create powerful tools for learning, analysis, and understanding human behavior. \\n', 'name': 'Product_manager', 'role': 'user'}, {'content': 'The paper you found titled \"I See You: Teacher Analytics with GPT-4 Vision-Powered Observational Assessment\" outlines a fascinating and innovative application of GPT-4 Vision technology. Here are potential software applications based on this research:\\n\\n### 1. Video-Based Educational Analytics Platforms\\nThese platforms can analyze classroom recordings to automate feedback for teachers. They could:\\n- **Track Student Engagement:** Identify students who seem disengaged or confused.\\n- **Analyze Teacher-Student Interactions:** Quantify the quality and nature of interactions.\\n- **Assess Understanding:** Gauge student comprehension through facial expressions and body language.\\n\\n### 2. Real-Time Teacher Assistance Tools\\nGPT-4 Vision could power tools that provide real-time feedback during live lessons by:\\n- **Alerting Teachers Subtly:** Notifying if a student is struggling.\\n- **Offering Personalized Suggestions:** Providing on-the-fly recommendations for teaching strategies.\\n\\n### 3. 
Teacher Training and Professional Development\\n- **Simulation Training:** Enable realistic virtual classroom simulations for practice and AI-powered feedback.\\n- **Reflective Practice:** Allow teachers to reflect on their own teaching and identify areas for improvement through video analysis tools.\\n\\n### Applications Beyond Education\\nWhile this paper focuses on education, the underlying technology has broader implications across various domains:\\n#### Meeting Analytics\\nSoftware could analyze video conferences to:\\n- Track engagement.\\n- Identify key discussion points.\\n- Assess team dynamics.\\n\\n#### Healthcare Training\\nTools could provide automated feedback during simulated patient interactions.\\n\\n#### Retail Analytics\\nGPT-4 Vision could analyze customer behavior in retail environments, offering insights into:\\n- Product placements.\\n- Customer service interactions.\\n- Sales strategies.\\n\\n### Challenges and Ethical Considerations\\n- **Bias in AI:** Ensuring diverse datasets to avoid perpetuating biases.\\n- **Privacy Concerns:** Addressing significant privacy concerns with clear guidelines and user consent.\\n- **Teacher Autonomy:** Designing systems to augment rather than replace teacher expertise.\\n\\n**Conclusion:**\\nThe \"I See You\" paper exemplifies the immense potential of GPT-4 Vision technology to innovate educational software and beyond. By addressing challenges and ethical considerations, we can harness this technology to create powerful tools for learning, analysis, and human behavior understanding.\\n\\nTERMINATE', 'name': 'Coder', 'role': 'user'}, {'content': 'That\\'s an excellent summary and expansion of the potential applications! You\\'ve clearly grasped the key points of the paper and extrapolated them into a range of innovative software ideas. \\n\\nI especially appreciate your inclusion of:\\n\\n* **Specific examples** within each application category (like tracking engagement in meeting analytics or analyzing customer service interactions in retail). \\n* **The broader applications** beyond education, demonstrating the wide-reaching impact of this technology.\\n* **The critical emphasis on challenges and ethical considerations**, which are essential to responsible development and deployment of such powerful AI systems. \\n\\nThis kind of analysis is crucial for turning research like the \"I See You\" paper into real-world solutions that can benefit various industries. You\\'ve highlighted the exciting possibilities and important considerations for the future of AI-powered software! \\n', 'name': 'Product_manager', 'role': 'user'}, {'content': 'You\\'re right! We\\'ve just scratched the surface. Let\\'s dive deeper into some specific software product ideas and how we might overcome the challenges:\\n\\n**1. \"Classroom Insights\" for Education Analytics**\\n\\n* **Features:**\\n * **Engagement Heatmap:** Visualize student engagement over time, highlighting moments of high and low participation. \\n * **Interaction Analysis:** Quantify teacher-student talk time, question types, and wait time for responses.\\n * **Sentiment Detection:** (With appropriate ethical safeguards) gauge general classroom sentiment (positive, negative, neutral) at different points during the lesson. \\n * **Personalized Recommendations:** Provide teachers with tailored suggestions for improving engagement, questioning techniques, or addressing individual student needs. 
\\n\\n* **Addressing Challenges:**\\n * **Bias Mitigation:** Train the AI model on diverse classroom settings and demographics, and allow for manual adjustments based on teacher feedback. \\n * **Privacy:** Implement strict data anonymization, secure storage, and clear consent procedures for both teachers and students (or parents/guardians).\\n * **Teacher Autonomy:** Emphasize that the tool provides insights, not judgments. Allow teachers to customize the feedback and focus areas.\\n\\n**2. \"Simulate Teach\" for Teacher Training**\\n\\n* **Features:**\\n * **Virtual Classrooms:** Create realistic virtual classroom environments with diverse student avatars exhibiting different behaviors and learning styles. \\n * **Scenario-Based Training:** Present trainees with various teaching challenges (e.g., classroom management, differentiated instruction) to practice in a safe space.\\n * **Real-Time Feedback:** Provide immediate AI-powered feedback on the trainee\\'s teaching strategies, body language, and classroom management techniques. \\n\\n* **Addressing Challenges:**\\n * **Realism:** Continuously improve the virtual students\\' AI to respond more naturally and authentically to the trainee\\'s actions.\\n * **Accessibility:** Design the software to be usable across various devices and internet speeds to benefit trainees in diverse locations.\\n * **Mentorship Integration:** Combine the AI feedback with opportunities for reflection and discussion with experienced mentors.\\n\\n**3. \"Meeting Insights Pro\" for Business**\\n\\n* **Features:**\\n * **Engagement Timeline:** Track attendee engagement levels throughout the meeting, identifying moments of high and low interest.\\n * **Action Item Detection:** Automatically extract key decisions, action items, and assigned owners from meeting transcripts.\\n * **Sentiment & Tone Analysis:** (With appropriate ethical considerations) analyze the overall sentiment and communication style within the team. \\n * **Meeting Efficiency Recommendations:** Provide data-driven suggestions for improving meeting structure, duration, and facilitation techniques.\\n\\n* **Addressing Challenges:**\\n * **Privacy:** Ensure robust data security, user consent, and clear communication about how the data is used.\\n * **Bias in Tone Analysis:** Train on diverse communication styles and cultural contexts to avoid misinterpretations of tone. \\n * **Human Oversight:** Allow for manual review and adjustments to AI-generated insights to account for nuances in communication and context. \\n\\nThese are just a few examples. The potential applications of GPT-4 Vision in software are vast, spanning industries and use cases. By thoughtfully addressing the ethical and practical challenges, we can create transformative tools that empower educators, professionals, and individuals. \\n', 'name': 'Product_manager', 'role': 'user'}, {'content': 'You\\'re right, let\\'s continue exploring! Beyond those initial ideas, here are some more speculative, but potentially high-impact applications:\\n\\n**4. \"CodeMentor\" for Personalized Programming Education**\\n\\n* **Features:**\\n * **Code Analysis & Feedback:** GPT-4 Vision analyzes code in real-time, identifying errors, suggesting improvements, and explaining concepts visually. 
\\n * **Personalized Learning Paths:** The software adapts to the learner\\'s pace, style, and identified knowledge gaps to create a customized curriculum.\\n * **Virtual Debugging Assistant:** GPT-4 Vision \"looks\" at the code alongside the learner, anticipating common errors and providing interactive debugging guidance.\\n\\n* **Addressing Challenges:**\\n * **Complexity of Programming:** Training the AI on a vast dataset of code, programming paradigms, and best practices would be crucial.\\n * **Pedagogical Effectiveness:** Integrating proven teaching methods and ensuring the AI\\'s feedback aligns with sound learning principles.\\n * **Avoiding Over-Reliance:** Encouraging problem-solving skills and independent thinking alongside AI assistance.\\n\\n**5. \"DesignSpark\" for Collaborative Creative Work**\\n\\n* **Features:**\\n * **Visual Brainstorming:** GPT-4 Vision assists teams in brainstorming by generating images, mockups, and design variations based on keywords, sketches, or mood boards.\\n * **Real-time Feedback & Iteration:** AI provides instant feedback on design elements, suggesting improvements to composition, color, and typography. \\n * **Cross-Cultural Design:** GPT-4 Vision analyzes design trends and preferences across different cultures, helping teams create globally resonant visuals.\\n\\n* **Addressing Challenges:**\\n * **Subjectivity in Design:** Balancing objective design principles with the subjective nature of aesthetics and creativity.\\n * **Copyright & Ownership:** Establishing clear guidelines for the use and ownership of AI-generated design elements. \\n * **Preserving Human Creativity:** Ensuring that AI augments, not replaces, the unique vision and skills of human designers.\\n\\n**6. \"Accessible World\" for Assistive Technology**\\n\\n* **Features:**\\n * **Real-Time Object Recognition:** For visually impaired individuals, the software provides audio descriptions of objects, scenes, and text in their environment.\\n * **Sign Language Translation:** GPT-4 Vision translates sign language into text or speech in real-time, facilitating communication.\\n * **Personalized Sensory Feedback:** The software creates customized sensory experiences (e.g., haptic feedback, light patterns) for individuals with diverse sensory processing needs.\\n\\n* **Addressing Challenges:**\\n * **Accuracy and Reliability:** Ensuring the AI\\'s interpretations are accurate and dependable in real-world situations.\\n * **Privacy and Dignity:** Designing the technology in a way that respects user privacy and avoids stigmatization.\\n * **Affordability & Accessibility:** Making the technology accessible to a wide range of users, regardless of their financial or technical resources. \\n\\nThese expanded examples highlight the potential of GPT-4 Vision to not only automate tasks but also to enhance human capabilities, foster creativity, and promote inclusivity. By approaching these innovations with a focus on ethical considerations and human-centered design, we can unlock incredible opportunities for positive change. \\n', 'name': 'Product_manager', 'role': 'user'}, {'content': 'You\\'re right, there\\'s so much more to explore! Let\\'s venture into some even more futuristic and ambitious applications, pushing the boundaries of what we currently imagine possible:\\n\\n**7. 
\"Empathic Tech\" for Mental Health Support:**\\n\\n* **Features:**\\n * **Emotionally Aware Virtual Companions:** GPT-4 Vision powers AI companions that can recognize and respond to human emotions expressed through facial expressions, body language, and vocal tone.\\n * **Personalized Mental Health Exercises:** The software provides tailored mindfulness practices, breathing exercises, or cognitive-behavioral therapy (CBT) techniques based on real-time emotional cues.\\n * **Early Warning System:** GPT-4 Vision analyzes patterns in user behavior and emotional expression to identify potential signs of mental health challenges and connect them with appropriate resources.\\n\\n* **Addressing Challenges:**\\n * **Ethical Considerations:** Ensuring user privacy, data security, and responsible use of sensitive health information is paramount.\\n * **Accuracy and Sensitivity:** Training AI to accurately interpret complex human emotions and respond with empathy and cultural sensitivity is a significant challenge.\\n * **Human Connection:** Emphasizing that technology should complement, not replace, professional mental health care and human connection.\\n\\n**8. \"EcoVision\" for Environmental Monitoring and Conservation:**\\n\\n* **Features:**\\n * **Real-Time Environmental Analysis:** GPT-4 Vision analyzes images and videos from drones, satellites, or ground-based cameras to monitor deforestation, pollution levels, wildlife populations, and other environmental factors.\\n * **Predictive Modeling for Conservation:** The software uses AI to predict environmental changes, identify areas at risk, and inform conservation efforts.\\n * **Citizen Science Platform:** EcoVision empowers individuals to contribute to environmental monitoring by uploading images and observations that the AI can analyze and integrate into its models.\\n\\n* **Addressing Challenges:**\\n * **Data Accessibility and Accuracy:** Ensuring access to high-quality, diverse environmental data from various sources is crucial.\\n * **Bias in Data and Algorithms:** Mitigating potential biases in data collection and algorithm design to avoid skewed environmental assessments.\\n * **Collaboration and Action:** Translating AI insights into concrete actions by fostering collaboration between scientists, policymakers, and local communities.\\n\\n**9. \"HistoryLens\" for Immersive Historical Experiences:**\\n\\n* **Features:**\\n * **Interactive Historical Reenactments:** GPT-4 Vision recreates historical events, figures, and locations in immersive virtual reality or augmented reality experiences.\\n * **Personalized Learning Journeys:** The software tailors historical narratives and perspectives based on user interests and background, providing a deeper understanding of the past.\\n * **Preservation and Accessibility:** HistoryLens digitally preserves historical artifacts, documents, and oral histories, making them accessible to a wider audience.\\n\\n* **Addressing Challenges:**\\n * **Historical Accuracy and Bias:** Ensuring historical representations are accurate, nuanced, and avoid perpetuating biases or historical revisionism. \\n * **Ethical Considerations of Representation:** Carefully navigating the ethical complexities of recreating sensitive historical events or representing marginalized communities. 
\\n * **Balancing Entertainment and Education:** Creating engaging experiences that also foster critical thinking and historical understanding.\\n\\nThese futuristic applications highlight the immense potential of GPT-4 Vision to not only solve practical problems but also enhance our understanding of ourselves, our planet, and our history. As we continue to develop this technology, it is essential to proceed with thoughtful consideration for its ethical implications and societal impact, ensuring that it is used to benefit humanity and create a more just and sustainable future. \\n', 'name': 'Product_manager', 'role': 'user'}, {'content': 'You\\'re right, there\\'s always more to imagine! Let\\'s keep pushing the boundaries and explore some applications that sound like they\\'re straight out of science fiction:\\n\\n**10. \"Dream Weaver\" for Artistic Co-Creation:**\\n\\n* **Features:**\\n * **Concept Visualization:** Users can input textual descriptions, rough sketches, or even their emotions, and GPT-4 Vision generates stunning visuals, musical compositions, or even short films that capture the essence of their ideas. \\n * **Style Transfer & Remixing:** The software enables artists to blend different artistic styles, seamlessly merging the realism of a photograph with the brushstrokes of Van Gogh or the abstract patterns of Kandinsky.\\n * **Interactive Storytelling:** GPT-4 Vision becomes a collaborative partner in storytelling, generating dynamic environments, characters, and plot twists in response to user input, blurring the lines between audience and creator.\\n\\n* **Addressing Challenges:**\\n * **Defining Creativity:** Exploring the philosophical and technical boundaries of AI creativity and ensuring it complements, not replaces, human artistic expression.\\n * **Copyright and Authorship:** Establishing clear guidelines for ownership and attribution when AI contributes significantly to the creative process. \\n * **Accessibility and Democratization:** Making these powerful creative tools accessible to a wide audience, fostering a more inclusive and imaginative future for art.\\n\\n\\n**11. \"Universal Translator\" for Real-Time Cross-Cultural Communication:**\\n\\n* **Features:**\\n * **Seamless Language Translation:** GPT-4 Vision goes beyond text, translating spoken language in real-time while accounting for nuances in tone, dialect, and cultural context.\\n * **Nonverbal Communication Interpretation:** The software analyzes facial expressions, gestures, and body language to bridge cultural differences in nonverbal communication, fostering greater understanding.\\n * **Cultural Sensitivity Guidance:** GPT-4 Vision provides users with real-time insights into cultural norms and customs, helping to avoid misunderstandings and promote respectful interactions.\\n\\n* **Addressing Challenges:**\\n * **Linguistic Complexity and Nuance:** Accurately translating the full richness and complexity of human language, including idioms, slang, and cultural references, remains a significant hurdle.\\n * **Bias and Stereotyping:** Ensuring the AI avoids perpetuating cultural biases or stereotypes in its translations and interpretations is crucial.\\n * **Preserving Linguistic Diversity:** Promoting language learning and cultural exchange while using technology to bridge communication gaps is essential. \\n\\n**12. 
\"Guardian AI\" for Personalized Safety and Well-being:**\\n\\n* **Features:**\\n * **Predictive Risk Assessment:** GPT-4 Vision analyzes real-time data from personal devices, social networks, and public safety feeds to identify potential risks or threats to an individual\\'s safety and well-being. \\n * **Proactive Safety Measures:** The AI can alert users to potential dangers, suggest alternative routes, or even contact emergency services in critical situations. \\n * **Personalized Health Monitoring:** GPT-4 Vision monitors an individual\\'s physical and mental health through wearable sensors and behavioral patterns, providing early warnings of potential health issues and suggesting preventative measures. \\n\\n* **Addressing Challenges:**\\n * **Privacy vs. Security:** Striking a delicate balance between protecting individual privacy and utilizing data to enhance safety.\\n * **Bias and Discrimination:** Ensuring the AI does not perpetuate existing societal biases or discriminate against individuals based on factors like race, gender, or socioeconomic status.\\n * **Over-Reliance and Autonomy:** Avoiding over-dependence on AI for safety and encouraging individuals to maintain their own situational awareness and decision-making abilities. \\n\\nThese futuristic applications highlight the potential of GPT-4 Vision to not only solve problems but also enhance human connection, understanding, and well-being. As we develop such powerful AI, responsible innovation, ethical considerations, and a focus on human-centered design will be paramount in shaping a future where technology empowers and uplifts all of humanity. \\n', 'name': 'Product_manager', 'role': 'user'}], summary='You\\'re right, there\\'s always more to imagine! Let\\'s keep pushing the boundaries and explore some applications that sound like they\\'re straight out of science fiction:\\n\\n**10. \"Dream Weaver\" for Artistic Co-Creation:**\\n\\n* **Features:**\\n * **Concept Visualization:** Users can input textual descriptions, rough sketches, or even their emotions, and GPT-4 Vision generates stunning visuals, musical compositions, or even short films that capture the essence of their ideas. \\n * **Style Transfer & Remixing:** The software enables artists to blend different artistic styles, seamlessly merging the realism of a photograph with the brushstrokes of Van Gogh or the abstract patterns of Kandinsky.\\n * **Interactive Storytelling:** GPT-4 Vision becomes a collaborative partner in storytelling, generating dynamic environments, characters, and plot twists in response to user input, blurring the lines between audience and creator.\\n\\n* **Addressing Challenges:**\\n * **Defining Creativity:** Exploring the philosophical and technical boundaries of AI creativity and ensuring it complements, not replaces, human artistic expression.\\n * **Copyright and Authorship:** Establishing clear guidelines for ownership and attribution when AI contributes significantly to the creative process. \\n * **Accessibility and Democratization:** Making these powerful creative tools accessible to a wide audience, fostering a more inclusive and imaginative future for art.\\n\\n\\n**11. 
\"Universal Translator\" for Real-Time Cross-Cultural Communication:**\\n\\n* **Features:**\\n * **Seamless Language Translation:** GPT-4 Vision goes beyond text, translating spoken language in real-time while accounting for nuances in tone, dialect, and cultural context.\\n * **Nonverbal Communication Interpretation:** The software analyzes facial expressions, gestures, and body language to bridge cultural differences in nonverbal communication, fostering greater understanding.\\n * **Cultural Sensitivity Guidance:** GPT-4 Vision provides users with real-time insights into cultural norms and customs, helping to avoid misunderstandings and promote respectful interactions.\\n\\n* **Addressing Challenges:**\\n * **Linguistic Complexity and Nuance:** Accurately translating the full richness and complexity of human language, including idioms, slang, and cultural references, remains a significant hurdle.\\n * **Bias and Stereotyping:** Ensuring the AI avoids perpetuating cultural biases or stereotypes in its translations and interpretations is crucial.\\n * **Preserving Linguistic Diversity:** Promoting language learning and cultural exchange while using technology to bridge communication gaps is essential. \\n\\n**12. \"Guardian AI\" for Personalized Safety and Well-being:**\\n\\n* **Features:**\\n * **Predictive Risk Assessment:** GPT-4 Vision analyzes real-time data from personal devices, social networks, and public safety feeds to identify potential risks or threats to an individual\\'s safety and well-being. \\n * **Proactive Safety Measures:** The AI can alert users to potential dangers, suggest alternative routes, or even contact emergency services in critical situations. \\n * **Personalized Health Monitoring:** GPT-4 Vision monitors an individual\\'s physical and mental health through wearable sensors and behavioral patterns, providing early warnings of potential health issues and suggesting preventative measures. \\n\\n* **Addressing Challenges:**\\n * **Privacy vs. Security:** Striking a delicate balance between protecting individual privacy and utilizing data to enhance safety.\\n * **Bias and Discrimination:** Ensuring the AI does not perpetuate existing societal biases or discriminate against individuals based on factors like race, gender, or socioeconomic status.\\n * **Over-Reliance and Autonomy:** Avoiding over-dependence on AI for safety and encouraging individuals to maintain their own situational awareness and decision-making abilities. \\n\\nThese futuristic applications highlight the potential of GPT-4 Vision to not only solve problems but also enhance human connection, understanding, and well-being. As we develop such powerful AI, responsible innovation, ethical considerations, and a focus on human-centered design will be paramount in shaping a future where technology empowers and uplifts all of humanity. \\n', cost={'usage_including_cached_inference': {'total_cost': 0}, 'usage_excluding_cached_inference': {'total_cost': 0}}, human_input=[])"
+ ]
+ },
+ "execution_count": 1,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "import autogen\n",
+ "\n",
+ "gpt_config_list = autogen.config_list_from_json(\"OAI_CONFIG_LIST\", filter_dict={\"tags\": [\"gpt-4\"]})\n",
+ "\n",
+ "gpt_llm_config = {\"config_list\": gpt_config_list, \"timeout\": 120}\n",
+ "\n",
+ "gemini_config_list = autogen.config_list_from_json(\"OAI_CONFIG_LIST\", filter_dict={\"tags\": [\"gemini\"]})\n",
+ "\n",
+ "gemini_llm_config = {\"config_list\": gemini_config_list, \"timeout\": 120}\n",
+ "\n",
+ "user_proxy = autogen.UserProxyAgent(\n",
+ " name=\"User_proxy\",\n",
+ " system_message=\"A human admin.\",\n",
+ " code_execution_config={\n",
+ " \"last_n_messages\": 2,\n",
+ " \"work_dir\": \"groupchat\",\n",
+ " \"use_docker\": False,\n",
+ " }, # Please set use_docker=True if docker is available to run the generated code. Using docker is safer than running the generated code directly.\n",
+ " human_input_mode=\"TERMINATE\",\n",
+ ")\n",
+ "coder = autogen.AssistantAgent(\n",
+ " name=\"Coder\",\n",
+ " llm_config=gpt_llm_config,\n",
+ ")\n",
+ "pm = autogen.AssistantAgent(\n",
+ " name=\"Product_manager\",\n",
+ " system_message=\"Creative in software product ideas.\",\n",
+ " llm_config=gemini_llm_config,\n",
+ ")\n",
+ "groupchat = autogen.GroupChat(agents=[user_proxy, coder, pm], messages=[], max_round=12)\n",
+ "manager = autogen.GroupChatManager(groupchat=groupchat, llm_config=gpt_llm_config)\n",
+ "user_proxy.initiate_chat(\n",
+ " manager, message=\"Find a latest paper about gpt-4 on arxiv and find its potential applications in software.\"\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "## Function Calling with Gemini\n",
+ "\n",
+ "Here is an example of Gemini with Function Calling,"
+ ]
+ },
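+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Before the full example, the sketch below illustrates the tool-registration pattern AutoGen uses for function calling: a tool is exposed to the model with `register_for_llm` on the assistant and bound to an executor with `register_for_execution` on the user proxy. The agent names, the `run_python_cell` function, and its body are illustrative placeholders (they reuse `gemini_llm_config` from the cell above), not the exact code of the example that follows.\n",
+ "\n",
+ "```python\n",
+ "from typing import Annotated\n",
+ "\n",
+ "from IPython import get_ipython\n",
+ "\n",
+ "import autogen\n",
+ "\n",
+ "# Illustrative agents: a Gemini-backed assistant proposes tool calls,\n",
+ "# and a user proxy executes them.\n",
+ "assistant = autogen.AssistantAgent(name='assistant', llm_config=gemini_llm_config)\n",
+ "executor = autogen.UserProxyAgent(name='executor', human_input_mode='NEVER', code_execution_config=False)\n",
+ "\n",
+ "\n",
+ "@executor.register_for_execution()\n",
+ "@assistant.register_for_llm(name='python', description='Run a Python cell and return the result.')\n",
+ "def run_python_cell(cell: Annotated[str, 'Valid Python code to execute.']) -> str:\n",
+ "    # Run the cell in the current IPython kernel and return its last result as text.\n",
+ "    result = get_ipython().run_cell(cell)\n",
+ "    return str(result.result)\n",
+ "```"
+ ]
+ },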
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "\u001b[33muser_proxy\u001b[0m (to chatbot):\n",
+ "\n",
+ "Draw two agents chatting with each other with an example dialog. Don't add plt.show().\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[33mchatbot\u001b[0m (to user_proxy):\n",
+ "\n",
+ "\u001b[32m***** Suggested tool call (call_l7Rz8YLE4F2y8nGLCaroD6XL): python *****\u001b[0m\n",
+ "Arguments: \n",
+ "{\n",
+ " \"cell\": `\n",
+ "import matplotlib.pyplot as plt\n",
+ "\n",
+ "# Create figure and axes\n",
+ "fig, ax = plt.subplots()\n",
+ "\n",
+ "# Define agent coordinates\n",
+ "agent1_x, agent1_y = 1, 1\n",
+ "agent2_x, agent2_y = 3, 1\n",
+ "\n",
+ "# Draw agents as circles\n",
+ "agent1 = plt.Circle((agent1_x, agent1_y), 0.1, color='blue')\n",
+ "agent2 = plt.Circle((agent2_x, agent2_y), 0.1, color='green')\n",
+ "ax.add_patch(agent1)\n",
+ "ax.add_patch(agent2)\n",
+ "\n",
+ "# Add example dialog\n",
+ "dialog1 = \"Hi, how are you?\"\n",
+ "dialog2 = \"I'm good, thanks!\"\n",
+ "\n",
+ "# Add text labels for the dialog\n",
+ "plt.text(agent1_x, agent1_y + 0.3, dialog1, fontsize=12, ha='center')\n",
+ "plt.text(agent2_x, agent2_y + 0.3, dialog2, fontsize=12, ha='center')\n",
+ "\n",
+ "# Set limits and turn off the axes\n",
+ "ax.set_xlim(0, 4)\n",
+ "ax.set_ylim(0, 2)\n",
+ "ax.axis('off')\n",
+ "`\n",
+ "}\n",
+ "\u001b[32m***********************************************************************\u001b[0m\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[33muser_proxy\u001b[0m (to chatbot):\n",
+ "\n",
+ "\u001b[33muser_proxy\u001b[0m (to chatbot):\n",
+ "\n",
+ "\u001b[32m***** Response from calling tool (call_l7Rz8YLE4F2y8nGLCaroD6XL) *****\u001b[0m\n",
+ "Error: Expecting value: line 1 column 12 (char 11)\n",
+ " You argument should follow json format.\n",
+ "\u001b[32m**********************************************************************\u001b[0m\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[33mchatbot\u001b[0m (to user_proxy):\n",
+ "\n",
+ "\u001b[32m***** Suggested tool call (call_VjzGKf34UMKMlSHCrQpJ6ICK): python *****\u001b[0m\n",
+ "Arguments: \n",
+ "{\n",
+ " \"cell\": \"import matplotlib.pyplot as plt\\n\\n# Create figure and axes\\nfig, ax = plt.subplots()\\n\\n# Define agent coordinates\\nagent1_x, agent1_y = 1, 1\\nagent2_x, agent2_y = 3, 1\\n\\n# Draw agents as circles\\nagent1 = plt.Circle((agent1_x, agent1_y), 0.1, color='blue')\\nagent2 = plt.Circle((agent2_x, agent2_y), 0.1, color='green')\\nax.add_patch(agent1)\\nax.add_patch(agent2)\\n\\n# Add example dialog\\ndialog1 = \\\"Hi, how are you?\\\"\\ndialog2 = \\\"I'm good, thanks!\\\"\\n\\n# Add text labels for the dialog\\nplt.text(agent1_x, agent1_y + 0.3, dialog1, fontsize=12, ha='center')\\nplt.text(agent2_x, agent2_y + 0.3, dialog2, fontsize=12, ha='center')\\n\\n# Set limits and turn off the axes\\nax.set_xlim(0, 4)\\nax.set_ylim(0, 2)\\nax.axis('off')\"\n",
+ "}\n",
+ "\u001b[32m***********************************************************************\u001b[0m\n",
+ "\n",
+ "--------------------------------------------------------------------------------\n",
+ "\u001b[35m\n",
+ ">>>>>>>> EXECUTING FUNCTION python...\u001b[0m\n"
+ ]
+ },
+ {
+ "data": {
+ "text/plain": [
+ "(0.0, 4.0, 0.0, 2.0)"
+ ]
+ },
+ "execution_count": 5,
+ "metadata": {},
+ "output_type": "execute_result"
+ },
+ {
+ "data": {
+ "image/png": "iVBORw0KGgoAAAANSUhEUgAAAgMAAAGFCAYAAABg2vAPAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy80BEi2AAAACXBIWXMAAA9hAAAPYQGoP6dpAAAgJklEQVR4nO3deXRU9d3H8c9kshGihGBIgGLYgsqmKKIgAQQUBEGQTUUhWAEVBS1aly5Cq3CktQdl0YCURdzKUhSNFmSRFBdEcAOMIItiyk4AC4Ek83v+yJMpQxIIYUIw3/frnBzNnTv33lzmN3nPnXsnHuecEwAAMCukvDcAAACUL2IAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YAADAOGIAAADjiAEAAIwjBgAAMI4YOAt16tRRSkpKqe7bvn17NWnSJLgbBABFaN++vdq3bx/UZW7btk0ej0d//etfg7rckvJ4PHrggQfKZd0VETHw/2bOnCmPx6M1a9YUeTu/vG346KOP1LdvX1188cWKjo5W69at9eGHH5b3ZuEMFTWeR48erTp16pTfRv1CpaWlafTo0eW9GWWq4PGybdu28t6UckMMnIWMjAxNmzatvDcDQTRgwADt27dPjzzyiJ555hnt3btXXbp00bffflvemwaUi7S0NI0ZM6a8NwNlLLS8N+CXLCIiorw34byTm5srn8+n8PDw8t6UUnnjjTd0zTXX+L+/6aabdMkll2j+/Pn63e9+V45bBgBlhyMDZ+FszhkosGHDBl1//fWKiopSrVq1NH78+ELz7N69W7/+9a8VHx+vyMhIXX755Zo1a1bAPFdeeaVuvfXWgGlNmzaVx+PRV1995Z/25ptvyuPxaOPGjcVu0/Hjx/XHP/5RV111lapUqaLKlSsrOTlZy5cvD5jvxPcMJ0yYoPr16ysiIkIbNmyQJH377bfq06ePYmNjFRkZqRYtWujtt98+5f5wzqlOnTq65ZZbCt2WnZ2tKlWqaNiwYWe0b1asWCGPx6MVK1YUuf0zZ870TzsxBCQpMjLSv09Q8RS87zx37lw1atRIlSpVUqtWrfT1119LklJTU9WgQQNFRkaqffv2JT6MvGLFCrVo0UKRkZGqX7++UlNTNXr0aHk8noD5cnNz9ec//9k/durUqaMnn3xSx44dK7TMKVOmqHHjxoqIiFDNmjU1fPhwZWVlFZpv6tSpql+/vipVqqSWLVsqPT39jPdLgZSUFE2ePFlS/r4q+CpunREREbr66qv12WefBdz+1VdfKSUlRfXq1VNkZKQSEhJ09913a9++fQHzFeyjzZs3KyUlRTExMapSpYoGDx6sI0eOnHZ7n376aYWEhGjixIn+aRMnTlTjxo0VFRWlqlWrqkWLFnrttddKszsqNI4MnOTgwYPau3dvoek5OTlBX9eBAwfUpUsX3XrrrerXr5/mzZunxx57TE2bNtVNN90kSTp69Kjat2+vzZs364EHHlDdunU1d+5cpaSkKCsrSyNHjpQkJScn6/XXX/cve//+/Vq/fr1CQkKUnp6uZs2aSZLS09MVFxenyy67rNjtOnTokF5++WXdfvvtGjJkiA4fPqzp06erc+fOWr16ta644oqA+WfMmKHs7GwNHTpUERERio2N1fr163XdddepVq1aevzxx1W5cmX94x//UM+ePTV//nz16tWryHV7PB7deeedGj9+vPbv36/Y2Fj/bYsWLdKhQ4d05513ntG+KS2fz6dRo0YpIiJCAwYMOKtl4fyVnp6ut99+W8OHD5ckjRs3TjfffLN++9vfasqUKbr//vt14MABjR8/XnfffbeWLVt2yuWtW7dOXbp0UY0aNTRmzBjl5eXpT3/6k+Li4grNe88992jWrFnq06ePRo0apU8//VTjxo3Txo0b9c9//tM/3+jRozVmzBh16tRJ9913nzIyMvTiiy/qs88+06pVqxQWFiZJmj59uoYNG6bWrVvroYce0pYtW9SjRw/Fxsaqdu3aZ7xvhg0bpszMTC1ZskSvvPJKkfO89tprOnz4sIYNGyaPx6Px48fr1ltv1ZYtW/zbtWTJEm3ZskWDBw9WQkKC1q9fr6lTp2r9+vX65JNPCgVGv379VLduXY0bN05r167Vyy+/rOrVq+vZZ58tdlt///vfa+zYsUpNTdWQIUMkSdOmTdOIESPUp08fjRw5UtnZ2frqq6/06aef6o477jjj/VGhOTjnnJsxY4aTdMqvxo0bB9wnMTHRDRo0qFTra9eunZPkZs+e7Z927Ngxl5CQ4Hr37u2fNmHCBCfJzZkzxz/t+PHjrlWrVi46OtodOnTIOefc3LlznSS3YcMG55xzb7/9touIiHA9evRw/fv399+3WbNmrlevXqfcttzcXHfs2LGAaQcOHHDx8fHu7rvv9k/bunWrk+QuvPBCt3v37oD5O3bs6Jo2beqys7P903w+n2vdurVLSko65fozMjKcJPfiiy8GTO/Ro4erU6eO8/l8Z7Rvli9f7iS55cuXByyvYPtnzJhR5HYMHTrUeTwe99prr51ye3H+KRjPn3322Snnk+QiIiLc1q1b/dNSU1OdJJeQkOB/DDnn3BNPPOEkBcxblO7du7uoqCj3008/+adt2rTJhYaGuhOfcr/44gsnyd1zzz0B93/kkUecJLds2TLnnHO7d+924eHh7sYbb3R5eXn++SZNmuQkub///e/OufzHfvXq1d0VV1wRMH6nTp3qJLl27dqdcru
LM3z4cFfUr4qC8VOtWjW3f/9+//S33nrLSXKLFi3yTzty5Eih+7/++utOklu5cqV/2lNPPeUkBTzPOOdcr169XLVq1QKmSXLDhw93zjk3atQoFxIS4mbOnBkwzy233FLoebsoBY+X0/3bVmS8TXCSyZMna8mSJYW+Cl5ZB1N0dLT/Va4khYeHq2XLltqyZYt/WlpamhISEnT77bf7p4WFhWnEiBH6+eef/We6JycnS5JWrlwpKf/VztVXX60bbrjBf5gwKytL33zzjX/e4ni9Xv97/j6fT/v371dubq5atGihtWvXFpq/d+/eAa969u/fr2XLlqlfv346fPiw9u7dq71792rfvn3q3LmzNm3apJ9++qnY9Tds2FDXXHONXn311YBlvvfeexowYID/VURJ901pTJ8+XVOnTtVzzz0XsHxUPB07dgy4yqDgraLevXvrggsuKDT9xPF5sry8PH3wwQfq2bOnatas6Z/eoEED/9G+AmlpaZKk3/zmNwHTR40aJUl69913JUkffPCBjh8/roceekghIf97yh4yZIguvPBC/3xr1qzR7t27de+99wacs5OSkqIqVaqcZi+UXv/+/VW1alX/9wXPLyfup0qVKvn/Pzs7W3v37tW1114rSUU+p9x7770B3ycnJ2vfvn06dOhQwHTnnB544AE9//zzmjNnjgYNGhRwe0xMjHbs2FHobQsURgycpGXLlurUqVOhrxMf7MHyq1/9qtDhsapVq+rAgQP+77dv366kpKSAJwFJ/sP827dvlyTFx8crKSnJ/4s/PT1dycnJatu2rTIzM7VlyxatWrVKPp/vtDEgSbNmzVKzZs0UGRmpatWqKS4uTu+++64OHjxYaN66desGfL9582Y55/SHP/xBcXFxAV9PPfWUpPz3+k9l4MCBWrVqlf/nmzt3rnJycnTXXXed8b4pjVdeeUUNGzbUww8/XOpl4Jfh4osvDvi+4BfnyYfVC6afOD5Ptnv3bh09elQNGjQodNvJ07Zv366QkJBC0xMSEhQTE+N//Bb895JLLgmYLzw8XPXq1Ss0X1JSUsB8YWFhqlevXrHbfLZO3n8Fz5Un7qf9+/dr5MiRio+PV6VKlRQXF+d/3ijqOaUky5Sk2bNna/LkyZo4cWKR0f7YY48pOjpaLVu2VFJSkoYPH65Vq1aV4qes+IiBcuT1eouc7pwr1fLatGmj9PR0HT16VJ9//rmSk5PVpEkTxcTEKD09Xenp6YqOjlbz5s1PuZw5c+YoJSVF9evX1/Tp0/X+++9ryZIl6tChg3w+X6H5T6x+Sf55HnnkkSKPsixZsqTIJ8sT3XbbbQoLC/MfHZgzZ45atGhR6AmxJIo64UnKfxVXnH379qlGjRpnvC788hQ3DoM9PotT3OPzl6Ik+6lfv36aNm2a7r33Xi1YsECLFy/W+++/L0lFPqeUdN9fd911io+P16RJk7R///5C81922WXKyMjQG2+8oTZt2mj+/Plq06aN/0UJ/ocYOM8lJiZq06ZNhQZMwXXviYmJ/mnJycn64Ycf9MYbbygvL0+tW7dWSEiIPxLS09PVunXrYgdagXnz5qlevXpasGCB7rrrLnXu3FmdOnVSdnZ2iba54FVIWFhYkUdZOnXqFHD4tSixsbHq1q2bXn31VW3fvl2rVq0KOCpwJvum4FXFyWden+rIwe23317sSY5AcapXr67IyEht3ry50G0nT0tMTJTP59OmTZsCpu/atUtZWVn+x2/BfzMyMgLmO378uLZu3VpovpOXl5OTo61bt5b6ZzrbWDlw4ICWLl2qxx9/XGPGjFGvXr10ww03BOVoRYMGDbR48WJlZmaqS5cuOnz4cKF5KleurP79+2vGjBn64Ycf1K1bNz3zzDMBz2cpKSn+K5msIgbOc127dtXOnTv15ptv+qfl5uZq4sSJio6OVrt27fzTCw7/P/vss2rWrJn/sGZycrKWLl2qNWvWlOgtgoJYOLHCP/30U3388ccl2ubq1aurffv2Sk1N1X/+859Ct+/Zs6dEy7nrrru0YcMGPfroo/J6vbrtttsCbi/pvklMTJTX6/WfT1FgypQpxa67f//+xADOmNfrVadOnbRw4UJlZmb6p2/evFnvvfdewLxdu3aVJE2YMCFg+t/+9jdJUrdu3SRJnTp1Unh4uF544YWAMTl9+nQdPHjQP1+LFi0UFxenl156KeBS2JkzZxZ5CWJJVa5cWVLhmC6pop5PpMI/d2k1a9ZMaWlp2rhxo7p3766jR4/6bzv50sXw8HA1atRIzrmAK8T27t2rb7/9tkyuGvul4NLCICsoy2B9rOXQoUOVmpqqlJQUff7556pTp47mzZunVatWacKECQGvsBs0aKCEhARlZGTowQcf9E9v27atHnvsMUkqUQzcfPPNWrBggXr16qVu3bpp69ateumll9SoUSP9/PPPJdruyZMnq02bNmratKmGDBmievXqadeuXfr444+1Y8cOffnll6ddRrdu3VStWjXNnTtXN910k6pXr16qfVOlShX17dtXEydOlMfjUf369fXOO++c8ryFgpPKTv5sAuB0Ro8ercWLF+u6667Tfffdp7y8PE2aNElNmjTRF1984Z/v8ssv16BBgzR16lRlZWWpXbt2Wr16tWbNmqWePXvq+uuvlyTFxcXpiSee0JgxY9SlSxf16NFDGRkZmjJliq6++mr/SchhYWF6+umnNWzYMHXo0EH9+/fX1q1bNWPGjCJfhbdv314ffvjhad/2uOqqqyRJI0aMUOfOnYsM81O58MIL1bZtW40fP145OTmqVauWFi9efFZHK0527bXX6q233lLXrl3Vp08fLVy4UGFhYbrxxhuVkJDgfzth48aNmjRpkrp16xbw3Dlp0iSNGTNGW7dutXt0oHwuYjj/nO5SpHbt2pXo0sKLLrrIXXvttaddX1HLc865QYMGucTExIBpu3btcoMHD3YXXXSRCw8Pd02bNi32cri+ffs6Se7NN9/0Tzt+/LiLiopy4eHh7ujRo6fdNp/P58aOHesSExNdRESEa968uXvnnXcKbVvBpUV/+ctfilzO999/7wYOHOgSEhJcWFiYq1Wrlrv55pvdvHnzTrsNBe6//34nqdjL+0q6b/bs2eN69+7toqKiXNWqVd2wYcPcN998U+ylhYmJiaW+FAvl70wuLSy4PK1AcY/rgktU586de9r1L1261DVv3tyFh4e7+vXru5dfftmNGjXKRUZGBsyXk5PjxowZ4+rWrevCwsJc7dq13RNPPBFwSW6BSZMmuUsvvdSFhYW5+Ph4d99997kDBw4Umm/KlCmubt26LiIiwrVo0cKtXLnStWvXrtDj+aqrrnIJCQmn/Vlyc3Pdgw8+6OLi4pzH4/FfZniq8S/JPfXUU/7vd+zY4Xr16uViYmJclSpVXN++fV1mZmah+QouLdyzZ0/A8oq69K+of7u33nrLhYaGuv79+7u8vDyXmprq2rZt66pVq+YiIiJc/fr13aOPPuoOHjwYcL+C9Vq+tNDjXJDPhjFsw4YNaty4sd555x3/oTucnYcffl
jTp0/Xzp07FRUVVd6bA5Raz549tX79+kLv6ZeHw4cPKzY2VhMmTPB/2BJs45yBIFq+fLlatWpFCARJdna25syZo969exMC+EU58X1rKf+kvrS0tKD/GeHSWrlypWrVquX/pD6AIwM47+zevVsffPCB5s2bp4ULF2rt2rWFPgIZOJ/VqFHD/1n827dv14svvqhjx45p3bp1hT4HADgfcAIhzjsbNmzQgAEDVL16db3wwguEAH5xunTpotdff107d+5URESEWrVqpbFjxxICOG9xZAAAAOM4ZwAAAOOIAQAAjCMGAAAwjhgAAMA4YgAAAOOIAQAAjCMGAAAwjhgAAMA4YgAAAOOIAQAAjCMGAAAwjhgAAMA4YgAAAOOIAQAAjCMGAAAwjhgAAMA4YgAAAOOIAQAAjCMGAAAwjhgAAMA4YgAAAOOIAQAAjCMGAAAwjhgAAMA4YgAAAOOIAQAAjCMGAAAwjhgAAMA4YgAAAOOIAQAAjCMGAAAwLrS8NwDlz+eTli6VFi3K/9q5M396QoLUvXv+V8eOUgjpCPzi+JxPS7cs1aLvFmnRd4u08+f8AZ4QnaDuDbure8Pu6livo0I8DHDLPM45V94bgfLz5ZfS0KHS6tVSaKiUmxt4e8G0li2ladOkZs3KZzsBnLkvd36poYuGanXmaoWGhCrXFzjAC6a1rNVS07pPU7N4BrhVpKBhzz0nXXmltHZt/vcnh8CJ09aulZo3z78PgPPfcx89pyunXqm1O/MH+MkhcOK0tZlr1Ty1uZ77iAFuFUcGjBo3TnryydLf9/HHg7s9AIJnXPo4PbmsdAN8XMdxerwNA9waYsCgF16QRo48u2U8/7w0YkRwtgdA8Lzw6Qsa+f7ZDfDnuzyvEdcwwC0hBoxZty7//f+i3hI4E6Gh+ecZNG8enO0CcPbW/WedWr7cssi3BM5EaEioVt+zWs1rMMCt4JwBQ3JzpYEDpWDkn3PSoEFnHxUAgiPXl6uBCwcqGK/vnHMatHDQWUcFfjmIAUNmz5a++UbKyzv7ZeXlSV9/nb9MAOVv9pez9c3ub5Tnzn6A57k8fb37a83+kgFuBW8TGOGcdOml0qZNwTkyIEkej9SwobRxY/7/AygfzjldOvlSbdq3SU7BGeAeedSwWkNtHL5RHgZ4hceRASP+9S/pu++CFwJS/rIyMqTFi4O3TABn7l/f/0vf7fsuaCEgSU5OGfsytPh7BrgFxIARqamS1xv85Xq90ksvBX+5AEoudU2qvJ7gD3Cvx6uX1jDALeBtAgNycqSYGOnIkbJZflSUlJUlhYWVzfIBFC8nL0cxz8boSE7ZDPCosChlPZalMC8DvCLjyIABH39cdiEg5S/7k0/KbvkAivfxjo/LLAQk6UjOEX2ygwFe0REDBixZkv+5AGUlNJTzBoDysuT7JQoNKbsBHhoSynkDBhADBqxfH5zLCYvj80kbNpTd8gEUb/2e9crzld0A9zmfNuxhgFd0xIABGRnBvYrgZD5f/joAnHsZ+zKCehXByXzOp4x9DPCKjhio4JyTtm0r+/Vs21a2wQGgMOectmVtK/P1bMvaFpRPNsT5ixio4A4fLtuTBwv897/Szz+X/XoA/M/h44fL9OTBAv/N+a9+Ps4Ar8iIgQru2LGKuS4A0rHcczfojuUxwCsyYqCC8/kq5roA5L+fXxHXhXOPGKjgwsMr5roASOHeczfozuW6cO4RAxXcBRdIIefgX9nrlaKjy349AP7ngogLFOIp+wHu9XgVHc4Ar8iIgQouNFSqVavs11OzZtl+sBGAwkJDQlXrgrIf4DUvqFmmH2yE8kcMGJCUVPbraNiw7NcBoLCkamU/wBtWY4BXdMSAAUlJZftHhMLCzk1wACgsKTZJYSFlN8DDQsKUFMsAr+iIAQOSk/P/cmFZycnJXweAcy/54mTl+MpugOf4cpScyACv6PgTxgbs2SPFx5fdJwR6PNKuXVJcXNksH0Dx9vx3j+L/Gl9mH0nskUe7HtmluMoM8IqMIwMGxMVJzZqV3fIvv5wQAMpLXOU4NYsvuwF+ecLlhIABxIARAwaUzSWGHo90xx3BXy6AkhvQdECZXGLokUd3NGGAW8DbBEZkZUk1akjZ2cFdbqVKUmamFBMT3OUCKLms7CzVeK6GsnODO8ArhVZS5qhMxUTGBHW5OP9wZMCImBhp6ND8DwcKFq83f5mEAFC+YiJjNPTKofJ6gjfAvR6vhl41lBAwgiMDhvz4Y/7nAQTr6EBkpPTdd1Lt2sFZHoDS+/Hgj2o4qWHQjg5Ehkbquwe+U+0qDHALODJgSO3a0tixwVveuHGEAHC+qF2ltsZ2CN4AH9dxHCFgCEcGjPH5pA4dpH//W8rLK90yvN78zxVYuvTc/N0DACXjcz51mNVB//7h38pzpRvgXo9XyYnJWjpw6Tn5uwc4P/AvbUxIiLRggdSoUenOH/B6pSZNpPnzCQHgfBPiCdGC/gvUKK5Rqc4f8Hq8alK9ieb3m08IGMO/tkGxsdKKFfm/1M/kF3pISP59li3LXwaA809spVitSFmhJtWbnNEv9BBPiJpUb6Jlg5YpthID3BpiwKjYWCk9XRoxIv+zAk51lMDrzZ9nxIj8+xACwPkttlKs0gena0TLEfLIc8qjBF6PVx55NKLlCKUPTicEjOKcAWjduvwTC9PSpCNHAm+LipK6dpWefFJq3rx8tg9A6a37zzqNTR+rtM1pOpITOMCjwqLUtUFXPZn8pJrXYIBbRgzA7/hx6aOP8j9ESJJq1pRat5bCw8t3uwCcveN5x/XRjx8p83D+AK95QU21rt1a4V4GOIgBAADM45wBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA4
4gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADCOGAAAwDhiAAAA44gBAACMIwYAADDu/wC+Ka8LXmqY8wAAAABJRU5ErkJggg==",
+ "text/plain": [
+ "