Release v0.4.1 (#4686)
Co-authored-by: Reinier van der Leer <github@pwuts.nl>
Co-authored-by: Nicholas Tindle <nick@ntindle.com>
Co-authored-by: Nicholas Tindle <nicktindle@outlook.com>
Co-authored-by: k-boikov <64261260+k-boikov@users.noreply.github.com>
Co-authored-by: merwanehamadi <merwanehamadi@gmail.com>
Co-authored-by: Merwane Hamadi <merwanehamadi@gmail.com>
Co-authored-by: Richard Beales <rich@richbeales.net>
Co-authored-by: Luke K <2609441+lc0rp@users.noreply.github.com>
Co-authored-by: Luke K (pr-0f3t) <2609441+lc0rp@users.noreply.github.com>
Co-authored-by: Erik Peterson <e@eriklp.com>
Co-authored-by: Auto-GPT-Bot <github-bot@agpt.co>
Co-authored-by: Benny van der Lans <49377421+bfalans@users.noreply.github.com>
Co-authored-by: Jan <jan-github@phobia.de>
Co-authored-by: Robin Richtsfeld <robin.richtsfeld@gmail.com>
Co-authored-by: Marc Bornträger <marc.borntraeger@gmail.com>
Co-authored-by: Stefan Ayala <stefanayala3266@gmail.com>
Co-authored-by: javableu <45064273+javableu@users.noreply.github.com>
Co-authored-by: DGdev91 <DGdev91@users.noreply.github.com>
Co-authored-by: Kinance <kinance@gmail.com>
Co-authored-by: digger yu <digger-yu@outlook.com>
Co-authored-by: David <scenaristeur@gmail.com>
Co-authored-by: gravelBridge <john.tian31@gmail.com>

Fix Python CI "update cassettes" step (#4591)
fix CI (#4596)
Fix inverted logic for deny_command (#4563)
fix current_score.json generation (#4601)
Fix duckduckgo rate limiting (#4592)
Fix debug code challenge (#4632)
Fix issues with information retrieval challenge a (#4622)
fix issues with env configuration and .env.template (#4630)
Fix prompt issue causing 'No Command' issues and challenge to fail (#4623)
Fix benchmark logs (#4653)
Fix typo in docs/setup.md (#4613)
Fix run.sh shebang (#4561)
Fix autogpt docker image not working because missing prompt_settings (#4680)
Fix execute_command coming from plugins (#4730)

pull/4744/head v0.4.1
parent 25a7957bb8
commit abb397e442
@ -2,6 +2,7 @@
|
|||
*.template
|
||||
*.yaml
|
||||
*.yml
|
||||
!prompt_settings.yaml
|
||||
|
||||
*.md
|
||||
*.png
|
||||
|
|
.env.template
|
@ -1,10 +1,16 @@
|
|||
# For further descriptions of these settings see docs/configuration/options.md or go to docs.agpt.co
|
||||
|
||||
################################################################################
|
||||
### AUTO-GPT - GENERAL SETTINGS
|
||||
################################################################################
|
||||
|
||||
## OPENAI_API_KEY - OpenAI API Key (Example: my-openai-api-key)
|
||||
OPENAI_API_KEY=your-openai-api-key
|
||||
|
||||
## EXECUTE_LOCAL_COMMANDS - Allow local command execution (Default: False)
|
||||
## RESTRICT_TO_WORKSPACE - Restrict file operations to workspace ./auto_gpt_workspace (Default: True)
|
||||
# EXECUTE_LOCAL_COMMANDS=False
|
||||
|
||||
## RESTRICT_TO_WORKSPACE - Restrict file operations to workspace ./auto_gpt_workspace (Default: True)
|
||||
# RESTRICT_TO_WORKSPACE=True
|
||||
|
||||
## USER_AGENT - Defines the user-agent used by the requests library to browse websites (string)
|
||||
|
@ -13,211 +19,186 @@
|
|||
## AI_SETTINGS_FILE - Specifies which AI Settings file to use (defaults to ai_settings.yaml)
|
||||
# AI_SETTINGS_FILE=ai_settings.yaml
|
||||
|
||||
## PLUGINS_CONFIG_FILE - The path to the plugins_config.yaml file (Default plugins_config.yaml)
|
||||
# PLUGINS_CONFIG_FILE=plugins_config.yaml
|
||||
|
||||
## PROMPT_SETTINGS_FILE - Specifies which Prompt Settings file to use (defaults to prompt_settings.yaml)
|
||||
# PROMPT_SETTINGS_FILE=prompt_settings.yaml
|
||||
|
||||
## OPENAI_API_BASE_URL - Custom URL for the OpenAI API, useful for connecting to custom backends. No effect if USE_AZURE is true; leave blank to keep the default URL
|
||||
# the following is an example:
|
||||
# OPENAI_API_BASE_URL=http://localhost:443/v1
|
||||
|
||||
## AUTHORISE COMMAND KEY - Key to authorise commands
|
||||
# AUTHORISE_COMMAND_KEY=y
|
||||
|
||||
## EXIT_KEY - Key to exit AUTO-GPT
|
||||
# EXIT_KEY=n
|
||||
|
||||
## PLAIN_OUTPUT - Enabling plain output will disable the spinner (Default: False)
|
||||
## Note: Spinner is used to indicate that Auto-GPT is working on something in the background
|
||||
## PLAIN_OUTPUT - Plain output, which disables the spinner (Default: False)
|
||||
# PLAIN_OUTPUT=False
|
||||
|
||||
## DISABLED_COMMAND_CATEGORIES - The list of categories of commands that are disabled. Each of the following is an option:
|
||||
## autogpt.commands.analyze_code
|
||||
## autogpt.commands.audio_text
|
||||
## autogpt.commands.execute_code
|
||||
## autogpt.commands.file_operations
|
||||
## autogpt.commands.git_operations
|
||||
## autogpt.commands.google_search
|
||||
## autogpt.commands.image_gen
|
||||
## autogpt.commands.improve_code
|
||||
## autogpt.commands.web_selenium
|
||||
## autogpt.commands.write_tests
|
||||
## autogpt.app
|
||||
## autogpt.commands.task_statuses
|
||||
## For example, to disable coding-related features, uncomment the next line
|
||||
# DISABLED_COMMAND_CATEGORIES=autogpt.commands.analyze_code,autogpt.commands.execute_code,autogpt.commands.git_operations,autogpt.commands.improve_code,autogpt.commands.write_tests
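For illustration, a minimal Python sketch of how a DISABLED_COMMAND_CATEGORIES value like the one above could be applied when deciding which command modules to register. The helper name and module list are hypothetical; Auto-GPT's actual registration logic lives in its command registry.

```python
import os

def enabled_command_modules(all_modules: list[str]) -> list[str]:
    """Illustrative helper: drop any module listed in DISABLED_COMMAND_CATEGORIES."""
    raw = os.getenv("DISABLED_COMMAND_CATEGORIES", "")
    disabled = {category.strip() for category in raw.split(",") if category.strip()}
    return [module for module in all_modules if module not in disabled]

# Example with two of the categories listed above:
print(enabled_command_modules(["autogpt.commands.execute_code", "autogpt.commands.google_search"]))
```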
|
||||
|
||||
## DENY_COMMANDS - The list of commands that are not allowed to be executed by Auto-GPT (Default: None)
|
||||
# the following are examples:
|
||||
# DENY_COMMANDS=cd,nano,vim,vi,emacs,rm,sudo,top,ping,ssh,scp
|
||||
|
||||
## ALLOW_COMMANDS - ONLY these commands will be allowed to be executed by Auto-GPT
|
||||
# the following are examples:
|
||||
# ALLOW_COMMANDS=ls,git,cat,grep,find,echo,ps,curl,wget
|
||||
|
||||
## DISABLED_COMMAND_CATEGORIES - The list of categories of commands that are disabled (Default: None)
|
||||
# DISABLED_COMMAND_CATEGORIES=
|
||||
|
||||
################################################################################
|
||||
### LLM PROVIDER
|
||||
################################################################################
|
||||
|
||||
### OPENAI
|
||||
## OPENAI_API_KEY - OpenAI API Key (Example: my-openai-api-key)
|
||||
|
||||
|
||||
## NOTE: https://platform.openai.com/docs/api-reference/completions
|
||||
# The temperature setting in language models like GPT controls the balance between predictable and random responses.
|
||||
# Lower temperature makes the responses more focused and deterministic, while higher temperature makes them more
|
||||
# creative and varied. The temperature range typically goes from 0 to 2 in OpenAI's implementation.
|
||||
##
|
||||
## TEMPERATURE - Sets temperature in OpenAI (Default: 0)
|
||||
##
|
||||
###
|
||||
# TEMPERATURE=0
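As a rough sketch (not Auto-GPT's actual code path), the setting could be read from the environment and clamped like this before being passed along to the OpenAI API:

```python
import os

# Illustrative only: fall back to the documented default of 0 and clamp to OpenAI's 0-2 range.
temperature = float(os.getenv("TEMPERATURE", "0"))
temperature = max(0.0, min(2.0, temperature))
print(f"Using temperature={temperature}")
```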
|
||||
|
||||
## OPENAI_ORGANIZATION - Your OpenAI Organization key (Default: None)
|
||||
# OPENAI_ORGANIZATION=
|
||||
|
||||
## USE_AZURE - Use Azure OpenAI or not (Default: False)
|
||||
OPENAI_API_KEY=your-openai-api-key
|
||||
# TEMPERATURE=0
|
||||
# USE_AZURE=False
|
||||
# OPENAI_ORGANIZATION=your-openai-organization-key-if-applicable
|
||||
|
||||
### AZURE
|
||||
# moved to `azure.yaml.template`
|
||||
|
||||
################################################################################
|
||||
### LLM MODELS
|
||||
################################################################################
|
||||
|
||||
## SMART_LLM_MODEL - Smart language model (Default: gpt-4)
|
||||
## SMART_LLM_MODEL - Smart language model (Default: gpt-3.5-turbo)
|
||||
# SMART_LLM_MODEL=gpt-3.5-turbo
|
||||
|
||||
## FAST_LLM_MODEL - Fast language model (Default: gpt-3.5-turbo)
|
||||
# SMART_LLM_MODEL=gpt-4
|
||||
# FAST_LLM_MODEL=gpt-3.5-turbo
|
||||
|
||||
### LLM MODEL SETTINGS
|
||||
## FAST_TOKEN_LIMIT - Fast token limit for OpenAI (Default: 4000)
|
||||
## SMART_TOKEN_LIMIT - Smart token limit for OpenAI (Default: 8000)
|
||||
## When using --gpt3only this needs to be set to 4000.
|
||||
# FAST_TOKEN_LIMIT=4000
|
||||
# SMART_TOKEN_LIMIT=8000
|
||||
|
||||
### EMBEDDINGS
|
||||
## EMBEDDING_MODEL - Model to use for creating embeddings
|
||||
# EMBEDDING_MODEL=text-embedding-ada-002
|
||||
|
||||
################################################################################
|
||||
### SHELL EXECUTION
|
||||
################################################################################
|
||||
|
||||
## SHELL_COMMAND_CONTROL - Whether to use "allowlist" or "denylist" to determine what shell commands can be executed (Default: denylist)
|
||||
# SHELL_COMMAND_CONTROL=denylist
|
||||
|
||||
## ONLY if SHELL_COMMAND_CONTROL is set to denylist:
|
||||
## SHELL_DENYLIST - List of shell commands that ARE NOT allowed to be executed by Auto-GPT (Default: sudo,su)
|
||||
# SHELL_DENYLIST=sudo,su
|
||||
|
||||
## ONLY if SHELL_COMMAND_CONTROL is set to allowlist:
|
||||
## SHELL_ALLOWLIST - List of shell commands that ARE allowed to be executed by Auto-GPT (Default: None)
|
||||
# SHELL_ALLOWLIST=
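A minimal sketch of how the allowlist/denylist control described above might be evaluated. The function name is illustrative and the real check is implemented in Auto-GPT's shell execution command (see the deny_command fix #4563 included in this release):

```python
import os

def shell_command_allowed(command_line: str) -> bool:
    """Illustrative only; Auto-GPT's real check lives in its shell execution command."""
    executable = command_line.strip().split()[0]
    control = os.getenv("SHELL_COMMAND_CONTROL", "denylist")
    if control == "allowlist":
        allowlist = [c for c in os.getenv("SHELL_ALLOWLIST", "").split(",") if c]
        return executable in allowlist
    denylist = [c for c in os.getenv("SHELL_DENYLIST", "sudo,su").split(",") if c]
    return executable not in denylist

print(shell_command_allowed("ls -la"))       # True with the default denylist
print(shell_command_allowed("sudo reboot"))  # False with the default denylist
```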
|
||||
|
||||
################################################################################
|
||||
### MEMORY
|
||||
################################################################################
|
||||
|
||||
### MEMORY_BACKEND - Memory backend type
|
||||
## json_file - Default
|
||||
## redis - Redis (if configured)
|
||||
## MEMORY_INDEX - Name of index created in Memory backend (Default: auto-gpt)
|
||||
# MEMORY_BACKEND=json_file
|
||||
# MEMORY_INDEX=auto-gpt-memory
|
||||
### General
|
||||
|
||||
## MEMORY_BACKEND - Memory backend type
|
||||
# MEMORY_BACKEND=json_file
|
||||
|
||||
## MEMORY_INDEX - Value used in the Memory backend for scoping, naming, or indexing (Default: auto-gpt)
|
||||
# MEMORY_INDEX=auto-gpt
|
||||
|
||||
### Redis
|
||||
|
||||
### REDIS
|
||||
## REDIS_HOST - Redis host (Default: localhost, use "redis" for docker-compose)
|
||||
## REDIS_PORT - Redis port (Default: 6379)
|
||||
## REDIS_PASSWORD - Redis password (Default: "")
|
||||
## WIPE_REDIS_ON_START - Wipes data / index on start (Default: True)
|
||||
# REDIS_HOST=localhost
|
||||
|
||||
## REDIS_PORT - Redis port (Default: 6379)
|
||||
# REDIS_PORT=6379
|
||||
|
||||
## REDIS_PASSWORD - Redis password (Default: "")
|
||||
# REDIS_PASSWORD=
|
||||
|
||||
## WIPE_REDIS_ON_START - Wipes data / index on start (Default: True)
|
||||
# WIPE_REDIS_ON_START=True
|
||||
|
||||
################################################################################
|
||||
### IMAGE GENERATION PROVIDER
|
||||
################################################################################
|
||||
|
||||
### COMMON SETTINGS
|
||||
## IMAGE_PROVIDER - Image provider - dalle, huggingface, or sdwebui
|
||||
## IMAGE_SIZE - Image size (Example: 256)
|
||||
## Image sizes for dalle: 256, 512, 1024
|
||||
### Common
|
||||
|
||||
## IMAGE_PROVIDER - Image provider (Default: dalle)
|
||||
# IMAGE_PROVIDER=dalle
|
||||
|
||||
## IMAGE_SIZE - Image size (Default: 256)
|
||||
# IMAGE_SIZE=256
|
||||
|
||||
### HUGGINGFACE
|
||||
## HUGGINGFACE_IMAGE_MODEL - Text-to-image model from Huggingface (Default: CompVis/stable-diffusion-v1-4)
|
||||
## HUGGINGFACE_API_TOKEN - HuggingFace API token (Example: my-huggingface-api-token)
|
||||
# HUGGINGFACE_IMAGE_MODEL=CompVis/stable-diffusion-v1-4
|
||||
# HUGGINGFACE_API_TOKEN=your-huggingface-api-token
|
||||
### Huggingface (IMAGE_PROVIDER=huggingface)
|
||||
|
||||
### STABLE DIFFUSION WEBUI
|
||||
## SD_WEBUI_AUTH - Stable diffusion webui username:password pair (Example: username:password)
|
||||
## SD_WEBUI_URL - Stable diffusion webui API URL (Example: http://127.0.0.1:7860)
|
||||
## HUGGINGFACE_IMAGE_MODEL - Text-to-image model from Huggingface (Default: CompVis/stable-diffusion-v1-4)
|
||||
# HUGGINGFACE_IMAGE_MODEL=CompVis/stable-diffusion-v1-4
|
||||
|
||||
## HUGGINGFACE_API_TOKEN - HuggingFace API token (Default: None)
|
||||
# HUGGINGFACE_API_TOKEN=
|
||||
|
||||
### Stable Diffusion (IMAGE_PROVIDER=sdwebui)
|
||||
|
||||
## SD_WEBUI_AUTH - Stable Diffusion Web UI username:password pair (Default: None)
|
||||
# SD_WEBUI_AUTH=
|
||||
# SD_WEBUI_URL=http://127.0.0.1:7860
|
||||
|
||||
## SD_WEBUI_URL - Stable Diffusion Web UI API URL (Default: http://localhost:7860)
|
||||
# SD_WEBUI_URL=http://localhost:7860
|
||||
|
||||
################################################################################
|
||||
### AUDIO TO TEXT PROVIDER
|
||||
################################################################################
|
||||
|
||||
### HUGGINGFACE
|
||||
# HUGGINGFACE_AUDIO_TO_TEXT_MODEL=facebook/wav2vec2-base-960h
|
||||
## AUDIO_TO_TEXT_PROVIDER - Audio-to-text provider (Default: huggingface)
|
||||
# AUDIO_TO_TEXT_PROVIDER=huggingface
|
||||
|
||||
## HUGGINGFACE_AUDIO_TO_TEXT_MODEL - The model for HuggingFace to use (Default: CompVis/stable-diffusion-v1-4)
|
||||
# HUGGINGFACE_AUDIO_TO_TEXT_MODEL=CompVis/stable-diffusion-v1-4
|
||||
|
||||
################################################################################
|
||||
### GIT Provider for repository actions
|
||||
################################################################################
|
||||
|
||||
### GITHUB
|
||||
## GITHUB_API_KEY - Github API key / PAT (Example: github_pat_123)
|
||||
## GITHUB_USERNAME - Github username
|
||||
# GITHUB_API_KEY=github_pat_123
|
||||
# GITHUB_USERNAME=your-github-username
|
||||
################################################################################
|
||||
|
||||
## GITHUB_API_KEY - Github API key / PAT (Default: None)
|
||||
# GITHUB_API_KEY=
|
||||
|
||||
## GITHUB_USERNAME - Github username (Default: None)
|
||||
# GITHUB_USERNAME=
|
||||
|
||||
################################################################################
|
||||
### WEB BROWSING
|
||||
################################################################################
|
||||
|
||||
### BROWSER
|
||||
## HEADLESS_BROWSER - Whether to run the browser in headless mode (default: True)
|
||||
## USE_WEB_BROWSER - Sets the web-browser driver to use with selenium (default: chrome).
|
||||
## Note: set this to either 'chrome', 'firefox', 'safari' or 'edge' depending on your current browser
|
||||
# HEADLESS_BROWSER=True
|
||||
|
||||
## USE_WEB_BROWSER - Sets the web-browser driver to use with selenium (default: chrome)
|
||||
# USE_WEB_BROWSER=chrome
|
||||
## BROWSE_CHUNK_MAX_LENGTH - When browsing a website, defines the length of the chunks to summarize (in number of tokens, excluding the response; 75% of FAST_TOKEN_LIMIT is usually wise)
|
||||
|
||||
## BROWSE_CHUNK_MAX_LENGTH - When browsing a website, defines the length of the chunks to summarize (Default: 3000)
|
||||
# BROWSE_CHUNK_MAX_LENGTH=3000
|
||||
## BROWSE_SPACY_LANGUAGE_MODEL is used to split sentences. Install additional languages via pip, and set the model name here. Example Chinese: python -m spacy download zh_core_web_sm
|
||||
|
||||
## BROWSE_SPACY_LANGUAGE_MODEL - [spaCy language model](https://spacy.io/usage/models) to use when creating chunks (Default: en_core_web_sm)
|
||||
# BROWSE_SPACY_LANGUAGE_MODEL=en_core_web_sm
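For illustration, a small sketch of the chunking idea these settings configure, assuming the en_core_web_sm model has been downloaded (python -m spacy download en_core_web_sm). A naive word count stands in for the model-token counting Auto-GPT actually does:

```python
import spacy

nlp = spacy.load("en_core_web_sm")

def chunk_text(text: str, max_words: int = 3000) -> list[str]:
    """Split text into sentences with spaCy, then pack sentences into chunks
    that stay under a rough size budget (word count as a token stand-in)."""
    sentences = [sent.text.strip() for sent in nlp(text).sents]
    chunks: list[str] = []
    current: list[str] = []
    current_len = 0
    for sentence in sentences:
        words = len(sentence.split())
        if current and current_len + words > max_words:
            chunks.append(" ".join(current))
            current, current_len = [], 0
        current.append(sentence)
        current_len += words
    if current:
        chunks.append(" ".join(current))
    return chunks
```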
|
||||
|
||||
### GOOGLE
|
||||
## GOOGLE_API_KEY - Google API key (Example: my-google-api-key)
|
||||
## CUSTOM_SEARCH_ENGINE_ID - Custom search engine ID (Example: my-custom-search-engine-id)
|
||||
# GOOGLE_API_KEY=your-google-api-key
|
||||
# CUSTOM_SEARCH_ENGINE_ID=your-custom-search-engine-id
|
||||
## GOOGLE_API_KEY - Google API key (Default: None)
|
||||
# GOOGLE_API_KEY=
|
||||
|
||||
## GOOGLE_CUSTOM_SEARCH_ENGINE_ID - Google custom search engine ID (Default: None)
|
||||
# GOOGLE_CUSTOM_SEARCH_ENGINE_ID=
|
||||
|
||||
################################################################################
|
||||
### TTS PROVIDER
|
||||
### TEXT TO SPEECH PROVIDER
|
||||
################################################################################
|
||||
|
||||
### MAC OS
|
||||
## USE_MAC_OS_TTS - Use Mac OS TTS or not (Default: False)
|
||||
# USE_MAC_OS_TTS=False
|
||||
## TEXT_TO_SPEECH_PROVIDER - Which Text to Speech provider to use (Default: gtts)
|
||||
# TEXT_TO_SPEECH_PROVIDER=gtts
|
||||
|
||||
### STREAMELEMENTS
|
||||
## USE_BRIAN_TTS - Use Brian TTS or not (Default: False)
|
||||
# USE_BRIAN_TTS=False
|
||||
### Only if TEXT_TO_SPEECH_PROVIDER=streamelements
|
||||
## STREAMELEMENTS_VOICE - Voice to use for StreamElements (Default: Brian)
|
||||
# STREAMELEMENTS_VOICE=Brian
|
||||
|
||||
### ELEVENLABS
|
||||
## ELEVENLABS_API_KEY - Eleven Labs API key (Example: my-elevenlabs-api-key)
|
||||
## ELEVENLABS_VOICE_1_ID - Eleven Labs voice 1 ID (Example: my-voice-id-1)
|
||||
## ELEVENLABS_VOICE_2_ID - Eleven Labs voice 2 ID (Example: my-voice-id-2)
|
||||
# ELEVENLABS_API_KEY=your-elevenlabs-api-key
|
||||
# ELEVENLABS_VOICE_1_ID=your-voice-id-1
|
||||
# ELEVENLABS_VOICE_2_ID=your-voice-id-2
|
||||
### Only if TEXT_TO_SPEECH_PROVIDER=elevenlabs
|
||||
## ELEVENLABS_API_KEY - Eleven Labs API key (Default: None)
|
||||
# ELEVENLABS_API_KEY=
|
||||
|
||||
## ELEVENLABS_VOICE_ID - Eleven Labs voice ID (Default: None)
|
||||
# ELEVENLABS_VOICE_ID=
|
||||
|
||||
################################################################################
|
||||
### TWITTER API
|
||||
### CHAT MESSAGES
|
||||
################################################################################
|
||||
|
||||
# TW_CONSUMER_KEY=
|
||||
# TW_CONSUMER_SECRET=
|
||||
# TW_ACCESS_TOKEN=
|
||||
# TW_ACCESS_TOKEN_SECRET=
|
||||
|
||||
################################################################################
|
||||
### ALLOWLISTED PLUGINS
|
||||
################################################################################
|
||||
|
||||
#ALLOWLISTED_PLUGINS - Sets the listed plugins that are allowed (Example: plugin1,plugin2,plugin3)
|
||||
#DENYLISTED_PLUGINS - Sets the listed plugins that are not allowed (Example: plugin1,plugin2,plugin3)
|
||||
ALLOWLISTED_PLUGINS=
|
||||
DENYLISTED_PLUGINS=
|
||||
|
||||
################################################################################
|
||||
### CHAT PLUGIN SETTINGS
|
||||
################################################################################
|
||||
# CHAT_MESSAGES_ENABLED - Enable chat messages (Default: False)
|
||||
## CHAT_MESSAGES_ENABLED - Enable chat messages (Default: False)
|
||||
# CHAT_MESSAGES_ENABLED=False
|
||||
|
|
|
@ -41,7 +41,7 @@ By following these guidelines, your PRs are more likely to be merged quickly aft
|
|||
black .
|
||||
isort .
|
||||
mypy
|
||||
autoflake --remove-all-unused-imports --recursive --ignore-init-module-imports autogpt tests --in-place
|
||||
autoflake --remove-all-unused-imports --recursive --ignore-init-module-imports --ignore-pass-after-docstring autogpt tests --in-place
|
||||
```
|
||||
|
||||
<!-- If you haven't added tests, please explain why. If you have, check the appropriate box. If you've ensured your PR is atomic and well-documented, check the corresponding boxes. -->
|
||||
|
|
|
@ -1,31 +1,73 @@
|
|||
name: Run Benchmarks
|
||||
name: Benchmarks
|
||||
|
||||
on:
|
||||
schedule:
|
||||
- cron: '0 8 * * *'
|
||||
workflow_dispatch:
|
||||
|
||||
jobs:
|
||||
build:
|
||||
Benchmark:
|
||||
name: ${{ matrix.config.task-name }}
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
env:
|
||||
python-version: '3.10'
|
||||
timeout-minutes: 30
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
config:
|
||||
- python-version: "3.10"
|
||||
task: "tests/challenges"
|
||||
task-name: "Mandatory Tasks"
|
||||
- python-version: "3.10"
|
||||
task: "--beat-challenges -ra tests/challenges"
|
||||
task-name: "Challenging Tasks"
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v3
|
||||
|
||||
- name: Set up Python ${{ env.python-version }}
|
||||
uses: actions/setup-python@v4
|
||||
with:
|
||||
python-version: ${{ env.python-version }}
|
||||
ref: master
|
||||
|
||||
- name: Install dependencies
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v2
|
||||
with:
|
||||
python-version: ${{ matrix.config.python-version }}
|
||||
|
||||
- id: get_date
|
||||
name: Get date
|
||||
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Set up Python dependency cache
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: ~/.cache/pip
|
||||
key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}-${{ steps.get_date.outputs.date }}
|
||||
|
||||
- name: Install Python dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
pip install -r requirements.txt
|
||||
|
||||
- name: benchmark
|
||||
env:
|
||||
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
|
||||
- name: Run pytest with coverage
|
||||
run: |
|
||||
python benchmark/benchmark_entrepreneur_gpt_with_undecisive_user.py
|
||||
rm -rf tests/Auto-GPT-test-cassettes
|
||||
pytest -n auto --record-mode=all ${{ matrix.config.task }}
|
||||
env:
|
||||
CI: true
|
||||
PROXY: ${{ secrets.PROXY }}
|
||||
AGENT_MODE: ${{ secrets.AGENT_MODE }}
|
||||
AGENT_TYPE: ${{ secrets.AGENT_TYPE }}
|
||||
PLAIN_OUTPUT: True
|
||||
|
||||
- name: Upload logs as artifact
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: test-logs-${{ matrix.config.task-name }}
|
||||
path: logs/
|
||||
|
||||
- name: Upload cassettes as artifact
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: cassettes-${{ matrix.config.task-name }}
|
||||
path: tests/Auto-GPT-test-cassettes/
|
||||
|
|
|
@ -5,20 +5,20 @@ on:
|
|||
branches: [ master, ci-test* ]
|
||||
paths-ignore:
|
||||
- 'tests/Auto-GPT-test-cassettes'
|
||||
- 'tests/integration/challenges/current_score.json'
|
||||
- 'tests/challenges/current_score.json'
|
||||
pull_request:
|
||||
branches: [ stable, master ]
|
||||
branches: [ stable, master, release-* ]
|
||||
pull_request_target:
|
||||
branches: [ master, ci-test* ]
|
||||
branches: [ master, release-*, ci-test* ]
|
||||
|
||||
concurrency:
|
||||
group: ${{ format('ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }}
|
||||
cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') && github.event.pull_request.head.repo.fork == (github.event_name == 'pull_request_target') }}
|
||||
group: ${{ format('ci-{0}', github.head_ref && format('{0}-{1}', github.event_name, github.event.pull_request.number) || github.sha) }}
|
||||
cancel-in-progress: ${{ startsWith(github.event_name, 'pull_request') }}
|
||||
|
||||
jobs:
|
||||
lint:
|
||||
# eliminate duplicate runs on master
|
||||
if: github.event_name == 'push' || github.ref_name != 'master' || (github.event.pull_request.head.repo.fork == (github.event_name == 'pull_request_target'))
|
||||
# eliminate duplicate runs
|
||||
if: github.event_name == 'push' || (github.event.pull_request.head.repo.fork == (github.event_name == 'pull_request_target'))
|
||||
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
|
@ -37,6 +37,16 @@ jobs:
|
|||
with:
|
||||
python-version: ${{ env.min-python-version }}
|
||||
|
||||
- id: get_date
|
||||
name: Get date
|
||||
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Set up Python dependency cache
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: ~/.cache/pip
|
||||
key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}-${{ steps.get_date.outputs.date }}
|
||||
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
|
@ -59,12 +69,12 @@ jobs:
|
|||
|
||||
- name: Check for unused imports and pass statements
|
||||
run: |
|
||||
cmd="autoflake --remove-all-unused-imports --recursive --ignore-init-module-imports autogpt tests"
|
||||
cmd="autoflake --remove-all-unused-imports --recursive --ignore-init-module-imports --ignore-pass-after-docstring autogpt tests"
|
||||
$cmd --check || (echo "You have unused imports or pass statements, please run '${cmd} --in-place'" && exit 1)
|
||||
|
||||
test:
|
||||
# eliminate duplicate runs on master
|
||||
if: github.event_name == 'push' || github.ref_name != 'master' || (github.event.pull_request.head.repo.fork == (github.event_name == 'pull_request_target'))
|
||||
# eliminate duplicate runs
|
||||
if: github.event_name == 'push' || (github.event.pull_request.head.repo.fork == (github.event_name == 'pull_request_target'))
|
||||
|
||||
permissions:
|
||||
# Gives the action the necessary permissions for publishing new
|
||||
|
@ -81,7 +91,7 @@ jobs:
|
|||
python-version: ["3.10"]
|
||||
|
||||
steps:
|
||||
- name: Check out repository
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v3
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
@ -89,8 +99,12 @@ jobs:
|
|||
repository: ${{ github.event.pull_request.head.repo.full_name }}
|
||||
submodules: true
|
||||
|
||||
- id: checkout_cassettes
|
||||
name: Check out cassettes
|
||||
- name: Configure git user Auto-GPT-Bot
|
||||
run: |
|
||||
git config --global user.name "Auto-GPT-Bot"
|
||||
git config --global user.email "github-bot@agpt.co"
|
||||
|
||||
- name: Checkout cassettes
|
||||
if: ${{ startsWith(github.event_name, 'pull_request') }}
|
||||
run: |
|
||||
cassette_branch="${{ github.event.pull_request.user.login }}-${{ github.event.pull_request.head.ref }}"
|
||||
|
@ -102,21 +116,14 @@ jobs:
|
|||
|
||||
git checkout $cassette_branch
|
||||
|
||||
if git merge --no-commit --no-ff ${{ github.event.pull_request.base.ref }}; then
|
||||
echo "Using cassettes from mirror branch, synced to upstream branch '${{ github.event.pull_request.base.ref }}'"
|
||||
# Pick non-conflicting cassette updates from the base branch
|
||||
git merge --no-commit --strategy-option=ours origin/${{ github.event.pull_request.base.ref }}
|
||||
echo "Using cassettes from mirror branch '$cassette_branch'," \
|
||||
"synced to upstream branch '${{ github.event.pull_request.base.ref }}'."
|
||||
else
|
||||
echo "Could not merge upstream changes to cassettes. Using cassettes from ${{ github.event.pull_request.base.ref }}."
|
||||
git merge --abort
|
||||
git checkout ${{ github.event.pull_request.base.ref }}
|
||||
|
||||
# Delete branch to prevent conflict when re-creating it
|
||||
git branch -D $cassette_branch
|
||||
fi
|
||||
echo "cassette_branch=$(git branch --show-current)" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "Branch '$cassette_branch' does not exist in cassette submodule."\
|
||||
"Using cassettes from ${{ github.event.pull_request.base.ref }}."
|
||||
echo "cassette_branch=${{ github.event.pull_request.base.ref }}" >> $GITHUB_OUTPUT
|
||||
git checkout -b $cassette_branch
|
||||
echo "Branch '$cassette_branch' does not exist in cassette submodule." \
|
||||
"Using cassettes from '${{ github.event.pull_request.base.ref }}'."
|
||||
fi
|
||||
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
|
@ -124,30 +131,41 @@ jobs:
|
|||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
|
||||
- name: Install dependencies
|
||||
- id: get_date
|
||||
name: Get date
|
||||
run: echo "date=$(date +'%Y-%m-%d')" >> $GITHUB_OUTPUT
|
||||
|
||||
- name: Set up Python dependency cache
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: ~/.cache/pip
|
||||
key: ${{ runner.os }}-pip-${{ hashFiles('**/requirements.txt') }}-${{ steps.get_date.outputs.date }}
|
||||
|
||||
- name: Install Python dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
pip install -r requirements.txt
|
||||
|
||||
- name: Run pytest tests with coverage
|
||||
- name: Run pytest with coverage
|
||||
run: |
|
||||
pytest -n auto --cov=autogpt --cov-report term-missing --cov-branch --cov-report xml --cov-report term
|
||||
python tests/integration/challenges/utils/build_current_score.py
|
||||
pytest -n auto --cov=autogpt --cov-branch --cov-report term-missing --cov-report xml \
|
||||
tests/unit tests/integration tests/challenges
|
||||
python tests/challenges/utils/build_current_score.py
|
||||
env:
|
||||
CI: true
|
||||
PROXY: ${{ secrets.PROXY }}
|
||||
AGENT_MODE: ${{ vars.AGENT_MODE }}
|
||||
AGENT_TYPE: ${{ vars.AGENT_TYPE }}
|
||||
AGENT_MODE: ${{ secrets.AGENT_MODE }}
|
||||
AGENT_TYPE: ${{ secrets.AGENT_TYPE }}
|
||||
PLAIN_OUTPUT: True
|
||||
|
||||
- name: Upload coverage reports to Codecov
|
||||
uses: codecov/codecov-action@v3
|
||||
|
||||
- id: setup_git_auth
|
||||
name: Set up git token authentication
|
||||
# Cassettes may be pushed even when tests fail
|
||||
if: success() || failure()
|
||||
run: |
|
||||
git config --global user.name "Auto-GPT-Bot"
|
||||
git config --global user.email "github-bot@agpt.co"
|
||||
|
||||
config_key="http.${{ github.server_url }}/.extraheader"
|
||||
base64_pat=$(echo -n "pat:${{ secrets.PAT_REVIEW }}" | base64 -w0)
|
||||
|
||||
|
@ -163,63 +181,44 @@ jobs:
|
|||
- name: Push updated challenge scores
|
||||
if: github.event_name == 'push'
|
||||
run: |
|
||||
score_file="tests/integration/challenges/current_score.json"
|
||||
score_file="tests/challenges/current_score.json"
|
||||
|
||||
if ! git diff --quiet $score_file; then
|
||||
git add $score_file
|
||||
git commit -m "Update challenge scores"
|
||||
git push origin HEAD:${{ github.ref }}
|
||||
git push origin HEAD:${{ github.ref_name }}
|
||||
else
|
||||
echo "The challenge scores didn't change."
|
||||
fi
|
||||
|
||||
- id: push_cassettes
|
||||
name: Push updated cassettes
|
||||
# For pull requests, push updated cassettes even when tests fail
|
||||
if: github.event_name == 'push' || success() || failure()
|
||||
run: |
|
||||
if [ "${{ startsWith(github.event_name, 'pull_request') }}" = "true" ]; then
|
||||
is_pull_request=true
|
||||
cassette_branch="${{ github.event.pull_request.user.login }}-${{ github.event.pull_request.head.ref }}"
|
||||
cassette_source_branch="${{ steps.checkout_cassettes.outputs.cassette_branch }}"
|
||||
base_branch="${{ github.event.pull_request.base.ref }}"
|
||||
else
|
||||
current_branch=$(echo ${{ github.ref }} | sed -e "s/refs\/heads\///g")
|
||||
cassette_branch=$current_branch
|
||||
cassette_branch="${{ github.ref_name }}"
|
||||
fi
|
||||
|
||||
cd tests/Auto-GPT-test-cassettes
|
||||
git fetch origin $cassette_source_branch:$cassette_source_branch
|
||||
|
||||
# Commit & push changes to cassettes if any
|
||||
if ! git diff --quiet $cassette_source_branch --; then
|
||||
if [ "$cassette_branch" != "$cassette_source_branch" ]; then
|
||||
git checkout -b $cassette_branch
|
||||
fi
|
||||
if ! git diff --quiet; then
|
||||
git add .
|
||||
git commit -m "Auto-update cassettes"
|
||||
|
||||
if [ $is_pull_request ]; then
|
||||
git push --force origin HEAD:$cassette_branch
|
||||
else
|
||||
git push origin HEAD:$cassette_branch
|
||||
fi
|
||||
|
||||
if [ ! $is_pull_request ]; then
|
||||
cd ../..
|
||||
if [ $is_pull_request ]; then
|
||||
git fetch origin $base_branch
|
||||
cassette_diff=$(git diff origin/$base_branch)
|
||||
else
|
||||
git add tests/Auto-GPT-test-cassettes
|
||||
git commit -m "Update cassette submodule"
|
||||
git push origin HEAD:$current_branch
|
||||
git push origin HEAD:$cassette_branch
|
||||
fi
|
||||
else
|
||||
echo "No cassette changes to commit"
|
||||
fi
|
||||
|
||||
if [ -n "$cassette_diff" ]; then
|
||||
echo "updated=true" >> $GITHUB_OUTPUT
|
||||
else
|
||||
echo "updated=false" >> $GITHUB_OUTPUT
|
||||
echo "No cassette changes to commit"
|
||||
fi
|
||||
|
||||
- name: Post Set up git token auth
|
||||
|
@ -228,7 +227,7 @@ jobs:
|
|||
git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'
|
||||
git submodule foreach git config --unset-all '${{ steps.setup_git_auth.outputs.config_key }}'
|
||||
|
||||
- name: Apply or remove behaviour change label and comment on PR
|
||||
- name: Apply "behaviour change" label and comment on PR
|
||||
if: ${{ startsWith(github.event_name, 'pull_request') }}
|
||||
run: |
|
||||
PR_NUMBER=${{ github.event.pull_request.number }}
|
||||
|
@ -245,10 +244,11 @@ jobs:
|
|||
|
||||
echo $TOKEN | gh auth login --with-token
|
||||
gh api repos/$REPO/issues/$PR_NUMBER/comments -X POST -F body="You changed AutoGPT's behaviour. The cassettes have been updated and will be merged to the submodule when this Pull Request gets merged."
|
||||
else
|
||||
echo "Removing label..."
|
||||
curl -X DELETE \
|
||||
-H "Authorization: Bearer $TOKEN" \
|
||||
-H "Accept: application/vnd.github.v3+json" \
|
||||
https://api.github.com/repos/$REPO/issues/$PR_NUMBER/labels/behaviour%20change
|
||||
fi
|
||||
|
||||
- name: Upload logs as artifact
|
||||
if: always()
|
||||
uses: actions/upload-artifact@v3
|
||||
with:
|
||||
name: test-logs
|
||||
path: logs/
|
||||
|
|
|
@ -5,9 +5,9 @@ on:
|
|||
branches: [ master ]
|
||||
paths-ignore:
|
||||
- 'tests/Auto-GPT-test-cassettes'
|
||||
- 'tests/integration/challenges/current_score.json'
|
||||
- 'tests/challenges/current_score.json'
|
||||
pull_request:
|
||||
branches: [ master, stable ]
|
||||
branches: [ master, release-*, stable ]
|
||||
|
||||
concurrency:
|
||||
group: ${{ format('docker-ci-{0}', github.head_ref && format('pr-{0}', github.event.pull_request.number) || github.sha) }}
|
||||
|
@ -102,13 +102,15 @@ jobs:
|
|||
- id: test
|
||||
name: Run tests
|
||||
env:
|
||||
PLAIN_OUTPUT: True
|
||||
CI: true
|
||||
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
|
||||
run: |
|
||||
set +e
|
||||
test_output=$(
|
||||
docker run --env CI --env OPENAI_API_KEY --entrypoint python ${{ env.IMAGE_NAME }} -m \
|
||||
pytest -n auto --cov=autogpt --cov-report term-missing --cov-branch --cov-report xml --cov-report term 2>&1
|
||||
pytest -n auto --cov=autogpt --cov-branch --cov-report term-missing \
|
||||
tests/unit tests/integration 2>&1
|
||||
)
|
||||
test_failure=$?
|
||||
|
||||
|
@ -120,3 +122,5 @@ jobs:
|
|||
$test_output
|
||||
\`\`\`
|
||||
$EOF
|
||||
|
||||
exit $test_failure
|
||||
|
|
|
@ -3,10 +3,10 @@ name: "Pull Request auto-label"
|
|||
on:
|
||||
# So that PRs touching the same files as the push are updated
|
||||
push:
|
||||
branches: [ master ]
|
||||
branches: [ master, release-* ]
|
||||
paths-ignore:
|
||||
- 'tests/Auto-GPT-test-cassettes'
|
||||
- 'tests/integration/challenges/current_score.json'
|
||||
- 'tests/challenges/current_score.json'
|
||||
# So that the `dirtyLabel` is removed if conflicts are resolved
|
||||
# We recommend `pull_request_target` so that github secrets are available.
|
||||
# In `pull_request` we wouldn't be able to change labels of fork PRs
|
||||
|
@ -48,11 +48,10 @@ jobs:
|
|||
s_label: 'size/s'
|
||||
s_max_size: 10
|
||||
m_label: 'size/m'
|
||||
m_max_size: 50
|
||||
m_max_size: 100
|
||||
l_label: 'size/l'
|
||||
l_max_size: 200
|
||||
l_max_size: 500
|
||||
xl_label: 'size/xl'
|
||||
message_if_xl: >
|
||||
This PR exceeds the recommended size of 200 lines.
|
||||
This PR exceeds the recommended size of 500 lines.
|
||||
Please make sure you are NOT addressing multiple issues with one PR.
|
||||
Note this PR might be rejected due to its size.
|
||||
|
|
|
@ -1,12 +1,7 @@
|
|||
## Original ignores
|
||||
autogpt/keys.py
|
||||
autogpt/*json
|
||||
autogpt/node_modules/
|
||||
autogpt/__pycache__/keys.cpython-310.pyc
|
||||
autogpt/auto_gpt_workspace
|
||||
package-lock.json
|
||||
*.pyc
|
||||
auto_gpt_workspace/*
|
||||
autogpt/*.json
|
||||
**/auto_gpt_workspace/*
|
||||
*.mpeg
|
||||
.env
|
||||
azure.yaml
|
||||
|
@ -37,6 +32,7 @@ build/
|
|||
develop-eggs/
|
||||
dist/
|
||||
plugins/
|
||||
plugins_config.yaml
|
||||
downloads/
|
||||
eggs/
|
||||
.eggs/
|
||||
|
|
|
@ -31,7 +31,7 @@ repos:
|
|||
hooks:
|
||||
- id: autoflake
|
||||
name: autoflake
|
||||
entry: autoflake --in-place --remove-all-unused-imports --recursive --ignore-init-module-imports autogpt tests
|
||||
entry: autoflake --in-place --remove-all-unused-imports --recursive --ignore-init-module-imports --ignore-pass-after-docstring autogpt tests
|
||||
language: python
|
||||
types: [ python ]
|
||||
- id: pytest-check
|
||||
|
|
BULLETIN.md
|
@ -3,45 +3,25 @@ Check out *https://agpt.co*, the official news & updates site for Auto-GPT!
|
|||
The documentation also has a place here, at *https://docs.agpt.co*
|
||||
|
||||
# For contributors 👷🏼
|
||||
Since releasing v0.3.0, we are working on re-architecting the Auto-GPT core to make
|
||||
it more extensible and to make room for structural performance-oriented R&D.
|
||||
In the meantime, we have less time to process incoming pull requests and issues,
|
||||
so we focus on high-value contributions:
|
||||
* significant bugfixes
|
||||
* *major* improvements to existing functionality and/or docs (so no single-typo fixes)
|
||||
* contributions that help us with re-architecture and other roadmapped items
|
||||
We have to be somewhat selective in order to keep making progress, but this does not
|
||||
mean you can't contribute. Check out the contribution guide on our wiki:
|
||||
Since releasing v0.3.0, we have been working on re-architecting the Auto-GPT core to make it more extensible and make room for structural performance-oriented R&D.
|
||||
|
||||
Check out the contribution guide on our wiki:
|
||||
https://github.com/Significant-Gravitas/Auto-GPT/wiki/Contributing
|
||||
|
||||
# 🚀 v0.4.0 Release 🚀
|
||||
Two weeks and 76 pull requests have passed since v0.3.1, and we are happy to announce
|
||||
the release of v0.4.0!
|
||||
# 🚀 v0.4.1 Release 🚀
|
||||
Two weeks and 50+ pull requests have passed since v0.4.0, and we are happy to announce the release of v0.4.1!
|
||||
|
||||
Highlights and notable changes since v0.3.0:
|
||||
|
||||
## ⚠️ Command `send_tweet` is REMOVED
|
||||
Twitter functionality (and more) is now covered by plugins.
|
||||
|
||||
## ⚠️ Memory backend deprecation 💾
|
||||
The Milvus, Pinecone and Weaviate memory backends were rendered incompatible
|
||||
by work on the memory system, and have been removed in `master`. The Redis
|
||||
memory store was also temporarily removed; we will merge a new implementation ASAP.
|
||||
Whether built-in support for the others will be added back in the future is subject to
|
||||
discussion, feel free to pitch in: https://github.com/Significant-Gravitas/Auto-GPT/discussions/4280
|
||||
|
||||
## Document support in `read_file` 📄
|
||||
Auto-GPT can now read text from document files, with support added for PDF, DOCX, CSV,
|
||||
HTML, TeX and more!
|
||||
|
||||
## Managing Auto-GPT's access to commands ❌🔧
|
||||
You can now disable a set of built-in commands through the *DISABLED_COMMAND_CATEGORIES*
|
||||
variable in .env. Specific shell commands can also be disabled using *DENY_COMMANDS*,
|
||||
or selectively enabled using *ALLOW_COMMANDS*.
|
||||
Highlights and notable changes since v0.4.0:
|
||||
- The .env.template is more readable and better explains the purpose of each environment variable.
|
||||
- More dependable search
|
||||
- The CUSTOM_SEARCH_ENGINE_ID variable has been renamed to GOOGLE_CUSTOM_SEARCH_ENGINE_ID; make sure you update it.
|
||||
- Better read_file
|
||||
- More reliable python code execution
|
||||
- Lots of JSON error fixes
|
||||
- Directory-based plugins
|
||||
|
||||
## Further fixes and changes 🛠️
|
||||
Other highlights include improvements to self-feedback mode and continuous mode,
|
||||
documentation, docker and devcontainer setups, and much more. Most of the improvements
|
||||
that were made are not yet visible to users, but will pay off in the long term.
|
||||
Take a look at the Release Notes on Github for the full changelog!
|
||||
Under the hood, we've done a bunch of work improving architectures and streamlining code. Most of that won't be user-visible.
|
||||
|
||||
## Take a look at the Release Notes on Github for the full changelog!
|
||||
https://github.com/Significant-Gravitas/Auto-GPT/releases
|
||||
|
|
|
@ -6,11 +6,13 @@ FROM python:3.10-slim AS autogpt-base
|
|||
|
||||
# Install browsers
|
||||
RUN apt-get update && apt-get install -y \
|
||||
chromium-driver firefox-esr \
|
||||
ca-certificates
|
||||
chromium-driver firefox-esr ca-certificates \
|
||||
&& apt-get clean && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Install utilities
|
||||
RUN apt-get install -y curl jq wget git
|
||||
RUN apt-get update && apt-get install -y \
|
||||
curl jq wget git \
|
||||
&& apt-get clean && rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Set environment variables
|
||||
ENV PIP_NO_CACHE_DIR=yes \
|
||||
|
@ -38,6 +40,7 @@ WORKDIR /app
|
|||
ONBUILD COPY autogpt/ ./autogpt
|
||||
ONBUILD COPY scripts/ ./scripts
|
||||
ONBUILD COPY plugins/ ./plugins
|
||||
ONBUILD COPY prompt_settings.yaml ./prompt_settings.yaml
|
||||
ONBUILD RUN mkdir ./data
|
||||
|
||||
FROM autogpt-${BUILD_TYPE} AS auto-gpt
|
||||
|
|
|
@ -1,17 +1,17 @@
|
|||
import json
|
||||
import signal
|
||||
import sys
|
||||
from datetime import datetime
|
||||
|
||||
from colorama import Fore, Style
|
||||
|
||||
from autogpt.app import execute_command, get_command
|
||||
from autogpt.commands.command import CommandRegistry
|
||||
from autogpt.config import Config
|
||||
from autogpt.config.ai_config import AIConfig
|
||||
from autogpt.json_utils.json_fix_llm import fix_json_using_multiple_techniques
|
||||
from autogpt.json_utils.utilities import LLM_DEFAULT_RESPONSE_FORMAT, validate_json
|
||||
from autogpt.json_utils.utilities import extract_json_from_response, validate_json
|
||||
from autogpt.llm.base import ChatSequence
|
||||
from autogpt.llm.chat import chat_with_ai, create_chat_completion
|
||||
from autogpt.llm.providers.openai import OPEN_AI_CHAT_MODELS
|
||||
from autogpt.llm.utils import count_string_tokens
|
||||
from autogpt.log_cycle.log_cycle import (
|
||||
FULL_MESSAGE_HISTORY_FILE_NAME,
|
||||
|
@ -44,7 +44,7 @@ class Agent:
|
|||
|
||||
triggering_prompt: The last sentence the AI will see before answering.
|
||||
For Auto-GPT, this prompt is:
|
||||
Determine which next command to use, and respond using the format specified
|
||||
Determine exactly one command to use, and respond using the format specified
|
||||
above:
|
||||
The triggering prompt is not part of the system prompt because between the
|
||||
system prompt and the triggering
|
||||
|
@ -64,28 +64,34 @@ class Agent:
|
|||
memory: VectorMemory,
|
||||
next_action_count: int,
|
||||
command_registry: CommandRegistry,
|
||||
config: AIConfig,
|
||||
ai_config: AIConfig,
|
||||
system_prompt: str,
|
||||
triggering_prompt: str,
|
||||
workspace_directory: str,
|
||||
config: Config,
|
||||
):
|
||||
cfg = Config()
|
||||
self.ai_name = ai_name
|
||||
self.memory = memory
|
||||
self.history = MessageHistory(self)
|
||||
self.next_action_count = next_action_count
|
||||
self.command_registry = command_registry
|
||||
self.config = config
|
||||
self.ai_config = ai_config
|
||||
self.system_prompt = system_prompt
|
||||
self.triggering_prompt = triggering_prompt
|
||||
self.workspace = Workspace(workspace_directory, cfg.restrict_to_workspace)
|
||||
self.workspace = Workspace(workspace_directory, config.restrict_to_workspace)
|
||||
self.created_at = datetime.now().strftime("%Y%m%d_%H%M%S")
|
||||
self.cycle_count = 0
|
||||
self.log_cycle_handler = LogCycleHandler()
|
||||
self.fast_token_limit = OPEN_AI_CHAT_MODELS.get(
|
||||
config.fast_llm_model
|
||||
).max_tokens
|
||||
|
||||
def start_interaction_loop(self):
|
||||
# Avoid circular imports
|
||||
from autogpt.app import execute_command, get_command
|
||||
|
||||
# Interaction Loop
|
||||
cfg = Config()
|
||||
self.cycle_count = 0
|
||||
command_name = None
|
||||
arguments = None
|
||||
|
@ -110,48 +116,55 @@ class Agent:
|
|||
self.cycle_count += 1
|
||||
self.log_cycle_handler.log_count_within_cycle = 0
|
||||
self.log_cycle_handler.log_cycle(
|
||||
self.config.ai_name,
|
||||
self.ai_config.ai_name,
|
||||
self.created_at,
|
||||
self.cycle_count,
|
||||
[m.raw() for m in self.history],
|
||||
FULL_MESSAGE_HISTORY_FILE_NAME,
|
||||
)
|
||||
if (
|
||||
cfg.continuous_mode
|
||||
and cfg.continuous_limit > 0
|
||||
and self.cycle_count > cfg.continuous_limit
|
||||
self.config.continuous_mode
|
||||
and self.config.continuous_limit > 0
|
||||
and self.cycle_count > self.config.continuous_limit
|
||||
):
|
||||
logger.typewriter_log(
|
||||
"Continuous Limit Reached: ", Fore.YELLOW, f"{cfg.continuous_limit}"
|
||||
"Continuous Limit Reached: ",
|
||||
Fore.YELLOW,
|
||||
f"{self.config.continuous_limit}",
|
||||
)
|
||||
break
|
||||
# Send message to AI, get response
|
||||
with Spinner("Thinking... ", plain_output=cfg.plain_output):
|
||||
with Spinner("Thinking... ", plain_output=self.config.plain_output):
|
||||
assistant_reply = chat_with_ai(
|
||||
cfg,
|
||||
self.config,
|
||||
self,
|
||||
self.system_prompt,
|
||||
self.triggering_prompt,
|
||||
cfg.fast_token_limit,
|
||||
cfg.fast_llm_model,
|
||||
self.fast_token_limit,
|
||||
self.config.fast_llm_model,
|
||||
)
|
||||
|
||||
assistant_reply_json = fix_json_using_multiple_techniques(assistant_reply)
|
||||
for plugin in cfg.plugins:
|
||||
try:
|
||||
assistant_reply_json = extract_json_from_response(assistant_reply)
|
||||
validate_json(assistant_reply_json)
|
||||
except json.JSONDecodeError as e:
|
||||
logger.error(f"Exception while validating assistant reply JSON: {e}")
|
||||
assistant_reply_json = {}
|
||||
|
||||
for plugin in self.config.plugins:
|
||||
if not plugin.can_handle_post_planning():
|
||||
continue
|
||||
assistant_reply_json = plugin.post_planning(assistant_reply_json)
|
||||
|
||||
# Print Assistant thoughts
|
||||
if assistant_reply_json != {}:
|
||||
validate_json(assistant_reply_json, LLM_DEFAULT_RESPONSE_FORMAT)
|
||||
# Get command name and arguments
|
||||
try:
|
||||
print_assistant_thoughts(
|
||||
self.ai_name, assistant_reply_json, cfg.speak_mode
|
||||
self.ai_name, assistant_reply_json, self.config.speak_mode
|
||||
)
|
||||
command_name, arguments = get_command(assistant_reply_json)
|
||||
if cfg.speak_mode:
|
||||
if self.config.speak_mode:
|
||||
say_text(f"I want to execute {command_name}")
|
||||
|
||||
arguments = self._resolve_pathlike_command_args(arguments)
|
||||
|
@ -159,13 +172,15 @@ class Agent:
|
|||
except Exception as e:
|
||||
logger.error("Error: \n", str(e))
|
||||
self.log_cycle_handler.log_cycle(
|
||||
self.config.ai_name,
|
||||
self.ai_config.ai_name,
|
||||
self.created_at,
|
||||
self.cycle_count,
|
||||
assistant_reply_json,
|
||||
NEXT_ACTION_FILE_NAME,
|
||||
)
|
||||
|
||||
# First log new-line so user can differentiate sections better in console
|
||||
logger.typewriter_log("\n")
|
||||
logger.typewriter_log(
|
||||
"NEXT ACTION: ",
|
||||
Fore.CYAN,
|
||||
|
@ -173,7 +188,7 @@ class Agent:
|
|||
f"ARGUMENTS = {Fore.CYAN}{arguments}{Style.RESET_ALL}",
|
||||
)
|
||||
|
||||
if not cfg.continuous_mode and self.next_action_count == 0:
|
||||
if not self.config.continuous_mode and self.next_action_count == 0:
|
||||
# ### GET USER AUTHORIZATION TO EXECUTE COMMAND ###
|
||||
# Get key press: Prompt the user to press enter to continue or escape
|
||||
# to exit
|
||||
|
@ -184,13 +199,13 @@ class Agent:
|
|||
f"{self.ai_name}..."
|
||||
)
|
||||
while True:
|
||||
if cfg.chat_messages_enabled:
|
||||
if self.config.chat_messages_enabled:
|
||||
console_input = clean_input("Waiting for your response...")
|
||||
else:
|
||||
console_input = clean_input(
|
||||
Fore.MAGENTA + "Input:" + Style.RESET_ALL
|
||||
)
|
||||
if console_input.lower().strip() == cfg.authorise_key:
|
||||
if console_input.lower().strip() == self.config.authorise_key:
|
||||
user_input = "GENERATE NEXT COMMAND JSON"
|
||||
break
|
||||
elif console_input.lower().strip() == "s":
|
||||
|
@ -201,7 +216,7 @@ class Agent:
|
|||
)
|
||||
thoughts = assistant_reply_json.get("thoughts", {})
|
||||
self_feedback_resp = self.get_self_feedback(
|
||||
thoughts, cfg.fast_llm_model
|
||||
thoughts, self.config.fast_llm_model
|
||||
)
|
||||
logger.typewriter_log(
|
||||
f"SELF FEEDBACK: {self_feedback_resp}",
|
||||
|
@ -214,7 +229,9 @@ class Agent:
|
|||
elif console_input.lower().strip() == "":
|
||||
logger.warn("Invalid input format.")
|
||||
continue
|
||||
elif console_input.lower().startswith(f"{cfg.authorise_key} -"):
|
||||
elif console_input.lower().startswith(
|
||||
f"{self.config.authorise_key} -"
|
||||
):
|
||||
try:
|
||||
self.next_action_count = abs(
|
||||
int(console_input.split(" ")[1])
|
||||
|
@ -227,14 +244,14 @@ class Agent:
|
|||
)
|
||||
continue
|
||||
break
|
||||
elif console_input.lower() == cfg.exit_key:
|
||||
elif console_input.lower() == self.config.exit_key:
|
||||
user_input = "EXIT"
|
||||
break
|
||||
else:
|
||||
user_input = console_input
|
||||
command_name = "human_feedback"
|
||||
self.log_cycle_handler.log_cycle(
|
||||
self.config.ai_name,
|
||||
self.ai_config.ai_name,
|
||||
self.created_at,
|
||||
self.cycle_count,
|
||||
user_input,
|
||||
|
@ -252,6 +269,8 @@ class Agent:
|
|||
logger.info("Exiting...")
|
||||
break
|
||||
else:
|
||||
# First log new-line so user can differentiate sections better in console
|
||||
logger.typewriter_log("\n")
|
||||
# Print authorized commands left value
|
||||
logger.typewriter_log(
|
||||
f"{Fore.CYAN}AUTHORISED COMMANDS LEFT: {Style.RESET_ALL}{self.next_action_count}"
|
||||
|
@ -265,32 +284,30 @@ class Agent:
|
|||
elif command_name == "self_feedback":
|
||||
result = f"Self feedback: {user_input}"
|
||||
else:
|
||||
for plugin in cfg.plugins:
|
||||
for plugin in self.config.plugins:
|
||||
if not plugin.can_handle_pre_command():
|
||||
continue
|
||||
command_name, arguments = plugin.pre_command(
|
||||
command_name, arguments
|
||||
)
|
||||
command_result = execute_command(
|
||||
self.command_registry,
|
||||
command_name,
|
||||
arguments,
|
||||
self.config.prompt_generator,
|
||||
config=cfg,
|
||||
command_name=command_name,
|
||||
arguments=arguments,
|
||||
agent=self,
|
||||
)
|
||||
result = f"Command {command_name} returned: " f"{command_result}"
|
||||
|
||||
result_tlength = count_string_tokens(
|
||||
str(command_result), cfg.fast_llm_model
|
||||
str(command_result), self.config.fast_llm_model
|
||||
)
|
||||
memory_tlength = count_string_tokens(
|
||||
str(self.history.summary_message()), cfg.fast_llm_model
|
||||
str(self.history.summary_message()), self.config.fast_llm_model
|
||||
)
|
||||
if result_tlength + memory_tlength + 600 > cfg.fast_token_limit:
|
||||
if result_tlength + memory_tlength + 600 > self.fast_token_limit:
|
||||
result = f"Failure: command {command_name} returned too much output. \
|
||||
Do not execute this command again with the same arguments."
|
||||
|
||||
for plugin in cfg.plugins:
|
||||
for plugin in self.config.plugins:
|
||||
if not plugin.can_handle_post_command():
|
||||
continue
|
||||
result = plugin.post_command(command_name, result)
|
||||
|
@ -331,7 +348,7 @@ class Agent:
|
|||
Returns:
|
||||
str: A feedback response generated using the provided thoughts dictionary.
|
||||
"""
|
||||
ai_role = self.config.ai_role
|
||||
ai_role = self.ai_config.ai_role
|
||||
|
||||
feedback_prompt = f"Below is a message from me, an AI Agent, assuming the role of {ai_role}. whilst keeping knowledge of my slight limitations as an AI Agent Please evaluate my thought process, reasoning, and plan, and provide a concise paragraph outlining potential improvements. Consider adding or removing ideas that do not align with my role and explaining why, prioritizing thoughts based on their significance, or simply refining my overall thought process."
|
||||
reasoning = thoughts.get("reasoning", "")
|
||||
|
@ -343,7 +360,7 @@ class Agent:
|
|||
prompt.add("user", feedback_prompt + feedback_thoughts)
|
||||
|
||||
self.log_cycle_handler.log_cycle(
|
||||
self.config.ai_name,
|
||||
self.ai_config.ai_name,
|
||||
self.created_at,
|
||||
self.cycle_count,
|
||||
prompt.raw(),
|
||||
|
@ -353,7 +370,7 @@ class Agent:
|
|||
feedback = create_chat_completion(prompt)
|
||||
|
||||
self.log_cycle_handler.log_cycle(
|
||||
self.config.ai_name,
|
||||
self.ai_config.ai_name,
|
||||
self.created_at,
|
||||
self.cycle_count,
|
||||
feedback,
|
||||
|
|
|
@ -2,12 +2,11 @@
|
|||
import json
|
||||
from typing import Dict, List, Union
|
||||
|
||||
from autogpt.agent.agent import Agent
|
||||
from autogpt.agent.agent_manager import AgentManager
|
||||
from autogpt.commands.command import CommandRegistry, command
|
||||
from autogpt.commands.command import command
|
||||
from autogpt.commands.web_requests import scrape_links, scrape_text
|
||||
from autogpt.config import Config
|
||||
from autogpt.processing.text import summarize_text
|
||||
from autogpt.prompts.generator import PromptGenerator
|
||||
from autogpt.speech import say_text
|
||||
from autogpt.url_utils.validators import validate_url
|
||||
|
||||
|
@ -85,27 +84,26 @@ def map_command_synonyms(command_name: str):
|
|||
|
||||
|
||||
def execute_command(
|
||||
command_registry: CommandRegistry,
|
||||
command_name: str,
|
||||
arguments,
|
||||
prompt: PromptGenerator,
|
||||
config: Config,
|
||||
arguments: dict[str, str],
|
||||
agent: Agent,
|
||||
):
|
||||
"""Execute the command and return the result
|
||||
|
||||
Args:
|
||||
command_name (str): The name of the command to execute
|
||||
arguments (dict): The arguments for the command
|
||||
agent (Agent): The agent that is executing the command
|
||||
|
||||
Returns:
|
||||
str: The result of the command
|
||||
"""
|
||||
try:
|
||||
cmd = command_registry.commands.get(command_name)
|
||||
cmd = agent.command_registry.commands.get(command_name)
|
||||
|
||||
# If the command is found, call it with the provided arguments
|
||||
if cmd:
|
||||
return cmd(**arguments, config=config)
|
||||
return cmd(**arguments, agent=agent)
|
||||
|
||||
# TODO: Remove commands below after they are moved to the command registry.
|
||||
command_name = map_command_synonyms(command_name.lower())
|
||||
|
@ -113,7 +111,7 @@ def execute_command(
|
|||
# TODO: Change these to take in a file rather than pasted code, if
|
||||
# non-file is given, return instructions "Input should be a python
|
||||
# filepath, write your code to file and try again
|
||||
for command in prompt.commands:
|
||||
for command in agent.ai_config.prompt_generator.commands:
|
||||
if (
|
||||
command_name == command["label"].lower()
|
||||
or command_name == command["name"].lower()
|
||||
|
@ -132,7 +130,7 @@ def execute_command(
|
|||
"get_text_summary", "Get text summary", '"url": "<url>", "question": "<question>"'
|
||||
)
|
||||
@validate_url
|
||||
def get_text_summary(url: str, question: str, config: Config) -> str:
|
||||
def get_text_summary(url: str, question: str, agent: Agent) -> str:
|
||||
"""Get the text summary of a webpage
|
||||
|
||||
Args:
|
||||
|
@ -142,7 +140,7 @@ def get_text_summary(url: str, question: str, config: Config) -> str:
|
|||
Returns:
|
||||
str: The summary of the text
|
||||
"""
|
||||
text = scrape_text(url)
|
||||
text = scrape_text(url, agent)
|
||||
summary, _ = summarize_text(text, question=question)
|
||||
|
||||
return f""" "Result" : {summary}"""
|
||||
|
@ -150,7 +148,7 @@ def get_text_summary(url: str, question: str, config: Config) -> str:
|
|||
|
||||
@command("get_hyperlinks", "Get hyperlinks", '"url": "<url>"')
|
||||
@validate_url
|
||||
def get_hyperlinks(url: str, config: Config) -> Union[str, List[str]]:
|
||||
def get_hyperlinks(url: str, agent: Agent) -> Union[str, List[str]]:
|
||||
"""Get all hyperlinks on a webpage
|
||||
|
||||
Args:
|
||||
|
@ -159,7 +157,7 @@ def get_hyperlinks(url: str, config: Config) -> Union[str, List[str]]:
|
|||
Returns:
|
||||
str or list: The hyperlinks on the page
|
||||
"""
|
||||
return scrape_links(url, config)
|
||||
return scrape_links(url, agent)
|
||||
|
||||
|
||||
@command(
|
||||
|
@ -167,7 +165,7 @@ def get_hyperlinks(url: str, config: Config) -> Union[str, List[str]]:
|
|||
"Start GPT Agent",
|
||||
'"name": "<name>", "task": "<short_task_desc>", "prompt": "<prompt>"',
|
||||
)
|
||||
def start_agent(name: str, task: str, prompt: str, config: Config, model=None) -> str:
|
||||
def start_agent(name: str, task: str, prompt: str, agent: Agent, model=None) -> str:
|
||||
"""Start an agent with a given name, task, and prompt
|
||||
|
||||
Args:
|
||||
|
@ -188,11 +186,11 @@ def start_agent(name: str, task: str, prompt: str, config: Config, model=None) -
|
|||
agent_intro = f"{voice_name} here, Reporting for duty!"
|
||||
|
||||
# Create agent
|
||||
if config.speak_mode:
|
||||
if agent.config.speak_mode:
|
||||
say_text(agent_intro, 1)
|
||||
key, ack = agent_manager.create_agent(task, first_message, model)
|
||||
|
||||
if config.speak_mode:
|
||||
if agent.config.speak_mode:
|
||||
say_text(f"Hello {voice_name}. Your task is as follows. {task}.")
|
||||
|
||||
# Assign task (prompt), get response
|
||||
|
@ -202,7 +200,7 @@ def start_agent(name: str, task: str, prompt: str, config: Config, model=None) -
|
|||
|
||||
|
||||
@command("message_agent", "Message GPT Agent", '"key": "<key>", "message": "<message>"')
|
||||
def message_agent(key: str, message: str, config: Config) -> str:
|
||||
def message_agent(key: str, message: str, agent: Agent) -> str:
|
||||
"""Message an agent with a given key and message"""
|
||||
# Check if the key is a valid integer
|
||||
if is_valid_int(key):
|
||||
|
@ -211,13 +209,13 @@ def message_agent(key: str, message: str, config: Config) -> str:
|
|||
return "Invalid key, must be an integer."
|
||||
|
||||
# Speak response
|
||||
if config.speak_mode:
|
||||
if agent.config.speak_mode:
|
||||
say_text(agent_response, 1)
|
||||
return agent_response
|
||||
|
||||
|
||||
@command("list_agents", "List GPT Agents", "() -> str")
|
||||
def list_agents(config: Config) -> str:
|
||||
def list_agents(agent: Agent) -> str:
|
||||
"""List all agents
|
||||
|
||||
Returns:
|
||||
|
@ -229,7 +227,7 @@ def list_agents(config: Config) -> str:
|
|||
|
||||
|
||||
@command("delete_agent", "Delete GPT Agent", '"key": "<key>"')
|
||||
def delete_agent(key: str, config: Config) -> str:
|
||||
def delete_agent(key: str, agent: Agent) -> str:
|
||||
"""Delete an agent with a given key
|
||||
|
||||
Args:
|
||||
|
|
|
@ -1,21 +1,17 @@
|
|||
"""Code evaluation module."""
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from autogpt.agent.agent import Agent
|
||||
from autogpt.commands.command import command
|
||||
from autogpt.llm.utils import call_ai_function
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from autogpt.config import Config
|
||||
|
||||
|
||||
@command(
|
||||
"analyze_code",
|
||||
"Analyze Code",
|
||||
'"code": "<full_code_string>"',
|
||||
)
|
||||
def analyze_code(code: str, config: Config) -> list[str]:
|
||||
def analyze_code(code: str, agent: Agent) -> list[str]:
|
||||
"""
|
||||
A function that takes in a string and returns a response from create chat
|
||||
completion api call.
|
||||
|
@ -33,4 +29,6 @@ def analyze_code(code: str, config: Config) -> list[str]:
|
|||
"Analyzes the given code and returns a list of suggestions for improvements."
|
||||
)
|
||||
|
||||
return call_ai_function(function_string, args, description_string, config=config)
|
||||
return call_ai_function(
|
||||
function_string, args, description_string, config=agent.config
|
||||
)
|
||||
|
|
|
@ -1,14 +1,10 @@
|
|||
"""Commands for converting audio to text."""
|
||||
import json
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import requests
|
||||
|
||||
from autogpt.agent.agent import Agent
|
||||
from autogpt.commands.command import command
|
||||
from autogpt.config import Config
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from autogpt.config import Config
|
||||
|
||||
|
||||
@command(
|
||||
|
@ -19,7 +15,7 @@ if TYPE_CHECKING:
|
|||
and config.huggingface_api_token,
|
||||
"Configure huggingface_audio_to_text_model and Hugging Face api token.",
|
||||
)
|
||||
def read_audio_from_file(filename: str, config: Config) -> str:
|
||||
def read_audio_from_file(filename: str, agent: Agent) -> str:
|
||||
"""
|
||||
Convert audio to text.
|
||||
|
||||
|
@ -31,10 +27,10 @@ def read_audio_from_file(filename: str, config: Config) -> str:
|
|||
"""
|
||||
with open(filename, "rb") as audio_file:
|
||||
audio = audio_file.read()
|
||||
return read_audio(audio, config)
|
||||
return read_audio(audio, agent.config)
|
||||
|
||||
|
||||
def read_audio(audio: bytes, config: Config) -> str:
|
||||
def read_audio(audio: bytes, agent: Agent) -> str:
|
||||
"""
|
||||
Convert audio to text.
|
||||
|
||||
|
@ -44,9 +40,20 @@ def read_audio(audio: bytes, config: Config) -> str:
|
|||
Returns:
|
||||
str: The text from the audio
|
||||
"""
|
||||
model = config.huggingface_audio_to_text_model
|
||||
if agent.config.audio_to_text_provider == "huggingface":
|
||||
text = read_huggingface_audio(audio, agent.config)
|
||||
if text:
|
||||
return f"The audio says: {text}"
|
||||
else:
|
||||
return f"Error, couldn't convert audio to text"
|
||||
|
||||
return "Error: No audio to text provider given"
|
||||
|
||||
|
||||
def read_huggingface_audio(audio: bytes, agent: Agent) -> str:
|
||||
model = agent.config.huggingface_audio_to_text_model
|
||||
api_url = f"https://api-inference.huggingface.co/models/{model}"
|
||||
api_token = config.huggingface_api_token
|
||||
api_token = agent.config.huggingface_api_token
|
||||
headers = {"Authorization": f"Bearer {api_token}"}
|
||||
|
||||
if api_token is None:
|
||||
|
@ -60,5 +67,5 @@ def read_audio(audio: bytes, config: Config) -> str:
|
|||
data=audio,
|
||||
)
|
||||
|
||||
text = json.loads(response.content.decode("utf-8"))["text"]
|
||||
return f"The audio says: {text}"
|
||||
response_json = json.loads(response.content.decode("utf-8"))
|
||||
return response_json.get("text")
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
import functools
|
||||
import importlib
|
||||
import inspect
|
||||
from inspect import Parameter
|
||||
from typing import Any, Callable, Optional
|
||||
|
||||
from autogpt.config import Config
|
||||
|
@ -175,3 +176,32 @@ def command(
|
|||
return wrapper
|
||||
|
||||
return decorator
|
||||
|
||||
|
||||
def ignore_unexpected_kwargs(func: Callable[..., Any]) -> Callable[..., Any]:
|
||||
def filter_kwargs(kwargs: dict) -> dict:
|
||||
sig = inspect.signature(func)
|
||||
# Parameter.VAR_KEYWORD - a dict of keyword arguments that aren't bound to any other
|
||||
if any(map(lambda p: p.kind == Parameter.VAR_KEYWORD, sig.parameters.values())):
|
||||
# if **kwargs exist, return directly
|
||||
return kwargs
|
||||
|
||||
_params = list(
|
||||
filter(
|
||||
lambda p: p.kind
|
||||
in {Parameter.KEYWORD_ONLY, Parameter.POSITIONAL_OR_KEYWORD},
|
||||
sig.parameters.values(),
|
||||
)
|
||||
)
|
||||
|
||||
res_kwargs = {
|
||||
param.name: kwargs[param.name] for param in _params if param.name in kwargs
|
||||
}
|
||||
return res_kwargs
|
||||
|
||||
@functools.wraps(func)
|
||||
def wrapper(*args, **kwargs) -> Any:
|
||||
kwargs = filter_kwargs(kwargs)
|
||||
return func(*args, **kwargs)
|
||||
|
||||
return wrapper
|
||||
|
|
|
@ -6,13 +6,53 @@ from pathlib import Path
|
|||
import docker
|
||||
from docker.errors import ImageNotFound
|
||||
|
||||
from autogpt.agent.agent import Agent
|
||||
from autogpt.commands.command import command
|
||||
from autogpt.config import Config
|
||||
from autogpt.logs import logger
|
||||
from autogpt.setup import CFG
|
||||
from autogpt.workspace.workspace import Workspace
|
||||
|
||||
ALLOWLIST_CONTROL = "allowlist"
|
||||
DENYLIST_CONTROL = "denylist"
|
||||
|
||||
|
||||
@command(
|
||||
"execute_python_code",
|
||||
"Create a Python file and execute it",
|
||||
'"code": "<code>", "basename": "<basename>"',
|
||||
)
|
||||
def execute_python_code(code: str, basename: str, agent: Agent) -> str:
|
||||
"""Create and execute a Python file in a Docker container and return the STDOUT of the
|
||||
executed code. If there is any data that needs to be captured use a print statement
|
||||
|
||||
Args:
|
||||
code (str): The Python code to run
|
||||
basename (str): A name to be given to the Python file
|
||||
|
||||
Returns:
|
||||
str: The STDOUT captured from the code when it ran
|
||||
"""
|
||||
ai_name = agent.ai_name
|
||||
directory = os.path.join(agent.config.workspace_path, ai_name, "executed_code")
|
||||
os.makedirs(directory, exist_ok=True)
|
||||
|
||||
if not basename.endswith(".py"):
|
||||
basename = basename + ".py"
|
||||
|
||||
path = os.path.join(directory, basename)
|
||||
|
||||
try:
|
||||
with open(path, "w+", encoding="utf-8") as f:
|
||||
f.write(code)
|
||||
|
||||
return execute_python_file(f.name, agent)
|
||||
except Exception as e:
|
||||
return f"Error: {str(e)}"
|
||||
|
||||
|
||||
@command("execute_python_file", "Execute Python File", '"filename": "<filename>"')
|
||||
def execute_python_file(filename: str, config: Config) -> str:
|
||||
def execute_python_file(filename: str, agent: Agent) -> str:
|
||||
"""Execute a Python file in a Docker container and return the output
|
||||
|
||||
Args:
|
||||
|
@ -21,17 +61,30 @@ def execute_python_file(filename: str, config: Config) -> str:
|
|||
Returns:
|
||||
str: The output of the file
|
||||
"""
|
||||
logger.info(f"Executing file '{filename}'")
|
||||
logger.info(
|
||||
f"Executing python file '{filename}' in working directory '{CFG.workspace_path}'"
|
||||
)
|
||||
|
||||
if not filename.endswith(".py"):
|
||||
return "Error: Invalid file type. Only .py files are allowed."
|
||||
|
||||
if not os.path.isfile(filename):
|
||||
return f"Error: File '{filename}' does not exist."
|
||||
workspace = Workspace(
|
||||
agent.config.workspace_path, agent.config.restrict_to_workspace
|
||||
)
|
||||
|
||||
path = workspace.get_path(filename)
|
||||
if not path.is_file():
|
||||
# Mimic the response that you get from the command line so that it's easier to identify
|
||||
return (
|
||||
f"python: can't open file '{filename}': [Errno 2] No such file or directory"
|
||||
)
|
||||
|
||||
if we_are_running_in_a_docker_container():
|
||||
result = subprocess.run(
|
||||
["python", filename], capture_output=True, encoding="utf8"
|
||||
["python", str(path)],
|
||||
capture_output=True,
|
||||
encoding="utf8",
|
||||
cwd=CFG.workspace_path,
|
||||
)
|
||||
if result.returncode == 0:
|
||||
return result.stdout
|
||||
|
@ -63,9 +116,9 @@ def execute_python_file(filename: str, config: Config) -> str:
|
|||
logger.info(status)
|
||||
container = client.containers.run(
|
||||
image_name,
|
||||
["python", str(Path(filename).relative_to(config.workspace_path))],
|
||||
["python", str(path.relative_to(workspace.root))],
|
||||
volumes={
|
||||
config.workspace_path: {
|
||||
agent.config.workspace_path: {
|
||||
"bind": "/workspace",
|
||||
"mode": "ro",
|
||||
}
|
||||
|
@ -104,21 +157,15 @@ def validate_command(command: str, config: Config) -> bool:
|
|||
Returns:
|
||||
bool: True if the command is allowed, False otherwise
|
||||
"""
|
||||
tokens = command.split()
|
||||
|
||||
if not tokens:
|
||||
if not command:
|
||||
return False
|
||||
|
||||
if config.deny_commands and tokens[0] not in config.deny_commands:
|
||||
return False
|
||||
command_name = command.split()[0]
|
||||
|
||||
for keyword in config.allow_commands:
|
||||
if keyword in tokens:
|
||||
return True
|
||||
if config.allow_commands:
|
||||
return False
|
||||
|
||||
return True
|
||||
if config.shell_command_control == ALLOWLIST_CONTROL:
|
||||
return command_name in config.shell_allowlist
|
||||
else:
|
||||
return command_name not in config.shell_denylist
|
||||
|
||||
|
||||
@command(
|
||||
|
@ -130,7 +177,7 @@ def validate_command(command: str, config: Config) -> bool:
|
|||
" shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' "
|
||||
"in your config file: .env - do not attempt to bypass the restriction.",
|
||||
)
|
||||
def execute_shell(command_line: str, config: Config) -> str:
|
||||
def execute_shell(command_line: str, agent: Agent) -> str:
|
||||
"""Execute a shell command and return the output
|
||||
|
||||
Args:
|
||||
|
@ -139,14 +186,14 @@ def execute_shell(command_line: str, config: Config) -> str:
|
|||
Returns:
|
||||
str: The output of the command
|
||||
"""
|
||||
if not validate_command(command_line, config):
|
||||
if not validate_command(command_line, agent.config):
|
||||
logger.info(f"Command '{command_line}' not allowed")
|
||||
return "Error: This Shell Command is not allowed."
|
||||
|
||||
current_dir = Path.cwd()
|
||||
# Change dir into workspace if necessary
|
||||
if not current_dir.is_relative_to(config.workspace_path):
|
||||
os.chdir(config.workspace_path)
|
||||
if not current_dir.is_relative_to(agent.config.workspace_path):
|
||||
os.chdir(agent.config.workspace_path)
|
||||
|
||||
logger.info(
|
||||
f"Executing command '{command_line}' in working directory '{os.getcwd()}'"
|
||||
|
@ -170,7 +217,7 @@ def execute_shell(command_line: str, config: Config) -> str:
|
|||
" shell commands, EXECUTE_LOCAL_COMMANDS must be set to 'True' "
|
||||
"in your config. Do not attempt to bypass the restriction.",
|
||||
)
|
||||
def execute_shell_popen(command_line, config: Config) -> str:
|
||||
def execute_shell_popen(command_line, agent: Agent) -> str:
|
||||
"""Execute a shell command with Popen and returns an english description
|
||||
of the event and the process id
|
||||
|
||||
|
@ -180,14 +227,14 @@ def execute_shell_popen(command_line, config: Config) -> str:
|
|||
Returns:
|
||||
str: Description of the fact that the process started and its id
|
||||
"""
|
||||
if not validate_command(command_line, config):
|
||||
if not validate_command(command_line, agent.config):
|
||||
logger.info(f"Command '{command_line}' not allowed")
|
||||
return "Error: This Shell Command is not allowed."
|
||||
|
||||
current_dir = os.getcwd()
|
||||
# Change dir into workspace if necessary
|
||||
if config.workspace_path not in current_dir:
|
||||
os.chdir(config.workspace_path)
|
||||
if agent.config.workspace_path not in current_dir:
|
||||
os.chdir(agent.config.workspace_path)
|
||||
|
||||
logger.info(
|
||||
f"Executing command '{command_line}' in working directory '{os.getcwd()}'"
|
||||
|
|
|
@ -4,23 +4,23 @@ from __future__ import annotations
|
|||
import hashlib
|
||||
import os
|
||||
import os.path
|
||||
from typing import TYPE_CHECKING, Generator, Literal
|
||||
import re
|
||||
from typing import Generator, Literal
|
||||
|
||||
import requests
|
||||
from colorama import Back, Fore
|
||||
from confection import Config
|
||||
from requests.adapters import HTTPAdapter, Retry
|
||||
|
||||
from autogpt.commands.command import command
|
||||
from autogpt.agent.agent import Agent
|
||||
from autogpt.commands.command import command, ignore_unexpected_kwargs
|
||||
from autogpt.commands.file_operations_utils import read_textual_file
|
||||
from autogpt.config import Config
|
||||
from autogpt.logs import logger
|
||||
from autogpt.memory.vector import MemoryItem, VectorMemory
|
||||
from autogpt.spinner import Spinner
|
||||
from autogpt.utils import readable_file_size
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from autogpt.config import Config
|
||||
|
||||
|
||||
Operation = Literal["write", "append", "delete"]
|
||||
|
||||
|
||||
|
@ -102,7 +102,7 @@ def is_duplicate_operation(
|
|||
|
||||
|
||||
def log_operation(
|
||||
operation: str, filename: str, config: Config, checksum: str | None = None
|
||||
operation: str, filename: str, agent: Agent, checksum: str | None = None
|
||||
) -> None:
|
||||
"""Log the file operation to the file_logger.txt
|
||||
|
||||
|
@ -115,43 +115,13 @@ def log_operation(
|
|||
if checksum is not None:
|
||||
log_entry += f" #{checksum}"
|
||||
logger.debug(f"Logging file operation: {log_entry}")
|
||||
append_to_file(config.file_logger_path, f"{log_entry}\n", config, should_log=False)
|
||||
|
||||
|
||||
def split_file(
|
||||
content: str, max_length: int = 4000, overlap: int = 0
|
||||
) -> Generator[str, None, None]:
|
||||
"""
|
||||
Split text into chunks of a specified maximum length with a specified overlap
|
||||
between chunks.
|
||||
|
||||
:param content: The input text to be split into chunks
|
||||
:param max_length: The maximum length of each chunk,
|
||||
default is 4000 (about 1k token)
|
||||
:param overlap: The number of overlapping characters between chunks,
|
||||
default is no overlap
|
||||
:return: A generator yielding chunks of text
|
||||
"""
|
||||
start = 0
|
||||
content_length = len(content)
|
||||
|
||||
while start < content_length:
|
||||
end = start + max_length
|
||||
if end + overlap < content_length:
|
||||
chunk = content[start : end + max(overlap - 1, 0)]
|
||||
else:
|
||||
chunk = content[start:content_length]
|
||||
|
||||
# Account for the case where the last chunk is shorter than the overlap, so it has already been consumed
|
||||
if len(chunk) <= overlap:
|
||||
break
|
||||
|
||||
yield chunk
|
||||
start += max_length - overlap
|
||||
append_to_file(
|
||||
agent.config.file_logger_path, f"{log_entry}\n", agent, should_log=False
|
||||
)
|
||||
|
||||
|
||||
@command("read_file", "Read a file", '"filename": "<filename>"')
|
||||
def read_file(filename: str, config: Config) -> str:
|
||||
def read_file(filename: str, agent: Agent) -> str:
|
||||
"""Read a file and return the contents
|
||||
|
||||
Args:
|
||||
|
@ -200,7 +170,7 @@ def ingest_file(
|
|||
|
||||
|
||||
@command("write_to_file", "Write to file", '"filename": "<filename>", "text": "<text>"')
|
||||
def write_to_file(filename: str, text: str, config: Config) -> str:
|
||||
def write_to_file(filename: str, text: str, agent: Agent) -> str:
|
||||
"""Write text to a file
|
||||
|
||||
Args:
|
||||
|
@ -211,24 +181,86 @@ def write_to_file(filename: str, text: str, config: Config) -> str:
|
|||
str: A message indicating success or failure
|
||||
"""
|
||||
checksum = text_checksum(text)
|
||||
if is_duplicate_operation("write", filename, config, checksum):
|
||||
if is_duplicate_operation("write", filename, agent.config, checksum):
|
||||
return "Error: File has already been updated."
|
||||
try:
|
||||
directory = os.path.dirname(filename)
|
||||
os.makedirs(directory, exist_ok=True)
|
||||
with open(filename, "w", encoding="utf-8") as f:
|
||||
f.write(text)
|
||||
log_operation("write", filename, config, checksum)
|
||||
log_operation("write", filename, agent, checksum)
|
||||
return "File written to successfully."
|
||||
except Exception as err:
|
||||
return f"Error: {err}"
|
||||
|
||||
|
||||
@command(
|
||||
"replace_in_file",
|
||||
"Replace text or code in a file",
|
||||
'"filename": "<filename>", '
|
||||
'"old_text": "<old_text>", "new_text": "<new_text>", '
|
||||
'"occurrence_index": "<occurrence_index>"',
|
||||
)
|
||||
def replace_in_file(
|
||||
filename: str, old_text: str, new_text: str, agent: Agent, occurrence_index=None
|
||||
):
|
||||
"""Update a file by replacing one or all occurrences of old_text with new_text using Python's built-in string
|
||||
manipulation and regular expression modules for cross-platform file editing similar to sed and awk.
|
||||
|
||||
Args:
|
||||
filename (str): The name of the file
|
||||
old_text (str): String to be replaced. \n will be stripped from the end.
|
||||
new_text (str): New string. \n will be stripped from the end.
|
||||
occurrence_index (int): Optional index of the occurrence to replace. If None, all occurrences will be replaced.
|
||||
|
||||
Returns:
|
||||
str: A message indicating whether the file was updated successfully or if there were no matches found for old_text
|
||||
in the file.
|
||||
|
||||
Raises:
|
||||
Exception: If there was an error updating the file.
|
||||
"""
|
||||
try:
|
||||
with open(filename, "r", encoding="utf-8") as f:
|
||||
content = f.read()
|
||||
|
||||
old_text = old_text.rstrip("\n")
|
||||
new_text = new_text.rstrip("\n")
|
||||
|
||||
if occurrence_index is None:
|
||||
new_content = content.replace(old_text, new_text)
|
||||
else:
|
||||
matches = list(re.finditer(re.escape(old_text), content))
|
||||
if not matches:
|
||||
return f"No matches found for {old_text} in {filename}"
|
||||
|
||||
if int(occurrence_index) >= len(matches):
|
||||
return f"Occurrence index {occurrence_index} is out of range for {old_text} in {filename}"
|
||||
|
||||
match = matches[int(occurrence_index)]
|
||||
start, end = match.start(), match.end()
|
||||
new_content = content[:start] + new_text + content[end:]
|
||||
|
||||
if content == new_content:
|
||||
return f"No matches found for {old_text} in {filename}"
|
||||
|
||||
with open(filename, "w", encoding="utf-8") as f:
|
||||
f.write(new_content)
|
||||
|
||||
with open(filename, "r", encoding="utf-8") as f:
|
||||
checksum = text_checksum(f.read())
|
||||
log_operation("update", filename, agent, checksum=checksum)
|
||||
|
||||
return f"File {filename} updated successfully."
|
||||
except Exception as e:
|
||||
return "Error: " + str(e)
|
||||
|
||||
|
||||
@command(
|
||||
"append_to_file", "Append to file", '"filename": "<filename>", "text": "<text>"'
|
||||
)
|
||||
def append_to_file(
|
||||
filename: str, text: str, config: Config, should_log: bool = True
|
||||
filename: str, text: str, agent: Agent, should_log: bool = True
|
||||
) -> str:
|
||||
"""Append text to a file
|
||||
|
||||
|
@ -249,7 +281,7 @@ def append_to_file(
|
|||
if should_log:
|
||||
with open(filename, "r", encoding="utf-8") as f:
|
||||
checksum = text_checksum(f.read())
|
||||
log_operation("append", filename, config, checksum=checksum)
|
||||
log_operation("append", filename, agent, checksum=checksum)
|
||||
|
||||
return "Text appended successfully."
|
||||
except Exception as err:
|
||||
|
@ -257,7 +289,7 @@ def append_to_file(
|
|||
|
||||
|
||||
@command("delete_file", "Delete file", '"filename": "<filename>"')
|
||||
def delete_file(filename: str, config: Config) -> str:
|
||||
def delete_file(filename: str, agent: Agent) -> str:
|
||||
"""Delete a file
|
||||
|
||||
Args:
|
||||
|
@ -266,18 +298,19 @@ def delete_file(filename: str, config: Config) -> str:
|
|||
Returns:
|
||||
str: A message indicating success or failure
|
||||
"""
|
||||
if is_duplicate_operation("delete", filename, config):
|
||||
if is_duplicate_operation("delete", filename, agent.config):
|
||||
return "Error: File has already been deleted."
|
||||
try:
|
||||
os.remove(filename)
|
||||
log_operation("delete", filename, config)
|
||||
log_operation("delete", filename, agent)
|
||||
return "File deleted successfully."
|
||||
except Exception as err:
|
||||
return f"Error: {err}"
|
||||
|
||||
|
||||
@command("list_files", "List Files in Directory", '"directory": "<directory>"')
|
||||
def list_files(directory: str, config: Config) -> list[str]:
|
||||
@ignore_unexpected_kwargs
|
||||
def list_files(directory: str, agent: Agent) -> list[str]:
|
||||
"""lists files in a directory recursively
|
||||
|
||||
Args:
|
||||
|
@ -293,7 +326,7 @@ def list_files(directory: str, config: Config) -> list[str]:
|
|||
if file.startswith("."):
|
||||
continue
|
||||
relative_path = os.path.relpath(
|
||||
os.path.join(root, file), config.workspace_path
|
||||
os.path.join(root, file), agent.config.workspace_path
|
||||
)
|
||||
found_files.append(relative_path)
|
||||
|
||||
|
@ -307,7 +340,7 @@ def list_files(directory: str, config: Config) -> list[str]:
|
|||
lambda config: config.allow_downloads,
|
||||
"Error: You do not have user authorization to download files locally.",
|
||||
)
|
||||
def download_file(url, filename, config: Config):
|
||||
def download_file(url, filename, agent: Agent):
|
||||
"""Downloads a file
|
||||
Args:
|
||||
url (str): URL of the file to download
|
||||
|
@ -317,7 +350,7 @@ def download_file(url, filename, config: Config):
|
|||
directory = os.path.dirname(filename)
|
||||
os.makedirs(directory, exist_ok=True)
|
||||
message = f"{Fore.YELLOW}Downloading file from {Back.LIGHTBLUE_EX}{url}{Back.RESET}{Fore.RESET}"
|
||||
with Spinner(message, plain_output=config.plain_output) as spinner:
|
||||
with Spinner(message, plain_output=agent.config.plain_output) as spinner:
|
||||
session = requests.Session()
|
||||
retry = Retry(total=3, backoff_factor=1, status_forcelist=[502, 503, 504])
|
||||
adapter = HTTPAdapter(max_retries=retry)
|
||||
|
|
|
@ -146,7 +146,9 @@ def is_file_binary_fn(file_path: str):
|
|||
|
||||
def read_textual_file(file_path: str, logger: logs.Logger) -> str:
|
||||
if not os.path.isfile(file_path):
|
||||
raise FileNotFoundError(f"{file_path} not found!")
|
||||
raise FileNotFoundError(
|
||||
f"read_file {file_path} failed: no such file or directory"
|
||||
)
|
||||
is_binary = is_file_binary_fn(file_path)
|
||||
file_extension = os.path.splitext(file_path)[1].lower()
|
||||
parser = extension_to_parser.get(file_extension)
|
||||
|
|
|
@ -1,15 +1,11 @@
|
|||
"""Git operations for autogpt"""
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from git.repo import Repo
|
||||
|
||||
from autogpt.agent.agent import Agent
|
||||
from autogpt.commands.command import command
|
||||
from autogpt.config import Config
|
||||
from autogpt.url_utils.validators import validate_url
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from autogpt.config import Config
|
||||
|
||||
|
||||
@command(
|
||||
"clone_repository",
|
||||
|
@ -19,7 +15,7 @@ if TYPE_CHECKING:
|
|||
"Configure github_username and github_api_key.",
|
||||
)
|
||||
@validate_url
|
||||
def clone_repository(url: str, clone_path: str, config: Config) -> str:
|
||||
def clone_repository(url: str, clone_path: str, agent: Agent) -> str:
|
||||
"""Clone a GitHub repository locally.
|
||||
|
||||
Args:
|
||||
|
@ -30,9 +26,11 @@ def clone_repository(url: str, clone_path: str, config: Config) -> str:
|
|||
str: The result of the clone operation.
|
||||
"""
|
||||
split_url = url.split("//")
|
||||
auth_repo_url = f"//{config.github_username}:{config.github_api_key}@".join(
|
||||
auth_repo_url = (
|
||||
f"//{agent.config.github_username}:{agent.config.github_api_key}@".join(
|
||||
split_url
|
||||
)
|
||||
)
|
||||
try:
|
||||
Repo.clone_from(url=auth_repo_url, to_path=clone_path)
|
||||
return f"""Cloned {url} to {clone_path}"""
|
||||
|
|
|
@ -2,15 +2,15 @@
|
|||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
import time
|
||||
from itertools import islice
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from duckduckgo_search import DDGS
|
||||
|
||||
from autogpt.agent.agent import Agent
|
||||
from autogpt.commands.command import command
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from autogpt.config import Config
|
||||
DUCKDUCKGO_MAX_ATTEMPTS = 3
|
||||
|
||||
|
||||
@command(
|
||||
|
@ -19,7 +19,7 @@ if TYPE_CHECKING:
|
|||
'"query": "<query>"',
|
||||
lambda config: not config.google_api_key,
|
||||
)
|
||||
def google_search(query: str, config: Config, num_results: int = 8) -> str:
|
||||
def google_search(query: str, agent: Agent, num_results: int = 8) -> str:
|
||||
"""Return the results of a Google search
|
||||
|
||||
Args:
|
||||
|
@ -30,15 +30,20 @@ def google_search(query: str, config: Config, num_results: int = 8) -> str:
|
|||
str: The results of the search.
|
||||
"""
|
||||
search_results = []
|
||||
attempts = 0
|
||||
|
||||
while attempts < DUCKDUCKGO_MAX_ATTEMPTS:
|
||||
if not query:
|
||||
return json.dumps(search_results)
|
||||
|
||||
results = DDGS().text(query)
|
||||
if not results:
|
||||
return json.dumps(search_results)
|
||||
search_results = list(islice(results, num_results))
|
||||
|
||||
for item in islice(results, num_results):
|
||||
search_results.append(item)
|
||||
if search_results:
|
||||
break
|
||||
|
||||
time.sleep(1)
|
||||
attempts += 1
|
||||
|
||||
results = json.dumps(search_results, ensure_ascii=False, indent=4)
|
||||
return safe_google_results(results)
|
||||
|
@ -48,11 +53,12 @@ def google_search(query: str, config: Config, num_results: int = 8) -> str:
|
|||
"google",
|
||||
"Google Search",
|
||||
'"query": "<query>"',
|
||||
lambda config: bool(config.google_api_key) and bool(config.custom_search_engine_id),
|
||||
lambda config: bool(config.google_api_key)
|
||||
and bool(config.google_custom_search_engine_id),
|
||||
"Configure google_api_key and custom_search_engine_id.",
|
||||
)
|
||||
def google_official_search(
|
||||
query: str, config: Config, num_results: int = 8
|
||||
query: str, agent: Agent, num_results: int = 8
|
||||
) -> str | list[str]:
|
||||
"""Return the results of a Google search using the official Google API
|
||||
|
||||
|
@ -69,8 +75,8 @@ def google_official_search(
|
|||
|
||||
try:
|
||||
# Get the Google API key and Custom Search Engine ID from the config file
|
||||
api_key = config.google_api_key
|
||||
custom_search_engine_id = config.custom_search_engine_id
|
||||
api_key = agent.config.google_api_key
|
||||
custom_search_engine_id = agent.config.google_custom_search_engine_id
|
||||
|
||||
# Initialize the Custom Search API service
|
||||
service = build("customsearch", "v1", developerKey=api_key)
|
||||
|
|
|
@ -4,19 +4,15 @@ import json
|
|||
import time
|
||||
import uuid
|
||||
from base64 import b64decode
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import openai
|
||||
import requests
|
||||
from PIL import Image
|
||||
|
||||
from autogpt.agent.agent import Agent
|
||||
from autogpt.commands.command import command
|
||||
from autogpt.config import Config
|
||||
from autogpt.logs import logger
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from autogpt.config import Config
|
||||
|
||||
|
||||
@command(
|
||||
"generate_image",
|
||||
|
@ -25,7 +21,7 @@ if TYPE_CHECKING:
|
|||
lambda config: config.image_provider,
|
||||
"Requires a image provider to be set.",
|
||||
)
|
||||
def generate_image(prompt: str, config: Config, size: int = 256) -> str:
|
||||
def generate_image(prompt: str, agent: Agent, size: int = 256) -> str:
|
||||
"""Generate an image from a prompt.
|
||||
|
||||
Args:
|
||||
|
@ -35,21 +31,21 @@ def generate_image(prompt: str, config: Config, size: int = 256) -> str:
|
|||
Returns:
|
||||
str: The filename of the image
|
||||
"""
|
||||
filename = f"{config.workspace_path}/{str(uuid.uuid4())}.jpg"
|
||||
filename = f"{agent.config.workspace_path}/{str(uuid.uuid4())}.jpg"
|
||||
|
||||
# DALL-E
|
||||
if config.image_provider == "dalle":
|
||||
return generate_image_with_dalle(prompt, filename, size, config)
|
||||
if agent.config.image_provider == "dalle":
|
||||
return generate_image_with_dalle(prompt, filename, size, agent)
|
||||
# HuggingFace
|
||||
elif config.image_provider == "huggingface":
|
||||
return generate_image_with_hf(prompt, filename, config)
|
||||
elif agent.config.image_provider == "huggingface":
|
||||
return generate_image_with_hf(prompt, filename, agent)
|
||||
# SD WebUI
|
||||
elif config.image_provider == "sdwebui":
|
||||
return generate_image_with_sd_webui(prompt, filename, config, size)
|
||||
elif agent.config.image_provider == "sdwebui":
|
||||
return generate_image_with_sd_webui(prompt, filename, agent, size)
|
||||
return "No Image Provider Set"
|
||||
|
||||
|
||||
def generate_image_with_hf(prompt: str, filename: str, config: Config) -> str:
|
||||
def generate_image_with_hf(prompt: str, filename: str, agent: Agent) -> str:
|
||||
"""Generate an image with HuggingFace's API.
|
||||
|
||||
Args:
|
||||
|
@ -59,15 +55,13 @@ def generate_image_with_hf(prompt: str, filename: str, config: Config) -> str:
|
|||
Returns:
|
||||
str: The filename of the image
|
||||
"""
|
||||
API_URL = (
|
||||
f"https://api-inference.huggingface.co/models/{config.huggingface_image_model}"
|
||||
)
|
||||
if config.huggingface_api_token is None:
|
||||
API_URL = f"https://api-inference.huggingface.co/models/{agent.config.huggingface_image_model}"
|
||||
if agent.config.huggingface_api_token is None:
|
||||
raise ValueError(
|
||||
"You need to set your Hugging Face API token in the config file."
|
||||
)
|
||||
headers = {
|
||||
"Authorization": f"Bearer {config.huggingface_api_token}",
|
||||
"Authorization": f"Bearer {agent.config.huggingface_api_token}",
|
||||
"X-Use-Cache": "false",
|
||||
}
|
||||
|
||||
|
@ -110,7 +104,7 @@ def generate_image_with_hf(prompt: str, filename: str, config: Config) -> str:
|
|||
|
||||
|
||||
def generate_image_with_dalle(
|
||||
prompt: str, filename: str, size: int, config: Config
|
||||
prompt: str, filename: str, size: int, agent: Agent
|
||||
) -> str:
|
||||
"""Generate an image with DALL-E.
|
||||
|
||||
|
@ -136,7 +130,7 @@ def generate_image_with_dalle(
|
|||
n=1,
|
||||
size=f"{size}x{size}",
|
||||
response_format="b64_json",
|
||||
api_key=config.openai_api_key,
|
||||
api_key=agent.config.openai_api_key,
|
||||
)
|
||||
|
||||
logger.info(f"Image Generated for prompt:{prompt}")
|
||||
|
@ -152,7 +146,7 @@ def generate_image_with_dalle(
|
|||
def generate_image_with_sd_webui(
|
||||
prompt: str,
|
||||
filename: str,
|
||||
config: Config,
|
||||
agent: Agent,
|
||||
size: int = 512,
|
||||
negative_prompt: str = "",
|
||||
extra: dict = {},
|
||||
|
@ -169,13 +163,13 @@ def generate_image_with_sd_webui(
|
|||
"""
|
||||
# Create a session and set the basic auth if needed
|
||||
s = requests.Session()
|
||||
if config.sd_webui_auth:
|
||||
username, password = config.sd_webui_auth.split(":")
|
||||
if agent.config.sd_webui_auth:
|
||||
username, password = agent.config.sd_webui_auth.split(":")
|
||||
s.auth = (username, password or "")
|
||||
|
||||
# Generate the images
|
||||
response = requests.post(
|
||||
f"{config.sd_webui_url}/sdapi/v1/txt2img",
|
||||
f"{agent.config.sd_webui_url}/sdapi/v1/txt2img",
|
||||
json={
|
||||
"prompt": prompt,
|
||||
"negative_prompt": negative_prompt,
|
||||
|
|
|
@ -1,21 +1,18 @@
|
|||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from autogpt.agent.agent import Agent
|
||||
from autogpt.commands.command import command
|
||||
from autogpt.llm.utils import call_ai_function
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from autogpt.config import Config
|
||||
|
||||
|
||||
@command(
|
||||
"improve_code",
|
||||
"Get Improved Code",
|
||||
'"suggestions": "<list_of_suggestions>", "code": "<full_code_string>"',
|
||||
)
|
||||
def improve_code(suggestions: list[str], code: str, config: Config) -> str:
|
||||
def improve_code(suggestions: list[str], code: str, agent: Agent) -> str:
|
||||
"""
|
||||
A function that takes in code and suggestions and returns a response from create
|
||||
chat completion api call.
|
||||
|
@ -36,4 +33,6 @@ def improve_code(suggestions: list[str], code: str, config: Config) -> str:
|
|||
" provided, making no other changes."
|
||||
)
|
||||
|
||||
return call_ai_function(function_string, args, description_string, config=config)
|
||||
return call_ai_function(
|
||||
function_string, args, description_string, config=agent.config
|
||||
)
|
||||
|
|
|
@ -1,21 +1,19 @@
|
|||
"""Task Statuses module."""
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING, NoReturn
|
||||
from typing import NoReturn
|
||||
|
||||
from autogpt.agent.agent import Agent
|
||||
from autogpt.commands.command import command
|
||||
from autogpt.logs import logger
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from autogpt.config import Config
|
||||
|
||||
|
||||
@command(
|
||||
"task_complete",
|
||||
"Task Complete (Shutdown)",
|
||||
'"reason": "<reason>"',
|
||||
)
|
||||
def task_complete(reason: str, config: Config) -> NoReturn:
|
||||
def task_complete(reason: str, agent: Agent) -> NoReturn:
|
||||
"""
|
||||
A function that takes in a string and exits the program
|
||||
|
||||
|
|
|
@ -1,20 +1,24 @@
|
|||
"""Browse a webpage and summarize it using the LLM model"""
|
||||
from __future__ import annotations
|
||||
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
import requests
|
||||
from bs4 import BeautifulSoup
|
||||
from requests import Response
|
||||
|
||||
from autogpt.config import Config
|
||||
from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
|
||||
from autogpt.url_utils.validators import validate_url
|
||||
|
||||
session = requests.Session()
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from autogpt.agent.agent import Agent
|
||||
|
||||
|
||||
@validate_url
|
||||
def get_response(
|
||||
url: str, config: Config, timeout: int = 10
|
||||
url: str, agent: Agent, timeout: int = 10
|
||||
) -> tuple[None, str] | tuple[Response, None]:
|
||||
"""Get the response from a URL
|
||||
|
||||
|
@ -30,7 +34,7 @@ def get_response(
|
|||
requests.exceptions.RequestException: If the HTTP request fails
|
||||
"""
|
||||
try:
|
||||
session.headers.update({"User-Agent": config.user_agent})
|
||||
session.headers.update({"User-Agent": agent.config.user_agent})
|
||||
response = session.get(url, timeout=timeout)
|
||||
|
||||
# Check if the response contains an HTTP error
|
||||
|
@ -48,7 +52,7 @@ def get_response(
|
|||
return None, f"Error: {str(re)}"
|
||||
|
||||
|
||||
def scrape_text(url: str, config: Config) -> str:
|
||||
def scrape_text(url: str, agent: Agent) -> str:
|
||||
"""Scrape text from a webpage
|
||||
|
||||
Args:
|
||||
|
@ -57,7 +61,7 @@ def scrape_text(url: str, config: Config) -> str:
|
|||
Returns:
|
||||
str: The scraped text
|
||||
"""
|
||||
response, error_message = get_response(url, config)
|
||||
response, error_message = get_response(url, agent)
|
||||
if error_message:
|
||||
return error_message
|
||||
if not response:
|
||||
|
@ -76,7 +80,7 @@ def scrape_text(url: str, config: Config) -> str:
|
|||
return text
|
||||
|
||||
|
||||
def scrape_links(url: str, config: Config) -> str | list[str]:
|
||||
def scrape_links(url: str, agent: Agent) -> str | list[str]:
|
||||
"""Scrape links from a webpage
|
||||
|
||||
Args:
|
||||
|
@ -85,7 +89,7 @@ def scrape_links(url: str, config: Config) -> str | list[str]:
|
|||
Returns:
|
||||
str | list[str]: The scraped links
|
||||
"""
|
||||
response, error_message = get_response(url, config)
|
||||
response, error_message = get_response(url, agent)
|
||||
if error_message:
|
||||
return error_message
|
||||
if not response:
|
||||
|
|
|
@ -4,7 +4,7 @@ from __future__ import annotations
|
|||
import logging
|
||||
from pathlib import Path
|
||||
from sys import platform
|
||||
from typing import TYPE_CHECKING, Optional, Type
|
||||
from typing import Optional, Type
|
||||
|
||||
from bs4 import BeautifulSoup
|
||||
from selenium.common.exceptions import WebDriverException
|
||||
|
@ -27,15 +27,13 @@ from webdriver_manager.chrome import ChromeDriverManager
|
|||
from webdriver_manager.firefox import GeckoDriverManager
|
||||
from webdriver_manager.microsoft import EdgeChromiumDriverManager as EdgeDriverManager
|
||||
|
||||
from autogpt.agent.agent import Agent
|
||||
from autogpt.commands.command import command
|
||||
from autogpt.logs import logger
|
||||
from autogpt.memory.vector import MemoryItem, get_memory
|
||||
from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
|
||||
from autogpt.url_utils.validators import validate_url
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from autogpt.config import Config
|
||||
|
||||
BrowserOptions = ChromeOptions | EdgeOptions | FirefoxOptions | SafariOptions
|
||||
|
||||
FILE_DIR = Path(__file__).parent.parent
|
||||
|
@ -47,7 +45,7 @@ FILE_DIR = Path(__file__).parent.parent
|
|||
'"url": "<url>", "question": "<what_you_want_to_find_on_website>"',
|
||||
)
|
||||
@validate_url
|
||||
def browse_website(url: str, question: str, config: Config) -> str:
|
||||
def browse_website(url: str, question: str, agent: Agent) -> str:
|
||||
"""Browse a website and return the answer and links to the user
|
||||
|
||||
Args:
|
||||
|
@ -58,7 +56,7 @@ def browse_website(url: str, question: str, config: Config) -> str:
|
|||
Tuple[str, WebDriver]: The answer and links to the user and the webdriver
|
||||
"""
|
||||
try:
|
||||
driver, text = scrape_text_with_selenium(url, config)
|
||||
driver, text = scrape_text_with_selenium(url, agent)
|
||||
except WebDriverException as e:
|
||||
# These errors are often quite long and include lots of context.
|
||||
# Just grab the first line.
|
||||
|
@ -66,7 +64,7 @@ def browse_website(url: str, question: str, config: Config) -> str:
|
|||
return f"Error: {msg}"
|
||||
|
||||
add_header(driver)
|
||||
summary = summarize_memorize_webpage(url, text, question, config, driver)
|
||||
summary = summarize_memorize_webpage(url, text, question, agent, driver)
|
||||
links = scrape_links_with_selenium(driver, url)
|
||||
|
||||
# Limit links to 5
|
||||
|
@ -76,7 +74,7 @@ def browse_website(url: str, question: str, config: Config) -> str:
|
|||
return f"Answer gathered from website: {summary}\n\nLinks: {links}"
|
||||
|
||||
|
||||
def scrape_text_with_selenium(url: str, config: Config) -> tuple[WebDriver, str]:
|
||||
def scrape_text_with_selenium(url: str, agent: Agent) -> tuple[WebDriver, str]:
|
||||
"""Scrape text from a website using selenium
|
||||
|
||||
Args:
|
||||
|
@ -94,23 +92,23 @@ def scrape_text_with_selenium(url: str, config: Config) -> tuple[WebDriver, str]
|
|||
"safari": SafariOptions,
|
||||
}
|
||||
|
||||
options: BrowserOptions = options_available[config.selenium_web_browser]()
|
||||
options: BrowserOptions = options_available[agent.config.selenium_web_browser]()
|
||||
options.add_argument(
|
||||
"user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/112.0.5615.49 Safari/537.36"
|
||||
)
|
||||
|
||||
if config.selenium_web_browser == "firefox":
|
||||
if config.selenium_headless:
|
||||
if agent.config.selenium_web_browser == "firefox":
|
||||
if agent.config.selenium_headless:
|
||||
options.headless = True
|
||||
options.add_argument("--disable-gpu")
|
||||
driver = FirefoxDriver(
|
||||
service=GeckoDriverService(GeckoDriverManager().install()), options=options
|
||||
)
|
||||
elif config.selenium_web_browser == "edge":
|
||||
elif agent.config.selenium_web_browser == "edge":
|
||||
driver = EdgeDriver(
|
||||
service=EdgeDriverService(EdgeDriverManager().install()), options=options
|
||||
)
|
||||
elif config.selenium_web_browser == "safari":
|
||||
elif agent.config.selenium_web_browser == "safari":
|
||||
# Requires a bit more setup on the users end
|
||||
# See https://developer.apple.com/documentation/webkit/testing_with_webdriver_in_safari
|
||||
driver = SafariDriver(options=options)
|
||||
|
@ -120,7 +118,7 @@ def scrape_text_with_selenium(url: str, config: Config) -> tuple[WebDriver, str]
|
|||
options.add_argument("--remote-debugging-port=9222")
|
||||
|
||||
options.add_argument("--no-sandbox")
|
||||
if config.selenium_headless:
|
||||
if agent.config.selenium_headless:
|
||||
options.add_argument("--headless=new")
|
||||
options.add_argument("--disable-gpu")
|
||||
|
||||
|
@ -205,7 +203,7 @@ def summarize_memorize_webpage(
|
|||
url: str,
|
||||
text: str,
|
||||
question: str,
|
||||
config: Config,
|
||||
agent: Agent,
|
||||
driver: Optional[WebDriver] = None,
|
||||
) -> str:
|
||||
"""Summarize text using the OpenAI API
|
||||
|
@ -225,7 +223,7 @@ def summarize_memorize_webpage(
|
|||
text_length = len(text)
|
||||
logger.info(f"Text length: {text_length} characters")
|
||||
|
||||
memory = get_memory(config)
|
||||
memory = get_memory(agent.config)
|
||||
|
||||
new_memory = MemoryItem.from_webpage(text, url, question=question)
|
||||
memory.add(new_memory)
|
||||
|
|
|
@ -2,21 +2,18 @@
|
|||
from __future__ import annotations
|
||||
|
||||
import json
|
||||
from typing import TYPE_CHECKING
|
||||
|
||||
from autogpt.agent.agent import Agent
|
||||
from autogpt.commands.command import command
|
||||
from autogpt.llm.utils import call_ai_function
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from autogpt.config import Config
|
||||
|
||||
|
||||
@command(
|
||||
"write_tests",
|
||||
"Write Tests",
|
||||
'"code": "<full_code_string>", "focus": "<list_of_focus_areas>"',
|
||||
)
|
||||
def write_tests(code: str, focus: list[str], config: Config) -> str:
|
||||
def write_tests(code: str, focus: list[str], agent: Agent) -> str:
|
||||
"""
|
||||
A function that takes in code and focus topics and returns a response from create
|
||||
chat completion api call.
|
||||
|
@ -38,4 +35,6 @@ def write_tests(code: str, focus: list[str], config: Config) -> str:
|
|||
" specific areas if required."
|
||||
)
|
||||
|
||||
return call_ai_function(function_string, args, description_string, config=config)
|
||||
return call_ai_function(
|
||||
function_string, args, description_string, config=agent.config
|
||||
)
|
||||
|
|
|
@ -7,6 +7,7 @@ import yaml
|
|||
from auto_gpt_plugin_template import AutoGPTPluginTemplate
|
||||
from colorama import Fore
|
||||
|
||||
import autogpt
|
||||
from autogpt.singleton import Singleton
|
||||
|
||||
|
||||
|
@ -38,27 +39,30 @@ class Config(metaclass=Singleton):
|
|||
else:
|
||||
self.disabled_command_categories = []
|
||||
|
||||
deny_commands = os.getenv("DENY_COMMANDS")
|
||||
if deny_commands:
|
||||
self.deny_commands = deny_commands.split(",")
|
||||
else:
|
||||
self.deny_commands = []
|
||||
self.shell_command_control = os.getenv("SHELL_COMMAND_CONTROL", "denylist")
|
||||
|
||||
allow_commands = os.getenv("ALLOW_COMMANDS")
|
||||
if allow_commands:
|
||||
self.allow_commands = allow_commands.split(",")
|
||||
# DENY_COMMANDS is deprecated and included for backwards-compatibility
|
||||
shell_denylist = os.getenv("SHELL_DENYLIST", os.getenv("DENY_COMMANDS"))
|
||||
if shell_denylist:
|
||||
self.shell_denylist = shell_denylist.split(",")
|
||||
else:
|
||||
self.allow_commands = []
|
||||
self.shell_denylist = ["sudo", "su"]
|
||||
|
||||
# ALLOW_COMMANDS is deprecated and included for backwards-compatibility
|
||||
shell_allowlist = os.getenv("SHELL_ALLOWLIST", os.getenv("ALLOW_COMMANDS"))
|
||||
if shell_allowlist:
|
||||
self.shell_allowlist = shell_allowlist.split(",")
|
||||
else:
|
||||
self.shell_allowlist = []
|
||||
|
||||
self.ai_settings_file = os.getenv("AI_SETTINGS_FILE", "ai_settings.yaml")
|
||||
self.prompt_settings_file = os.getenv(
|
||||
"PROMPT_SETTINGS_FILE", "prompt_settings.yaml"
|
||||
)
|
||||
self.fast_llm_model = os.getenv("FAST_LLM_MODEL", "gpt-3.5-turbo")
|
||||
self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-4")
|
||||
self.fast_token_limit = int(os.getenv("FAST_TOKEN_LIMIT", 4000))
|
||||
self.smart_token_limit = int(os.getenv("SMART_TOKEN_LIMIT", 8000))
|
||||
self.smart_llm_model = os.getenv("SMART_LLM_MODEL", "gpt-3.5-turbo")
|
||||
self.embedding_model = os.getenv("EMBEDDING_MODEL", "text-embedding-ada-002")
|
||||
|
||||
self.browse_spacy_language_model = os.getenv(
|
||||
"BROWSE_SPACY_LANGUAGE_MODEL", "en_core_web_sm"
|
||||
)
|
||||
|
@ -79,27 +83,41 @@ class Config(metaclass=Singleton):
|
|||
openai.api_type = self.openai_api_type
|
||||
openai.api_base = self.openai_api_base
|
||||
openai.api_version = self.openai_api_version
|
||||
elif os.getenv("OPENAI_API_BASE_URL", None):
|
||||
openai.api_base = os.getenv("OPENAI_API_BASE_URL")
|
||||
|
||||
if self.openai_organization is not None:
|
||||
openai.organization = self.openai_organization
|
||||
|
||||
self.elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY")
|
||||
self.elevenlabs_voice_1_id = os.getenv("ELEVENLABS_VOICE_1_ID")
|
||||
self.elevenlabs_voice_2_id = os.getenv("ELEVENLABS_VOICE_2_ID")
|
||||
# ELEVENLABS_VOICE_1_ID is deprecated and included for backwards-compatibility
|
||||
self.elevenlabs_voice_id = os.getenv(
|
||||
"ELEVENLABS_VOICE_ID", os.getenv("ELEVENLABS_VOICE_1_ID")
|
||||
)
|
||||
self.streamelements_voice = os.getenv("STREAMELEMENTS_VOICE", "Brian")
|
||||
|
||||
self.use_mac_os_tts = False
|
||||
self.use_mac_os_tts = os.getenv("USE_MAC_OS_TTS")
|
||||
# Backwards-compatibility shim for deprecated env variables
|
||||
if os.getenv("USE_MAC_OS_TTS"):
|
||||
default_tts_provider = "macos"
|
||||
elif self.elevenlabs_api_key:
|
||||
default_tts_provider = "elevenlabs"
|
||||
elif os.getenv("USE_BRIAN_TTS"):
|
||||
default_tts_provider = "streamelements"
|
||||
else:
|
||||
default_tts_provider = "gtts"
|
||||
|
||||
self.chat_messages_enabled = os.getenv("CHAT_MESSAGES_ENABLED") == "True"
|
||||
|
||||
self.use_brian_tts = False
|
||||
self.use_brian_tts = os.getenv("USE_BRIAN_TTS")
|
||||
self.text_to_speech_provider = os.getenv(
|
||||
"TEXT_TO_SPEECH_PROVIDER", default_tts_provider
|
||||
)
|
||||
|
||||
self.github_api_key = os.getenv("GITHUB_API_KEY")
|
||||
self.github_username = os.getenv("GITHUB_USERNAME")
|
||||
|
||||
self.google_api_key = os.getenv("GOOGLE_API_KEY")
|
||||
self.custom_search_engine_id = os.getenv("CUSTOM_SEARCH_ENGINE_ID")
|
||||
# CUSTOM_SEARCH_ENGINE_ID is deprecated and included for backwards-compatibility
|
||||
self.google_custom_search_engine_id = os.getenv(
|
||||
"GOOGLE_CUSTOM_SEARCH_ENGINE_ID", os.getenv("CUSTOM_SEARCH_ENGINE_ID")
|
||||
)
|
||||
|
||||
self.image_provider = os.getenv("IMAGE_PROVIDER")
|
||||
self.image_size = int(os.getenv("IMAGE_SIZE", 256))
|
||||
|
@ -107,6 +125,7 @@ class Config(metaclass=Singleton):
|
|||
self.huggingface_image_model = os.getenv(
|
||||
"HUGGINGFACE_IMAGE_MODEL", "CompVis/stable-diffusion-v1-4"
|
||||
)
|
||||
self.audio_to_text_provider = os.getenv("AUDIO_TO_TEXT_PROVIDER", "huggingface")
|
||||
self.huggingface_audio_to_text_model = os.getenv(
|
||||
"HUGGINGFACE_AUDIO_TO_TEXT_MODEL"
|
||||
)
|
||||
|
@ -138,18 +157,37 @@ class Config(metaclass=Singleton):
|
|||
self.plugins: List[AutoGPTPluginTemplate] = []
|
||||
self.plugins_openai = []
|
||||
|
||||
# Deprecated. Kept for backwards-compatibility. Will remove in a future version.
|
||||
plugins_allowlist = os.getenv("ALLOWLISTED_PLUGINS")
|
||||
if plugins_allowlist:
|
||||
self.plugins_allowlist = plugins_allowlist.split(",")
|
||||
else:
|
||||
self.plugins_allowlist = []
|
||||
|
||||
# Deprecated. Kept for backwards-compatibility. Will remove in a future version.
|
||||
plugins_denylist = os.getenv("DENYLISTED_PLUGINS")
|
||||
if plugins_denylist:
|
||||
self.plugins_denylist = plugins_denylist.split(",")
|
||||
else:
|
||||
self.plugins_denylist = []
|
||||
|
||||
# Avoid circular imports
|
||||
from autogpt.plugins import DEFAULT_PLUGINS_CONFIG_FILE
|
||||
|
||||
self.plugins_config_file = os.getenv(
|
||||
"PLUGINS_CONFIG_FILE", DEFAULT_PLUGINS_CONFIG_FILE
|
||||
)
|
||||
self.load_plugins_config()
|
||||
|
||||
self.chat_messages_enabled = os.getenv("CHAT_MESSAGES_ENABLED") == "True"
|
||||
|
||||
def load_plugins_config(self) -> "autogpt.plugins.PluginsConfig":
|
||||
# Avoid circular import
|
||||
from autogpt.plugins.plugins_config import PluginsConfig
|
||||
|
||||
self.plugins_config = PluginsConfig.load_config(global_config=self)
|
||||
return self.plugins_config
|
||||
|
||||
def get_azure_deployment_id_for_model(self, model: str) -> str:
|
||||
"""
|
||||
Returns the relevant deployment id for the model specified.
|
||||
|
@ -217,14 +255,6 @@ class Config(metaclass=Singleton):
|
|||
"""Set the smart LLM model value."""
|
||||
self.smart_llm_model = value
|
||||
|
||||
def set_fast_token_limit(self, value: int) -> None:
|
||||
"""Set the fast token limit value."""
|
||||
self.fast_token_limit = value
|
||||
|
||||
def set_smart_token_limit(self, value: int) -> None:
|
||||
"""Set the smart token limit value."""
|
||||
self.smart_token_limit = value
|
||||
|
||||
def set_embedding_model(self, value: str) -> None:
|
||||
"""Set the model to use for creating embeddings."""
|
||||
self.embedding_model = value
|
||||
|
@ -239,7 +269,7 @@ class Config(metaclass=Singleton):
|
|||
|
||||
def set_elevenlabs_voice_1_id(self, value: str) -> None:
|
||||
"""Set the ElevenLabs Voice 1 ID value."""
|
||||
self.elevenlabs_voice_1_id = value
|
||||
self.elevenlabs_voice_id = value
|
||||
|
||||
def set_elevenlabs_voice_2_id(self, value: str) -> None:
|
||||
"""Set the ElevenLabs Voice 2 ID value."""
|
||||
|
@ -251,7 +281,7 @@ class Config(metaclass=Singleton):
|
|||
|
||||
def set_custom_search_engine_id(self, value: str) -> None:
|
||||
"""Set the custom search engine id value."""
|
||||
self.custom_search_engine_id = value
|
||||
self.google_custom_search_engine_id = value
|
||||
|
||||
def set_debug_mode(self, value: bool) -> None:
|
||||
"""Set the debug mode value."""
|
||||
|
|
|
@ -1,121 +0,0 @@
|
|||
"""This module contains functions to fix JSON strings using general programmatic approaches, suitable for addressing
|
||||
common JSON formatting issues."""
|
||||
from __future__ import annotations
|
||||
|
||||
import contextlib
|
||||
import json
|
||||
import re
|
||||
from typing import Optional
|
||||
|
||||
from autogpt.config import Config
|
||||
from autogpt.json_utils.utilities import extract_char_position
|
||||
from autogpt.logs import logger
|
||||
|
||||
CFG = Config()
|
||||
|
||||
|
||||
def fix_invalid_escape(json_to_load: str, error_message: str) -> str:
|
||||
"""Fix invalid escape sequences in JSON strings.
|
||||
|
||||
Args:
|
||||
json_to_load (str): The JSON string.
|
||||
error_message (str): The error message from the JSONDecodeError
|
||||
exception.
|
||||
|
||||
Returns:
|
||||
str: The JSON string with invalid escape sequences fixed.
|
||||
"""
|
||||
while error_message.startswith("Invalid \\escape"):
|
||||
bad_escape_location = extract_char_position(error_message)
|
||||
json_to_load = (
|
||||
json_to_load[:bad_escape_location] + json_to_load[bad_escape_location + 1 :]
|
||||
)
|
||||
try:
|
||||
json.loads(json_to_load)
|
||||
return json_to_load
|
||||
except json.JSONDecodeError as e:
|
||||
logger.debug("json loads error - fix invalid escape", e)
|
||||
error_message = str(e)
|
||||
return json_to_load
|
||||
|
||||
|
||||
def balance_braces(json_string: str) -> Optional[str]:
|
||||
"""
|
||||
Balance the braces in a JSON string.
|
||||
|
||||
Args:
|
||||
json_string (str): The JSON string.
|
||||
|
||||
Returns:
|
||||
str: The JSON string with braces balanced.
|
||||
"""
|
||||
|
||||
open_braces_count = json_string.count("{")
|
||||
close_braces_count = json_string.count("}")
|
||||
|
||||
while open_braces_count > close_braces_count:
|
||||
json_string += "}"
|
||||
close_braces_count += 1
|
||||
|
||||
while close_braces_count > open_braces_count:
|
||||
json_string = json_string.rstrip("}")
|
||||
close_braces_count -= 1
|
||||
|
||||
with contextlib.suppress(json.JSONDecodeError):
|
||||
json.loads(json_string)
|
||||
return json_string
|
||||
|
||||
|
||||
def add_quotes_to_property_names(json_string: str) -> str:
|
||||
"""
|
||||
Add quotes to property names in a JSON string.
|
||||
|
||||
Args:
|
||||
json_string (str): The JSON string.
|
||||
|
||||
Returns:
|
||||
str: The JSON string with quotes added to property names.
|
||||
"""
|
||||
|
||||
def replace_func(match: re.Match) -> str:
|
||||
return f'"{match[1]}":'
|
||||
|
||||
property_name_pattern = re.compile(r"(\w+):")
|
||||
corrected_json_string = property_name_pattern.sub(replace_func, json_string)
|
||||
|
||||
try:
|
||||
json.loads(corrected_json_string)
|
||||
return corrected_json_string
|
||||
except json.JSONDecodeError as e:
|
||||
raise e
|
||||
|
||||
|
||||
def correct_json(json_to_load: str) -> str:
|
||||
"""
|
||||
Correct common JSON errors.
|
||||
Args:
|
||||
json_to_load (str): The JSON string.
|
||||
"""
|
||||
|
||||
try:
|
||||
logger.debug("json", json_to_load)
|
||||
json.loads(json_to_load)
|
||||
return json_to_load
|
||||
except json.JSONDecodeError as e:
|
||||
logger.debug("json loads error", e)
|
||||
error_message = str(e)
|
||||
if error_message.startswith("Invalid \\escape"):
|
||||
json_to_load = fix_invalid_escape(json_to_load, error_message)
|
||||
if error_message.startswith(
|
||||
"Expecting property name enclosed in double quotes"
|
||||
):
|
||||
json_to_load = add_quotes_to_property_names(json_to_load)
|
||||
try:
|
||||
json.loads(json_to_load)
|
||||
return json_to_load
|
||||
except json.JSONDecodeError as e:
|
||||
logger.debug("json loads error - add quotes", e)
|
||||
error_message = str(e)
|
||||
if balanced_str := balance_braces(json_to_load):
|
||||
return balanced_str
|
||||
return json_to_load
|
|
@ -1,239 +0,0 @@
|
|||
"""This module contains functions to fix JSON strings generated by LLM models, such as ChatGPT, using the assistance
|
||||
of the ChatGPT API or LLM models."""
|
||||
from __future__ import annotations
|
||||
|
||||
import contextlib
|
||||
import json
|
||||
from typing import Any, Dict
|
||||
|
||||
from colorama import Fore
|
||||
from regex import regex
|
||||
|
||||
from autogpt.config import Config
|
||||
from autogpt.json_utils.json_fix_general import correct_json
|
||||
from autogpt.llm.utils import call_ai_function
|
||||
from autogpt.logs import logger
|
||||
from autogpt.speech import say_text
|
||||
|
||||
JSON_SCHEMA = """
|
||||
{
|
||||
"command": {
|
||||
"name": "command name",
|
||||
"args": {
|
||||
"arg name": "value"
|
||||
}
|
||||
},
|
||||
"thoughts":
|
||||
{
|
||||
"text": "thought",
|
||||
"reasoning": "reasoning",
|
||||
"plan": "- short bulleted\n- list that conveys\n- long-term plan",
|
||||
"criticism": "constructive self-criticism",
|
||||
"speak": "thoughts summary to say to user"
|
||||
}
|
||||
}
|
||||
"""
|
||||
|
||||
CFG = Config()
|
||||
|
||||
|
||||
def auto_fix_json(json_string: str, schema: str) -> str:
|
||||
"""Fix the given JSON string to make it parseable and fully compliant with
|
||||
the provided schema using GPT-3.
|
||||
|
||||
Args:
|
||||
json_string (str): The JSON string to fix.
|
||||
schema (str): The schema to use to fix the JSON.
|
||||
Returns:
|
||||
str: The fixed JSON string.
|
||||
"""
|
||||
# Try to fix the JSON using GPT:
|
||||
function_string = "def fix_json(json_string: str, schema:str=None) -> str:"
|
||||
args = [f"'''{json_string}'''", f"'''{schema}'''"]
|
||||
description_string = (
|
||||
"This function takes a JSON string and ensures that it"
|
||||
" is parseable and fully compliant with the provided schema. If an object"
|
||||
" or field specified in the schema isn't contained within the correct JSON,"
|
||||
" it is omitted. The function also escapes any double quotes within JSON"
|
||||
" string values to ensure that they are valid. If the JSON string contains"
|
||||
" any None or NaN values, they are replaced with null before being parsed."
|
||||
)
|
||||
|
||||
# If it doesn't already start with a "`", add one:
|
||||
if not json_string.startswith("`"):
|
||||
json_string = "```json\n" + json_string + "\n```"
|
||||
result_string = call_ai_function(
|
||||
function_string, args, description_string, model=CFG.fast_llm_model
|
||||
)
|
||||
logger.debug("------------ JSON FIX ATTEMPT ---------------")
|
||||
logger.debug(f"Original JSON: {json_string}")
|
||||
logger.debug("-----------")
|
||||
logger.debug(f"Fixed JSON: {result_string}")
|
||||
logger.debug("----------- END OF FIX ATTEMPT ----------------")
|
||||
|
||||
try:
|
||||
json.loads(result_string) # just check the validity
|
||||
return result_string
|
||||
except json.JSONDecodeError: # noqa: E722
|
||||
# Get the call stack:
|
||||
# import traceback
|
||||
# call_stack = traceback.format_exc()
|
||||
# print(f"Failed to fix JSON: '{json_string}' "+call_stack)
|
||||
return "failed"
|
||||
|
||||
|
||||
def fix_json_using_multiple_techniques(assistant_reply: str) -> Dict[Any, Any]:
|
||||
"""Fix the given JSON string to make it parseable and fully compliant with two techniques.
|
||||
|
||||
Args:
|
||||
json_string (str): The JSON string to fix.
|
||||
|
||||
Returns:
|
||||
str: The fixed JSON string.
|
||||
"""
|
||||
assistant_reply = assistant_reply.strip()
|
||||
if assistant_reply.startswith("```json"):
|
||||
assistant_reply = assistant_reply[7:]
|
||||
if assistant_reply.endswith("```"):
|
||||
assistant_reply = assistant_reply[:-3]
|
||||
try:
|
||||
return json.loads(assistant_reply) # just check the validity
|
||||
except json.JSONDecodeError: # noqa: E722
|
||||
pass
|
||||
|
||||
if assistant_reply.startswith("json "):
|
||||
assistant_reply = assistant_reply[5:]
|
||||
assistant_reply = assistant_reply.strip()
|
||||
try:
|
||||
return json.loads(assistant_reply) # just check the validity
|
||||
except json.JSONDecodeError: # noqa: E722
|
||||
pass
|
||||
|
||||
# Parse and print Assistant response
|
||||
assistant_reply_json = fix_and_parse_json(assistant_reply)
|
||||
logger.debug("Assistant reply JSON: %s", str(assistant_reply_json))
|
||||
if assistant_reply_json == {}:
|
||||
assistant_reply_json = attempt_to_fix_json_by_finding_outermost_brackets(
|
||||
assistant_reply
|
||||
)
|
||||
|
||||
logger.debug("Assistant reply JSON 2: %s", str(assistant_reply_json))
|
||||
if assistant_reply_json != {}:
|
||||
return assistant_reply_json
|
||||
|
||||
logger.error(
|
||||
"Error: The following AI output couldn't be converted to a JSON:\n",
|
||||
assistant_reply,
|
||||
)
|
||||
if CFG.speak_mode:
|
||||
say_text("I have received an invalid JSON response from the OpenAI API.")
|
||||
|
||||
return {}
|
||||
|
||||
|
||||
def fix_and_parse_json(
|
||||
json_to_load: str, try_to_fix_with_gpt: bool = True
|
||||
) -> Dict[Any, Any]:
|
||||
"""Fix and parse JSON string
|
||||
|
||||
Args:
|
||||
json_to_load (str): The JSON string.
|
||||
try_to_fix_with_gpt (bool, optional): Try to fix the JSON with GPT.
|
||||
Defaults to True.
|
||||
|
||||
Returns:
|
||||
str or dict[Any, Any]: The parsed JSON.
|
||||
"""
|
||||
|
||||
with contextlib.suppress(json.JSONDecodeError):
|
||||
json_to_load = json_to_load.replace("\t", "")
|
||||
return json.loads(json_to_load)
|
||||
|
||||
with contextlib.suppress(json.JSONDecodeError):
|
||||
json_to_load = correct_json(json_to_load)
|
||||
return json.loads(json_to_load)
|
||||
# Let's do something manually:
|
||||
# sometimes GPT responds with something BEFORE the braces:
|
||||
# "I'm sorry, I don't understand. Please try again."
|
||||
# {"text": "I'm sorry, I don't understand. Please try again.",
|
||||
# "confidence": 0.0}
|
||||
# So let's try to find the first brace and then parse the rest
|
||||
# of the string
|
||||
try:
|
||||
brace_index = json_to_load.index("{")
|
||||
maybe_fixed_json = json_to_load[brace_index:]
|
||||
last_brace_index = maybe_fixed_json.rindex("}")
|
||||
maybe_fixed_json = maybe_fixed_json[: last_brace_index + 1]
|
||||
return json.loads(maybe_fixed_json)
|
||||
except (json.JSONDecodeError, ValueError) as e:
|
||||
return try_ai_fix(try_to_fix_with_gpt, e, json_to_load)
|
||||
|
||||
|
||||
def try_ai_fix(
|
||||
try_to_fix_with_gpt: bool, exception: Exception, json_to_load: str
|
||||
) -> Dict[Any, Any]:
|
||||
"""Try to fix the JSON with the AI
|
||||
|
||||
Args:
|
||||
try_to_fix_with_gpt (bool): Whether to try to fix the JSON with the AI.
|
||||
exception (Exception): The exception that was raised.
|
||||
json_to_load (str): The JSON string to load.
|
||||
|
||||
Raises:
|
||||
exception: If try_to_fix_with_gpt is False.
|
||||
|
||||
Returns:
|
||||
str or dict[Any, Any]: The JSON string or dictionary.
|
||||
"""
|
||||
if not try_to_fix_with_gpt:
|
||||
raise exception
|
||||
if CFG.debug_mode:
|
||||
logger.warn(
|
||||
"Warning: Failed to parse AI output, attempting to fix."
|
||||
"\n If you see this warning frequently, it's likely that"
|
||||
" your prompt is confusing the AI. Try changing it up"
|
||||
" slightly."
|
||||
)
|
||||
# Now try to fix this up using the ai_functions
|
||||
ai_fixed_json = auto_fix_json(json_to_load, JSON_SCHEMA)
|
||||
|
||||
if ai_fixed_json != "failed":
|
||||
return json.loads(ai_fixed_json)
|
||||
# This allows the AI to react to the error message,
|
||||
# which usually results in it correcting its ways.
|
||||
# logger.error("Failed to fix AI output, telling the AI.")
|
||||
return {}
|
||||
|
||||
|
||||
def attempt_to_fix_json_by_finding_outermost_brackets(json_string: str):
|
||||
if CFG.speak_mode and CFG.debug_mode:
|
||||
say_text(
|
||||
"I have received an invalid JSON response from the OpenAI API. "
|
||||
"Trying to fix it now."
|
||||
)
|
||||
logger.error("Attempting to fix JSON by finding outermost brackets\n")
|
||||
|
||||
try:
|
||||
json_pattern = regex.compile(r"\{(?:[^{}]|(?R))*\}")
|
||||
json_match = json_pattern.search(json_string)
|
||||
|
||||
if json_match:
|
||||
# Extract the valid JSON object from the string
|
||||
json_string = json_match.group(0)
|
||||
logger.typewriter_log(
|
||||
title="Apparently json was fixed.", title_color=Fore.GREEN
|
||||
)
|
||||
if CFG.speak_mode and CFG.debug_mode:
|
||||
say_text("Apparently json was fixed.")
|
||||
else:
|
||||
return {}
|
||||
|
||||
except (json.JSONDecodeError, ValueError):
|
||||
if CFG.debug_mode:
|
||||
logger.error(f"Error: Invalid JSON: {json_string}\n")
|
||||
if CFG.speak_mode:
|
||||
say_text("Didn't work. I will have to ignore this response then.")
|
||||
logger.error("Error: Invalid JSON, setting it to empty JSON now.\n")
|
||||
json_string = {}
|
||||
|
||||
return fix_and_parse_json(json_string)
|
|
@ -5,11 +5,25 @@
|
|||
"thoughts": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"text": {"type": "string"},
|
||||
"reasoning": {"type": "string"},
|
||||
"plan": {"type": "string"},
|
||||
"criticism": {"type": "string"},
|
||||
"speak": {"type": "string"}
|
||||
"text": {
|
||||
"type": "string",
|
||||
"description": "thoughts"
|
||||
},
|
||||
"reasoning": {
|
||||
"type": "string"
|
||||
},
|
||||
"plan": {
|
||||
"type": "string",
|
||||
"description": "- short bulleted\n- list that conveys\n- long-term plan"
|
||||
},
|
||||
"criticism": {
|
||||
"type": "string",
|
||||
"description": "constructive self-criticism"
|
||||
},
|
||||
"speak": {
|
||||
"type": "string",
|
||||
"description": "thoughts summary to say to user"
|
||||
}
|
||||
},
|
||||
"required": ["text", "reasoning", "plan", "criticism", "speak"],
|
||||
"additionalProperties": false
|
||||
|
|
|
@ -1,7 +1,8 @@
|
|||
"""Utilities for the json_fixes package."""
|
||||
import ast
|
||||
import json
|
||||
import os.path
|
||||
import re
|
||||
from typing import Any
|
||||
|
||||
from jsonschema import Draft7Validator
|
||||
|
||||
|
@ -12,37 +13,47 @@ CFG = Config()
|
|||
LLM_DEFAULT_RESPONSE_FORMAT = "llm_response_format_1"
|
||||
|
||||
|
||||
def extract_char_position(error_message: str) -> int:
|
||||
"""Extract the character position from the JSONDecodeError message.
|
||||
def extract_json_from_response(response_content: str) -> dict:
|
||||
# Sometimes the response includes the JSON in a code block with ```
|
||||
if response_content.startswith("```") and response_content.endswith("```"):
|
||||
# Discard the first and last ```, then re-join in case the response naturally included ```
|
||||
response_content = "```".join(response_content.split("```")[1:-1])
|
||||
|
||||
Args:
|
||||
error_message (str): The error message from the JSONDecodeError
|
||||
exception.
|
||||
|
||||
Returns:
|
||||
int: The character position.
|
||||
"""
|
||||
|
||||
char_pattern = re.compile(r"\(char (\d+)\)")
|
||||
if match := char_pattern.search(error_message):
|
||||
return int(match[1])
|
||||
else:
|
||||
raise ValueError("Character position not found in the error message.")
|
||||
# response content comes from OpenAI as a Python `str(content_dict)`, literal_eval reverses this
|
||||
try:
|
||||
return ast.literal_eval(response_content)
|
||||
except BaseException as e:
|
||||
logger.error(f"Error parsing JSON response with literal_eval {e}")
|
||||
# TODO: How to raise an error here without causing the program to exit?
|
||||
return {}
|
||||
|
||||
|
||||
def validate_json(json_object: object, schema_name: str) -> dict | None:
|
||||
def llm_response_schema(
|
||||
schema_name: str = LLM_DEFAULT_RESPONSE_FORMAT,
|
||||
) -> dict[str, Any]:
|
||||
filename = os.path.join(os.path.dirname(__file__), f"{schema_name}.json")
|
||||
with open(filename, "r") as f:
|
||||
return json.load(f)
|
||||
|
||||
|
||||
def validate_json(
|
||||
json_object: object, schema_name: str = LLM_DEFAULT_RESPONSE_FORMAT
|
||||
) -> bool:
|
||||
"""
|
||||
:type schema_name: object
|
||||
:param schema_name: str
|
||||
:type json_object: object
|
||||
|
||||
Returns:
|
||||
bool: Whether the json_object is valid or not
|
||||
"""
|
||||
scheme_file = os.path.join(os.path.dirname(__file__), f"{schema_name}.json")
|
||||
with open(scheme_file, "r") as f:
|
||||
schema = json.load(f)
|
||||
schema = llm_response_schema(schema_name)
|
||||
validator = Draft7Validator(schema)
|
||||
|
||||
if errors := sorted(validator.iter_errors(json_object), key=lambda e: e.path):
|
||||
logger.error("The JSON object is invalid.")
|
||||
for error in errors:
|
||||
logger.error(f"JSON Validation Error: {error}")
|
||||
|
||||
if CFG.debug_mode:
|
||||
logger.error(
|
||||
json.dumps(json_object, indent=4)
|
||||
|
@ -51,10 +62,11 @@ def validate_json(json_object: object, schema_name: str) -> dict | None:
|
|||
|
||||
for error in errors:
|
||||
logger.error(f"Error: {error.message}")
|
||||
else:
|
||||
return False
|
||||
|
||||
logger.debug("The JSON object is valid.")
|
||||
|
||||
return json_object
|
||||
return True
|
||||
|
||||
|
||||
def validate_json_string(json_string: str, schema_name: str) -> dict | None:
|
||||
|
@ -66,7 +78,9 @@ def validate_json_string(json_string: str, schema_name: str) -> dict | None:
|
|||
|
||||
try:
|
||||
json_loaded = json.loads(json_string)
|
||||
return validate_json(json_loaded, schema_name)
|
||||
if not validate_json(json_loaded, schema_name):
|
||||
return None
|
||||
return json_loaded
|
||||
except:
|
||||
return None
|
||||
|
||||
|
|
|
@ -6,8 +6,8 @@ import openai
|
|||
from openai import Model
|
||||
|
||||
from autogpt.config import Config
|
||||
from autogpt.llm.base import MessageDict
|
||||
from autogpt.llm.modelsinfo import COSTS
|
||||
from autogpt.llm.base import CompletionModelInfo, MessageDict
|
||||
from autogpt.llm.providers.openai import OPEN_AI_MODELS
|
||||
from autogpt.logs import logger
|
||||
from autogpt.singleton import Singleton
|
||||
|
||||
|
@ -34,7 +34,7 @@ class ApiManager(metaclass=Singleton):
|
|||
temperature: float = None,
|
||||
max_tokens: int | None = None,
|
||||
deployment_id=None,
|
||||
) -> str:
|
||||
):
|
||||
"""
|
||||
Create a chat completion and update the cost.
|
||||
Args:
|
||||
|
@ -83,13 +83,16 @@ class ApiManager(metaclass=Singleton):
|
|||
"""
|
||||
# the .model property in API responses can contain version suffixes like -v2
|
||||
model = model[:-3] if model.endswith("-v2") else model
|
||||
model_info = OPEN_AI_MODELS[model]
|
||||
|
||||
self.total_prompt_tokens += prompt_tokens
|
||||
self.total_completion_tokens += completion_tokens
|
||||
self.total_cost += prompt_tokens * model_info.prompt_token_cost / 1000
|
||||
if issubclass(type(model_info), CompletionModelInfo):
|
||||
self.total_cost += (
|
||||
prompt_tokens * COSTS[model]["prompt"]
|
||||
+ completion_tokens * COSTS[model]["completion"]
|
||||
) / 1000
|
||||
completion_tokens * model_info.completion_token_cost / 1000
|
||||
)
|
||||
|
||||
logger.debug(f"Total running cost: ${self.total_cost:.3f}")
|
||||
|
||||
def set_total_budget(self, total_budget):
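For context, the cost update in this hunk multiplies token counts by per-1K-token prices. Below is a minimal sketch of the same arithmetic, using the gpt-3.5-turbo-0613 prices that appear later in this diff; the token counts are illustrative values, not taken from the code.

```python
# Illustrative only: prices are USD per 1000 tokens, matching the values
# listed for gpt-3.5-turbo-0613 further down in this diff.
prompt_token_cost = 0.0015
completion_token_cost = 0.002

prompt_tokens = 1000       # example prompt size
completion_tokens = 500    # example completion size

cost = (
    prompt_tokens * prompt_token_cost / 1000
    + completion_tokens * completion_token_cost / 1000
)
print(f"${cost:.4f}")  # -> $0.0025
```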
|
||||
|
|
|
@ -31,22 +31,27 @@ class ModelInfo:
|
|||
|
||||
Would be lovely to eventually get this directly from APIs, but needs to be scraped from
|
||||
websites for now.
|
||||
|
||||
"""
|
||||
|
||||
name: str
|
||||
prompt_token_cost: float
|
||||
completion_token_cost: float
|
||||
max_tokens: int
|
||||
prompt_token_cost: float
|
||||
|
||||
|
||||
@dataclass
|
||||
class ChatModelInfo(ModelInfo):
|
||||
class CompletionModelInfo(ModelInfo):
|
||||
"""Struct for generic completion model information."""
|
||||
|
||||
completion_token_cost: float
|
||||
|
||||
|
||||
@dataclass
|
||||
class ChatModelInfo(CompletionModelInfo):
|
||||
"""Struct for chat model information."""
|
||||
|
||||
|
||||
@dataclass
|
||||
class TextModelInfo(ModelInfo):
|
||||
class TextModelInfo(CompletionModelInfo):
|
||||
"""Struct for text completion model information."""
|
||||
|
||||
|
||||
|
|
|
@ -150,7 +150,7 @@ def chat_with_ai(
|
|||
if not plugin.can_handle_on_planning():
|
||||
continue
|
||||
plugin_response = plugin.on_planning(
|
||||
agent.config.prompt_generator, message_sequence.raw()
|
||||
agent.ai_config.prompt_generator, message_sequence.raw()
|
||||
)
|
||||
if not plugin_response or plugin_response == "":
|
||||
continue
|
||||
|
@ -181,7 +181,7 @@ def chat_with_ai(
|
|||
logger.debug("")
|
||||
logger.debug("----------- END OF CONTEXT ----------------")
|
||||
agent.log_cycle_handler.log_cycle(
|
||||
agent.config.ai_name,
|
||||
agent.ai_name,
|
||||
agent.created_at,
|
||||
agent.cycle_count,
|
||||
message_sequence.raw(),
|
||||
|
|
|
@ -1,11 +0,0 @@
|
|||
COSTS = {
|
||||
"gpt-3.5-turbo": {"prompt": 0.002, "completion": 0.002},
|
||||
"gpt-3.5-turbo-0301": {"prompt": 0.002, "completion": 0.002},
|
||||
"gpt-4-0314": {"prompt": 0.03, "completion": 0.06},
|
||||
"gpt-4": {"prompt": 0.03, "completion": 0.06},
|
||||
"gpt-4-0314": {"prompt": 0.03, "completion": 0.06},
|
||||
"gpt-4-32k": {"prompt": 0.06, "completion": 0.12},
|
||||
"gpt-4-32k-0314": {"prompt": 0.06, "completion": 0.12},
|
||||
"text-embedding-ada-002": {"prompt": 0.0004, "completion": 0.0},
|
||||
"text-davinci-003": {"prompt": 0.02, "completion": 0.02},
|
||||
}
|
|
@ -3,23 +3,23 @@ from autogpt.llm.base import ChatModelInfo, EmbeddingModelInfo, TextModelInfo
|
|||
OPEN_AI_CHAT_MODELS = {
|
||||
info.name: info
|
||||
for info in [
|
||||
ChatModelInfo(
|
||||
name="gpt-3.5-turbo",
|
||||
prompt_token_cost=0.002,
|
||||
completion_token_cost=0.002,
|
||||
max_tokens=4096,
|
||||
),
|
||||
ChatModelInfo(
|
||||
name="gpt-3.5-turbo-0301",
|
||||
prompt_token_cost=0.002,
|
||||
prompt_token_cost=0.0015,
|
||||
completion_token_cost=0.002,
|
||||
max_tokens=4096,
|
||||
),
|
||||
ChatModelInfo(
|
||||
name="gpt-4",
|
||||
prompt_token_cost=0.03,
|
||||
completion_token_cost=0.06,
|
||||
max_tokens=8192,
|
||||
name="gpt-3.5-turbo-0613",
|
||||
prompt_token_cost=0.0015,
|
||||
completion_token_cost=0.002,
|
||||
max_tokens=4096,
|
||||
),
|
||||
ChatModelInfo(
|
||||
name="gpt-3.5-turbo-16k-0613",
|
||||
prompt_token_cost=0.003,
|
||||
completion_token_cost=0.004,
|
||||
max_tokens=16384,
|
||||
),
|
||||
ChatModelInfo(
|
||||
name="gpt-4-0314",
|
||||
|
@ -28,10 +28,10 @@ OPEN_AI_CHAT_MODELS = {
|
|||
max_tokens=8192,
|
||||
),
|
||||
ChatModelInfo(
|
||||
name="gpt-4-32k",
|
||||
prompt_token_cost=0.06,
|
||||
completion_token_cost=0.12,
|
||||
max_tokens=32768,
|
||||
name="gpt-4-0613",
|
||||
prompt_token_cost=0.03,
|
||||
completion_token_cost=0.06,
|
||||
max_tokens=8192,
|
||||
),
|
||||
ChatModelInfo(
|
||||
name="gpt-4-32k-0314",
|
||||
|
@ -39,8 +39,25 @@ OPEN_AI_CHAT_MODELS = {
|
|||
completion_token_cost=0.12,
|
||||
max_tokens=32768,
|
||||
),
|
||||
ChatModelInfo(
|
||||
name="gpt-4-32k-0613",
|
||||
prompt_token_cost=0.06,
|
||||
completion_token_cost=0.12,
|
||||
max_tokens=32768,
|
||||
),
|
||||
]
|
||||
}
|
||||
# Set aliases for rolling model IDs
|
||||
chat_model_mapping = {
|
||||
"gpt-3.5-turbo": "gpt-3.5-turbo-0301",
|
||||
"gpt-3.5-turbo-16k": "gpt-3.5-turbo-16k-0613",
|
||||
"gpt-4": "gpt-4-0314",
|
||||
"gpt-4-32k": "gpt-4-32k-0314",
|
||||
}
|
||||
for alias, target in chat_model_mapping.items():
|
||||
alias_info = ChatModelInfo(**OPEN_AI_CHAT_MODELS[target].__dict__)
|
||||
alias_info.name = alias
|
||||
OPEN_AI_CHAT_MODELS[alias] = alias_info
|
||||
|
||||
OPEN_AI_TEXT_MODELS = {
|
||||
info.name: info
|
||||
|
@ -59,8 +76,7 @@ OPEN_AI_EMBEDDING_MODELS = {
|
|||
for info in [
|
||||
EmbeddingModelInfo(
|
||||
name="text-embedding-ada-002",
|
||||
prompt_token_cost=0.0004,
|
||||
completion_token_cost=0.0,
|
||||
prompt_token_cost=0.0001,
|
||||
max_tokens=8191,
|
||||
embedding_dimensions=1536,
|
||||
),
|
||||
|
|
|
@ -17,6 +17,7 @@ from autogpt.logs import logger
|
|||
|
||||
from ..api_manager import ApiManager
|
||||
from ..base import ChatSequence, Message
|
||||
from ..providers.openai import OPEN_AI_CHAT_MODELS
|
||||
from .token_counter import *
|
||||
|
||||
|
||||
|
@ -205,6 +206,8 @@ def create_chat_completion(
|
|||
model = prompt.model.name
|
||||
if temperature is None:
|
||||
temperature = cfg.temperature
|
||||
if max_tokens is None:
|
||||
max_tokens = OPEN_AI_CHAT_MODELS[model].max_tokens - prompt.token_length
|
||||
|
||||
logger.debug(
|
||||
f"{Fore.GREEN}Creating chat completion with model {model}, temperature {temperature}, max_tokens {max_tokens}{Fore.RESET}"
|
||||
|
@ -239,7 +242,7 @@ def create_chat_completion(
|
|||
max_tokens=max_tokens,
|
||||
)
|
||||
|
||||
resp = response.choices[0].message["content"]
|
||||
resp = response.choices[0].message.content
|
||||
for plugin in cfg.plugins:
|
||||
if not plugin.can_handle_on_response():
|
||||
continue
|
||||
|
|
|
@ -24,32 +24,28 @@ def count_message_tokens(
|
|||
Returns:
|
||||
int: The number of tokens used by the list of messages.
|
||||
"""
|
||||
try:
|
||||
encoding = tiktoken.encoding_for_model(model)
|
||||
except KeyError:
|
||||
logger.warn("Warning: model not found. Using cl100k_base encoding.")
|
||||
encoding = tiktoken.get_encoding("cl100k_base")
|
||||
if model == "gpt-3.5-turbo":
|
||||
# !Note: gpt-3.5-turbo may change over time.
|
||||
# Returning num tokens assuming gpt-3.5-turbo-0301.")
|
||||
return count_message_tokens(messages, model="gpt-3.5-turbo-0301")
|
||||
elif model == "gpt-4":
|
||||
# !Note: gpt-4 may change over time. Returning num tokens assuming gpt-4-0314.")
|
||||
return count_message_tokens(messages, model="gpt-4-0314")
|
||||
elif model == "gpt-3.5-turbo-0301":
|
||||
if model.startswith("gpt-3.5-turbo"):
|
||||
tokens_per_message = (
|
||||
4 # every message follows <|start|>{role/name}\n{content}<|end|>\n
|
||||
)
|
||||
tokens_per_name = -1 # if there's a name, the role is omitted
|
||||
elif model == "gpt-4-0314":
|
||||
encoding_model = "gpt-3.5-turbo"
|
||||
elif model.startswith("gpt-4"):
|
||||
tokens_per_message = 3
|
||||
tokens_per_name = 1
|
||||
encoding_model = "gpt-4"
|
||||
else:
|
||||
raise NotImplementedError(
|
||||
f"num_tokens_from_messages() is not implemented for model {model}.\n"
|
||||
f"count_message_tokens() is not implemented for model {model}.\n"
|
||||
" See https://github.com/openai/openai-python/blob/main/chatml.md for"
|
||||
" information on how messages are converted to tokens."
|
||||
)
|
||||
try:
|
||||
encoding = tiktoken.encoding_for_model(encoding_model)
|
||||
except KeyError:
|
||||
logger.warn("Warning: model not found. Using cl100k_base encoding.")
|
||||
encoding = tiktoken.get_encoding("cl100k_base")
|
||||
|
||||
num_tokens = 0
|
||||
for message in messages:
|
||||
num_tokens += tokens_per_message
|
||||
|
|
|
@ -34,7 +34,7 @@ class LogCycleHandler:
|
|||
if os.environ.get("OVERWRITE_DEBUG") == "1":
|
||||
outer_folder_name = "auto_gpt"
|
||||
else:
|
||||
ai_name_short = ai_name[:15] if ai_name else DEFAULT_PREFIX
|
||||
ai_name_short = self.get_agent_short_name(ai_name)
|
||||
outer_folder_name = f"{created_at}_{ai_name_short}"
|
||||
|
||||
outer_folder_path = os.path.join(log_directory, "DEBUG", outer_folder_name)
|
||||
|
@ -42,6 +42,9 @@ class LogCycleHandler:
|
|||
|
||||
return outer_folder_path
|
||||
|
||||
def get_agent_short_name(self, ai_name):
|
||||
return ai_name[:15].rstrip() if ai_name else DEFAULT_PREFIX
|
||||
|
||||
def create_inner_directory(self, outer_folder_path: str, cycle_count: int) -> str:
|
||||
nested_folder_name = str(cycle_count).zfill(3)
|
||||
nested_folder_path = os.path.join(outer_folder_path, nested_folder_name)
|
||||
|
|
|
@ -189,9 +189,10 @@ def run_auto_gpt(
|
|||
memory=memory,
|
||||
next_action_count=next_action_count,
|
||||
command_registry=command_registry,
|
||||
config=ai_config,
|
||||
system_prompt=system_prompt,
|
||||
triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
|
||||
workspace_directory=workspace_directory,
|
||||
ai_config=ai_config,
|
||||
config=cfg,
|
||||
)
|
||||
agent.start_interaction_loop()
|
||||
|
|
|
@ -11,10 +11,12 @@ if TYPE_CHECKING:
|
|||
from autogpt.config import Config
|
||||
from autogpt.json_utils.utilities import (
|
||||
LLM_DEFAULT_RESPONSE_FORMAT,
|
||||
extract_json_from_response,
|
||||
is_string_valid_json,
|
||||
)
|
||||
from autogpt.llm.base import ChatSequence, Message, MessageRole, MessageType
|
||||
from autogpt.llm.utils import create_chat_completion
|
||||
from autogpt.llm.providers.openai import OPEN_AI_CHAT_MODELS
|
||||
from autogpt.llm.utils import count_string_tokens, create_chat_completion
|
||||
from autogpt.log_cycle.log_cycle import PROMPT_SUMMARY_FILE_NAME, SUMMARY_FILE_NAME
|
||||
from autogpt.logs import logger
|
||||
|
||||
|
@ -152,13 +154,14 @@ class MessageHistory:
|
|||
|
||||
# Remove "thoughts" dictionary from "content"
|
||||
try:
|
||||
content_dict = json.loads(event.content)
|
||||
content_dict = extract_json_from_response(event.content)
|
||||
if "thoughts" in content_dict:
|
||||
del content_dict["thoughts"]
|
||||
event.content = json.dumps(content_dict)
|
||||
except json.decoder.JSONDecodeError:
|
||||
except json.JSONDecodeError as e:
|
||||
logger.error(f"Error: Invalid JSON: {e}")
|
||||
if cfg.debug_mode:
|
||||
logger.error(f"Error: Invalid JSON: {event.content}\n")
|
||||
logger.error(f"{event.content}")
|
||||
|
||||
elif event.role.lower() == "system":
|
||||
event.role = "your computer"
|
||||
|
@ -167,9 +170,45 @@ class MessageHistory:
|
|||
elif event.role == "user":
|
||||
new_events.remove(event)
|
||||
|
||||
# Summarize events and current summary in batch to a new running summary
|
||||
|
||||
# Assume an upper bound length for the summary prompt template, i.e. Your task is to create a concise running summary...., in summarize_batch func
|
||||
# TODO make this default dynamic
|
||||
prompt_template_length = 100
|
||||
max_tokens = OPEN_AI_CHAT_MODELS.get(cfg.fast_llm_model).max_tokens
|
||||
summary_tlength = count_string_tokens(str(self.summary), cfg.fast_llm_model)
|
||||
batch = []
|
||||
batch_tlength = 0
|
||||
|
||||
# TODO Can put a cap on length of total new events and drop some previous events to save API cost, but need to think thru more how to do it without losing the context
|
||||
for event in new_events:
|
||||
event_tlength = count_string_tokens(str(event), cfg.fast_llm_model)
|
||||
|
||||
if (
|
||||
batch_tlength + event_tlength
|
||||
> max_tokens - prompt_template_length - summary_tlength
|
||||
):
|
||||
# The batch is full. Summarize it and start a new one.
|
||||
self.summarize_batch(batch, cfg)
|
||||
summary_tlength = count_string_tokens(
|
||||
str(self.summary), cfg.fast_llm_model
|
||||
)
|
||||
batch = [event]
|
||||
batch_tlength = event_tlength
|
||||
else:
|
||||
batch.append(event)
|
||||
batch_tlength += event_tlength
|
||||
|
||||
if batch:
|
||||
# There's an unprocessed batch. Summarize it.
|
||||
self.summarize_batch(batch, cfg)
|
||||
|
||||
return self.summary_message()
|
||||
|
||||
def summarize_batch(self, new_events_batch, cfg):
|
||||
prompt = f'''Your task is to create a concise running summary of actions and information results in the provided text, focusing on key and potentially important information to remember.
|
||||
|
||||
You will receive the current summary and the your latest actions. Combine them, adding relevant key information from the latest development in 1st person past tense and keeping the summary concise.
|
||||
You will receive the current summary and your latest actions. Combine them, adding relevant key information from the latest development in 1st person past tense and keeping the summary concise.
|
||||
|
||||
Summary So Far:
|
||||
"""
|
||||
|
@ -178,13 +217,13 @@ Summary So Far:
|
|||
|
||||
Latest Development:
|
||||
"""
|
||||
{new_events or "Nothing new happened."}
|
||||
{new_events_batch or "Nothing new happened."}
|
||||
"""
|
||||
'''
|
||||
|
||||
prompt = ChatSequence.for_model(cfg.fast_llm_model, [Message("user", prompt)])
|
||||
self.agent.log_cycle_handler.log_cycle(
|
||||
self.agent.config.ai_name,
|
||||
self.agent.ai_name,
|
||||
self.agent.created_at,
|
||||
self.agent.cycle_count,
|
||||
prompt.raw(),
|
||||
|
@ -194,11 +233,9 @@ Latest Development:
|
|||
self.summary = create_chat_completion(prompt)
|
||||
|
||||
self.agent.log_cycle_handler.log_cycle(
|
||||
self.agent.config.ai_name,
|
||||
self.agent.ai_name,
|
||||
self.agent.created_at,
|
||||
self.agent.cycle_count,
|
||||
self.summary,
|
||||
SUMMARY_FILE_NAME,
|
||||
)
|
||||
|
||||
return self.summary_message()
|
||||
|
|
|
@ -1,8 +1,10 @@
|
|||
"""Handles loading of plugins."""
|
||||
|
||||
import importlib.util
|
||||
import inspect
|
||||
import json
|
||||
import os
|
||||
import sys
|
||||
import zipfile
|
||||
from pathlib import Path
|
||||
from typing import List
|
||||
|
@ -14,10 +16,14 @@ import requests
|
|||
from auto_gpt_plugin_template import AutoGPTPluginTemplate
|
||||
from openapi_python_client.config import Config as OpenAPIConfig
|
||||
|
||||
from autogpt.config import Config
|
||||
from autogpt.config.config import Config
|
||||
from autogpt.logs import logger
|
||||
from autogpt.models.base_open_ai_plugin import BaseOpenAIPlugin
|
||||
|
||||
DEFAULT_PLUGINS_CONFIG_FILE = os.path.join(
|
||||
os.path.dirname(os.path.abspath(__file__)), "..", "..", "plugins_config.yaml"
|
||||
)
|
||||
|
||||
|
||||
def inspect_zip_for_modules(zip_path: str, debug: bool = False) -> list[str]:
|
||||
"""
|
||||
|
@ -213,10 +219,33 @@ def scan_plugins(cfg: Config, debug: bool = False) -> List[AutoGPTPluginTemplate
|
|||
loaded_plugins = []
|
||||
# Generic plugins
|
||||
plugins_path_path = Path(cfg.plugins_dir)
|
||||
plugins_config = cfg.plugins_config
|
||||
|
||||
logger.debug(f"Allowlisted Plugins: {cfg.plugins_allowlist}")
|
||||
logger.debug(f"Denylisted Plugins: {cfg.plugins_denylist}")
|
||||
# Directory-based plugins
|
||||
for plugin_path in [f.path for f in os.scandir(cfg.plugins_dir) if f.is_dir()]:
|
||||
# Avoid going into __pycache__ or other hidden directories
|
||||
if plugin_path.startswith("__"):
|
||||
continue
|
||||
|
||||
plugin_module_path = plugin_path.split(os.path.sep)
|
||||
plugin_module_name = plugin_module_path[-1]
|
||||
qualified_module_name = ".".join(plugin_module_path)
|
||||
|
||||
__import__(qualified_module_name)
|
||||
plugin = sys.modules[qualified_module_name]
|
||||
|
||||
if not plugins_config.is_enabled(plugin_module_name):
|
||||
logger.warn(f"Plugin {plugin_module_name} found but not configured")
|
||||
continue
|
||||
|
||||
for _, class_obj in inspect.getmembers(plugin):
|
||||
if (
|
||||
hasattr(class_obj, "_abc_impl")
|
||||
and AutoGPTPluginTemplate in class_obj.__bases__
|
||||
):
|
||||
loaded_plugins.append(class_obj())
|
||||
|
||||
# Zip-based plugins
|
||||
for plugin in plugins_path_path.glob("*.zip"):
|
||||
if moduleList := inspect_zip_for_modules(str(plugin), debug):
|
||||
for module in moduleList:
|
||||
|
@ -225,6 +254,7 @@ def scan_plugins(cfg: Config, debug: bool = False) -> List[AutoGPTPluginTemplate
|
|||
logger.debug(f"Plugin: {plugin} Module: {module}")
|
||||
zipped_package = zipimporter(str(plugin))
|
||||
zipped_module = zipped_package.load_module(str(module.parent))
|
||||
|
||||
for key in dir(zipped_module):
|
||||
if key.startswith("__"):
|
||||
continue
|
||||
|
@ -233,9 +263,28 @@ def scan_plugins(cfg: Config, debug: bool = False) -> List[AutoGPTPluginTemplate
|
|||
if (
|
||||
"_abc_impl" in a_keys
|
||||
and a_module.__name__ != "AutoGPTPluginTemplate"
|
||||
and denylist_allowlist_check(a_module.__name__, cfg)
|
||||
):
|
||||
plugin_name = a_module.__name__
|
||||
plugin_configured = plugins_config.get(plugin_name) is not None
|
||||
plugin_enabled = plugins_config.is_enabled(plugin_name)
|
||||
|
||||
if plugin_configured and plugin_enabled:
|
||||
logger.debug(
|
||||
f"Loading plugin {plugin_name} as it was enabled in config."
|
||||
)
|
||||
loaded_plugins.append(a_module())
|
||||
elif plugin_configured and not plugin_enabled:
|
||||
logger.debug(
|
||||
f"Not loading plugin {plugin_name} as it was disabled in config."
|
||||
)
|
||||
elif not plugin_configured:
|
||||
logger.warn(
|
||||
f"Not loading plugin {plugin_name} as it was not found in config. "
|
||||
f"Please check your config. Starting with 0.4.1, plugins will not be loaded unless "
|
||||
f"they are enabled in plugins_config.yaml. Zipped plugins should use the class "
|
||||
f"name ({plugin_name}) as the key."
|
||||
)
|
||||
|
||||
# OpenAI plugins
|
||||
if cfg.plugins_openai:
|
||||
manifests_specs = fetch_openai_plugins_manifest_and_spec(cfg)
|
||||
|
@ -244,7 +293,10 @@ def scan_plugins(cfg: Config, debug: bool = False) -> List[AutoGPTPluginTemplate
|
|||
manifests_specs, cfg, debug
|
||||
)
|
||||
for url, openai_plugin_meta in manifests_specs_clients.items():
|
||||
if denylist_allowlist_check(url, cfg):
|
||||
if not plugins_config.is_enabled(url):
|
||||
logger.warn(f"Plugin {plugin_module_name} found but not configured")
|
||||
continue
|
||||
|
||||
plugin = BaseOpenAIPlugin(openai_plugin_meta)
|
||||
loaded_plugins.append(plugin)
|
||||
|
||||
|
@ -253,31 +305,3 @@ def scan_plugins(cfg: Config, debug: bool = False) -> List[AutoGPTPluginTemplate
|
|||
for plugin in loaded_plugins:
|
||||
logger.info(f"{plugin._name}: {plugin._version} - {plugin._description}")
|
||||
return loaded_plugins
|
||||
|
||||
|
||||
def denylist_allowlist_check(plugin_name: str, cfg: Config) -> bool:
|
||||
"""Check if the plugin is in the allowlist or denylist.
|
||||
|
||||
Args:
|
||||
plugin_name (str): Name of the plugin.
|
||||
cfg (Config): Config object.
|
||||
|
||||
Returns:
|
||||
True or False
|
||||
"""
|
||||
logger.debug(f"Checking if plugin {plugin_name} should be loaded")
|
||||
if (
|
||||
plugin_name in cfg.plugins_denylist
|
||||
or "all" in cfg.plugins_denylist
|
||||
or "none" in cfg.plugins_allowlist
|
||||
):
|
||||
logger.debug(f"Not loading plugin {plugin_name} as it was in the denylist.")
|
||||
return False
|
||||
if plugin_name in cfg.plugins_allowlist or "all" in cfg.plugins_allowlist:
|
||||
logger.debug(f"Loading plugin {plugin_name} as it was in the allowlist.")
|
||||
return True
|
||||
ack = input(
|
||||
f"WARNING: Plugin {plugin_name} found. But not in the"
|
||||
f" allowlist... Load? ({cfg.authorise_key}/{cfg.exit_key}): "
|
||||
)
|
||||
return ack.lower() == cfg.authorise_key
|
|
@ -0,0 +1,14 @@
|
|||
from typing import Any
|
||||
|
||||
|
||||
class PluginConfig:
|
||||
"""Class for holding configuration of a single plugin"""
|
||||
|
||||
def __init__(self, name: str, enabled: bool = False, config: dict[str, Any] = None):
|
||||
self.name = name
|
||||
self.enabled = enabled
|
||||
# Arbitrary config options for this plugin. API keys or plugin-specific options live here.
|
||||
self.config = config or {}
|
||||
|
||||
def __repr__(self):
|
||||
return f"PluginConfig('{self.name}', {self.enabled}, {str(self.config)}"
|
|
@ -0,0 +1,81 @@
|
|||
import os
|
||||
from typing import Any, Union
|
||||
|
||||
import yaml
|
||||
|
||||
from autogpt.config.config import Config
|
||||
from autogpt.logs import logger
|
||||
from autogpt.plugins.plugin_config import PluginConfig
|
||||
|
||||
|
||||
class PluginsConfig:
|
||||
"""Class for holding configuration of all plugins"""
|
||||
|
||||
def __init__(self, plugins_config: dict[str, Any]):
|
||||
self.plugins = {}
|
||||
for name, plugin in plugins_config.items():
|
||||
if type(plugin) == dict:
|
||||
self.plugins[name] = PluginConfig(
|
||||
name,
|
||||
plugin.get("enabled", False),
|
||||
plugin.get("config", {}),
|
||||
)
|
||||
elif type(plugin) == PluginConfig:
|
||||
self.plugins[name] = plugin
|
||||
else:
|
||||
raise ValueError(f"Invalid plugin config data type: {type(plugin)}")
|
||||
|
||||
def __repr__(self):
|
||||
return f"PluginsConfig({self.plugins})"
|
||||
|
||||
def get(self, name: str) -> Union[PluginConfig, None]:
|
||||
return self.plugins.get(name)
|
||||
|
||||
def is_enabled(self, name) -> bool:
|
||||
plugin_config = self.plugins.get(name)
|
||||
return plugin_config and plugin_config.enabled
|
||||
|
||||
@classmethod
|
||||
def load_config(cls, global_config: Config) -> "PluginsConfig":
|
||||
empty_config = cls({})
|
||||
|
||||
try:
|
||||
config_data = cls.deserialize_config_file(global_config=global_config)
|
||||
if type(config_data) != dict:
|
||||
logger.error(
|
||||
f"Expected plugins config to be a dict, got {type(config_data)}, continuing without plugins"
|
||||
)
|
||||
return empty_config
|
||||
return cls(config_data)
|
||||
|
||||
except BaseException as e:
|
||||
logger.error(
|
||||
f"Plugin config is invalid, continuing without plugins. Error: {e}"
|
||||
)
|
||||
return empty_config
|
||||
|
||||
@classmethod
|
||||
def deserialize_config_file(cls, global_config: Config) -> dict[str, Any]:
|
||||
plugins_config_path = global_config.plugins_config_file
|
||||
if not os.path.exists(plugins_config_path):
|
||||
logger.warn("plugins_config.yaml does not exist, creating base config.")
|
||||
cls.create_empty_plugins_config(global_config=global_config)
|
||||
|
||||
with open(plugins_config_path, "r") as f:
|
||||
return yaml.load(f, Loader=yaml.FullLoader)
|
||||
|
||||
@staticmethod
|
||||
def create_empty_plugins_config(global_config: Config):
|
||||
"""Create an empty plugins_config.yaml file. Fill it with values from old env variables."""
|
||||
base_config = {}
|
||||
|
||||
# Backwards-compatibility shim
|
||||
for plugin_name in global_config.plugins_denylist:
|
||||
base_config[plugin_name] = {"enabled": False, "config": {}}
|
||||
|
||||
for plugin_name in global_config.plugins_allowlist:
|
||||
base_config[plugin_name] = {"enabled": True, "config": {}}
|
||||
|
||||
with open(global_config.plugins_config_file, "w+") as f:
|
||||
f.write(yaml.dump(base_config))
|
||||
return base_config
|
|
@ -1,7 +1,8 @@
|
|||
""" A module for generating custom prompt strings."""
|
||||
import json
|
||||
from typing import TYPE_CHECKING, Any, Callable, Dict, List, Optional
|
||||
|
||||
from autogpt.json_utils.utilities import llm_response_schema
|
||||
|
||||
if TYPE_CHECKING:
|
||||
from autogpt.commands.command import CommandRegistry
|
||||
|
||||
|
@ -25,16 +26,6 @@ class PromptGenerator:
|
|||
self.command_registry: CommandRegistry | None = None
|
||||
self.name = "Bob"
|
||||
self.role = "AI"
|
||||
self.response_format = {
|
||||
"thoughts": {
|
||||
"text": "thought",
|
||||
"reasoning": "reasoning",
|
||||
"plan": "- short bulleted\n- list that conveys\n- long-term plan",
|
||||
"criticism": "constructive self-criticism",
|
||||
"speak": "thoughts summary to say to user",
|
||||
},
|
||||
"command": {"name": "command name", "args": {"arg name": "value"}},
|
||||
}
|
||||
|
||||
def add_constraint(self, constraint: str) -> None:
|
||||
"""
|
||||
|
@ -144,7 +135,6 @@ class PromptGenerator:
|
|||
Returns:
|
||||
str: The generated prompt string.
|
||||
"""
|
||||
formatted_response_format = json.dumps(self.response_format, indent=4)
|
||||
return (
|
||||
f"Constraints:\n{self._generate_numbered_list(self.constraints)}\n\n"
|
||||
"Commands:\n"
|
||||
|
@ -152,7 +142,6 @@ class PromptGenerator:
|
|||
f"Resources:\n{self._generate_numbered_list(self.resources)}\n\n"
|
||||
"Performance Evaluation:\n"
|
||||
f"{self._generate_numbered_list(self.performance_evaluation)}\n\n"
|
||||
"You should only respond in JSON format as described below \nResponse"
|
||||
f" Format: \n{formatted_response_format} \nEnsure the response can be"
|
||||
" parsed by Python json.loads"
|
||||
"Respond with only valid JSON conforming to the following schema: \n"
|
||||
f"{llm_response_schema()}\n"
|
||||
)
|
||||
|
|
|
@ -11,9 +11,7 @@ from autogpt.utils import clean_input
|
|||
|
||||
CFG = Config()
|
||||
|
||||
DEFAULT_TRIGGERING_PROMPT = (
|
||||
"Determine which next command to use, and respond using the format specified above:"
|
||||
)
|
||||
DEFAULT_TRIGGERING_PROMPT = "Determine exactly one command to use, and respond using the JSON schema specified previously:"
|
||||
|
||||
|
||||
def build_default_prompt_generator() -> PromptGenerator:
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
"""Base class for all voice classes."""
|
||||
import abc
|
||||
import re
|
||||
from threading import Lock
|
||||
|
||||
from autogpt.singleton import AbstractSingleton
|
||||
|
@ -29,6 +30,11 @@ class VoiceBase(AbstractSingleton):
|
|||
text (str): The text to say.
|
||||
voice_index (int): The index of the voice to use.
|
||||
"""
|
||||
text = re.sub(
|
||||
r"\b(?:https?://[-\w_.]+/?\w[-\w_.]*\.(?:[-\w_.]+/?\w[-\w_.]*\.)?[a-z]+(?:/[-\w_.%]+)*\b(?!\.))",
|
||||
"",
|
||||
text,
|
||||
)
|
||||
with self._mutex:
|
||||
return self._speech(text, voice_index)
|
||||
|
||||
|
|
|
@ -38,11 +38,11 @@ class ElevenLabsSpeech(VoiceBase):
|
|||
"xi-api-key": cfg.elevenlabs_api_key,
|
||||
}
|
||||
self._voices = default_voices.copy()
|
||||
if cfg.elevenlabs_voice_1_id in voice_options:
|
||||
cfg.elevenlabs_voice_1_id = voice_options[cfg.elevenlabs_voice_1_id]
|
||||
if cfg.elevenlabs_voice_id in voice_options:
|
||||
cfg.elevenlabs_voice_id = voice_options[cfg.elevenlabs_voice_id]
|
||||
if cfg.elevenlabs_voice_2_id in voice_options:
|
||||
cfg.elevenlabs_voice_2_id = voice_options[cfg.elevenlabs_voice_2_id]
|
||||
self._use_custom_voice(cfg.elevenlabs_voice_1_id, 0)
|
||||
self._use_custom_voice(cfg.elevenlabs_voice_id, 0)
|
||||
self._use_custom_voice(cfg.elevenlabs_voice_2_id, 1)
|
||||
|
||||
def _use_custom_voice(self, voice, voice_index) -> None:
|
||||
|
|
|
@ -4,10 +4,10 @@ from threading import Semaphore
|
|||
|
||||
from autogpt.config.config import Config
|
||||
from autogpt.speech.base import VoiceBase
|
||||
from autogpt.speech.brian import BrianSpeech
|
||||
from autogpt.speech.eleven_labs import ElevenLabsSpeech
|
||||
from autogpt.speech.gtts import GTTSVoice
|
||||
from autogpt.speech.macos_tts import MacOSTTS
|
||||
from autogpt.speech.stream_elements_speech import StreamElementsSpeech
|
||||
|
||||
_QUEUE_SEMAPHORE = Semaphore(
|
||||
1
|
||||
|
@ -33,14 +33,14 @@ def say_text(text: str, voice_index: int = 0) -> None:
|
|||
|
||||
def _get_voice_engine(config: Config) -> tuple[VoiceBase, VoiceBase]:
|
||||
"""Get the voice engine to use for the given configuration"""
|
||||
default_voice_engine = GTTSVoice()
|
||||
if config.elevenlabs_api_key:
|
||||
tts_provider = config.text_to_speech_provider
|
||||
if tts_provider == "elevenlabs":
|
||||
voice_engine = ElevenLabsSpeech()
|
||||
elif config.use_mac_os_tts == "True":
|
||||
elif tts_provider == "macos":
|
||||
voice_engine = MacOSTTS()
|
||||
elif config.use_brian_tts == "True":
|
||||
voice_engine = BrianSpeech()
|
||||
elif tts_provider == "streamelements":
|
||||
voice_engine = StreamElementsSpeech()
|
||||
else:
|
||||
voice_engine = GTTSVoice()
|
||||
|
||||
return default_voice_engine, voice_engine
|
||||
return GTTSVoice(), voice_engine
|
||||
|
|
|
@ -7,23 +7,24 @@ from playsound import playsound
|
|||
from autogpt.speech.base import VoiceBase
|
||||
|
||||
|
||||
class BrianSpeech(VoiceBase):
|
||||
"""Brian speech module for autogpt"""
|
||||
class StreamElementsSpeech(VoiceBase):
|
||||
"""Streamelements speech module for autogpt"""
|
||||
|
||||
def _setup(self) -> None:
|
||||
"""Setup the voices, API key, etc."""
|
||||
|
||||
def _speech(self, text: str, _: int = 0) -> bool:
|
||||
"""Speak text using Brian with the streamelements API
|
||||
def _speech(self, text: str, voice: str, _: int = 0) -> bool:
|
||||
"""Speak text using the streamelements API
|
||||
|
||||
Args:
|
||||
text (str): The text to speak
|
||||
voice (str): The voice to use
|
||||
|
||||
Returns:
|
||||
bool: True if the request was successful, False otherwise
|
||||
"""
|
||||
tts_url = (
|
||||
f"https://api.streamelements.com/kappa/v2/speech?voice=Brian&text={text}"
|
||||
f"https://api.streamelements.com/kappa/v2/speech?voice={voice}&text={text}"
|
||||
)
|
||||
response = requests.get(tts_url)
|
||||
|
|
@ -5,10 +5,14 @@ import requests
|
|||
import yaml
|
||||
from colorama import Fore, Style
|
||||
from git.repo import Repo
|
||||
from prompt_toolkit import ANSI, PromptSession
|
||||
from prompt_toolkit.history import InMemoryHistory
|
||||
|
||||
from autogpt.config import Config
|
||||
from autogpt.logs import logger
|
||||
|
||||
session = PromptSession(history=InMemoryHistory())
|
||||
|
||||
|
||||
def batch(iterable, max_batch_length: int, overlap: int = 0):
|
||||
"""Batch data from iterable into slices of length N. The last batch may be shorter."""
|
||||
|
@ -52,7 +56,7 @@ def clean_input(prompt: str = "", talk=False):
|
|||
|
||||
# ask for input, default when just pressing Enter is y
|
||||
logger.info("Asking user via keyboard...")
|
||||
answer = input(prompt)
|
||||
answer = session.prompt(ANSI(prompt))
|
||||
return answer
|
||||
except KeyboardInterrupt:
|
||||
logger.info("You interrupted Auto-GPT")
|
||||
|
|
|
@ -70,7 +70,7 @@ def kubernetes_agent(
|
|||
```
|
||||
|
||||
## Creating your challenge
|
||||
Go to `tests/integration/challenges`and create a file that is called `test_your_test_description.py` and add it to the appropriate folder. If no category exists you can create a new one.
|
||||
Go to `tests/challenges` and create a file called `test_your_test_description.py` and add it to the appropriate folder. If no category exists, you can create a new one.
|
||||
|
||||
Your test could look something like this
|
||||
|
||||
|
@ -84,7 +84,7 @@ import yaml
|
|||
|
||||
from autogpt.commands.file_operations import read_file, write_to_file
|
||||
from tests.integration.agent_utils import run_interaction_loop
|
||||
from tests.integration.challenges.utils import run_multiple_times
|
||||
from tests.challenges.utils import run_multiple_times
|
||||
from tests.utils import requires_api_key
|
||||
|
||||
|
||||
|
@ -111,7 +111,7 @@ def test_information_retrieval_challenge_a(kubernetes_agent, monkeypatch) -> Non
|
|||
"""
|
||||
input_sequence = ["s", "s", "s", "s", "s", "EXIT"]
|
||||
gen = input_generator(input_sequence)
|
||||
monkeypatch.setattr("builtins.input", lambda _: next(gen))
|
||||
monkeypatch.setattr("autogpt.utils.session.prompt", lambda _: next(gen))
|
||||
|
||||
with contextlib.suppress(SystemExit):
|
||||
run_interaction_loop(kubernetes_agent, None)
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
**Command to try**:
|
||||
|
||||
```
|
||||
pytest -s tests/integration/challenges/information_retrieval/test_information_retrieval_challenge_a.py --level=2
|
||||
pytest -s tests/challenges/information_retrieval/test_information_retrieval_challenge_a.py --level=2
|
||||
```
|
||||
|
||||
## Description
|
||||
|
|
|
@ -5,7 +5,7 @@
|
|||
**Command to try**:
|
||||
|
||||
```
|
||||
pytest -s tests/integration/challenges/information_retrieval/test_information_retrieval_challenge_b.py
|
||||
pytest -s tests/challenges/information_retrieval/test_information_retrieval_challenge_b.py
|
||||
```
|
||||
|
||||
## Description
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
|
||||
**Command to try**:
|
||||
```
|
||||
pytest -s tests/integration/challenges/memory/test_memory_challenge_b.py --level=3
|
||||
pytest -s tests/challenges/memory/test_memory_challenge_b.py --level=3
|
||||
``
|
||||
|
||||
## Description
|
||||
|
@ -41,4 +41,3 @@ Write all the task_ids into the file output.txt. The file has not been created y
|
|||
## Objective
|
||||
|
||||
The objective of this challenge is to test the agent's ability to follow instructions and maintain memory of the task IDs throughout the process. The agent successfully completed this challenge if it wrote the task ids in a file.
|
||||
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
|
||||
**Command to try**:
|
||||
```
|
||||
pytest -s tests/integration/challenges/memory/test_memory_challenge_c.py --level=2
|
||||
pytest -s tests/challenges/memory/test_memory_challenge_c.py --level=2
|
||||
``
|
||||
|
||||
## Description
|
||||
|
|
|
@ -0,0 +1,75 @@
|
|||
# Memory Challenge D
|
||||
|
||||
**Status**: Current level to beat: level 1
|
||||
|
||||
**Command to try**:
|
||||
```
|
||||
pytest -s tests/challenges/memory/test_memory_challenge_d.py --level=1
|
||||
```
|
||||
|
||||
## Description
|
||||
|
||||
The provided code is a unit test designed to validate an AI's ability to track events and beliefs of characters in a story involving moving objects, specifically marbles. This scenario is an advanced form of the classic "Sally-Anne test", a psychological test used to measure a child's social cognitive ability to understand that others' perspectives and beliefs may differ from their own.
|
||||
|
||||
Here is an explanation of the challenge:
|
||||
|
||||
The AI is given a series of events involving characters Sally, Anne, Bob, and Charlie, and the movements of different marbles. These events are designed as tests at increasing levels of complexity.
|
||||
|
||||
For each level, the AI is expected to keep track of the events and the resulting beliefs of each character about the locations of each marble. These beliefs are affected by whether the character was inside or outside the room when events occurred, as characters inside the room are aware of the actions, while characters outside the room aren't.
|
||||
|
||||
After the AI processes the events and generates the beliefs of each character, it writes these beliefs to an output file in JSON format.
|
||||
|
||||
The check_beliefs function then checks the AI's beliefs against the expected beliefs for that level. The expected beliefs are predefined and represent the correct interpretation of the events for each level.
|
||||
|
||||
If the AI's beliefs match the expected beliefs, it means the AI has correctly interpreted the events and the perspectives of each character. This would indicate that the AI has passed the test for that level.
|
||||
|
||||
The test runs for levels up to the maximum level that the AI has successfully beaten, or up to a user-selected level.
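To make the flow concrete, here is a minimal sketch of how such a beliefs check could work. The output filename, the exact comparison, and the function signature are assumptions for illustration; the actual test code may differ.

```python
import json

# Hypothetical sketch: the agent is assumed to have written its beliefs to
# "output.txt" as JSON; the real challenge may use a different file name.
# expected_beliefs is the per-level mapping shown below (character -> marble -> location).
def check_beliefs(output_path: str, expected_beliefs: dict) -> bool:
    with open(output_path) as f:
        actual_beliefs = json.load(f)
    # The check passes only if every expected character/marble/location
    # triple matches what the agent wrote.
    for character, marbles in expected_beliefs.items():
        for marble, location in marbles.items():
            if actual_beliefs.get(character, {}).get(marble) != location:
                return False
    return True
```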
|
||||
|
||||
|
||||
## Files
|
||||
|
||||
- `instructions_1.txt`
|
||||
|
||||
"Sally has a marble (marble A) and she puts it in her basket (basket S), then leaves the room. Anne moves marble A from Sally's basket (basket S) to her own basket (basket A).",
|
||||
|
||||
|
||||
- `instructions_2.txt`
|
||||
|
||||
"Sally gives a new marble (marble B) to Bob who is outside with her. Bob goes into the room and places marble B into Anne's basket (basket A). Anne tells Bob to tell Sally that he lost the marble b. Bob leaves the room and speaks to Sally about the marble B. Meanwhile, after Bob left the room, Anne moves marble A into the green box, but tells Charlie to tell Sally that marble A is under the sofa. Charlie leaves the room and speak to Sally about the marble A as instructed by Anne.",
|
||||
|
||||
|
||||
...and so on.
|
||||
|
||||
- `instructions_n.txt`
|
||||
|
||||
The expected beliefs of every character are given in a list:
|
||||
|
||||
expected_beliefs = {
|
||||
1: {
|
||||
'Sally': {
|
||||
'marble A': 'basket S',
|
||||
},
|
||||
'Anne': {
|
||||
'marble A': 'basket A',
|
||||
}
|
||||
},
|
||||
2: {
|
||||
'Sally': {
|
||||
'marble A': 'sofa', # Because Charlie told her
|
||||
},
|
||||
'Anne': {
|
||||
'marble A': 'green box', # Because she moved it there
|
||||
'marble B': 'basket A', # Because Bob put it there and she was in the room
|
||||
},
|
||||
'Bob': {
|
||||
'B': 'basket A', # Last place he put it
|
||||
},
|
||||
'Charlie': {
|
||||
'A': 'sofa', # Because Anne told him to tell Sally so
|
||||
}
|
||||
},...
|
||||
|
||||
|
||||
## Objective
|
||||
|
||||
This test essentially checks if an AI can accurately model and track the beliefs of different characters based on their knowledge of events, which is a critical aspect of understanding and generating human-like narratives. This ability would be beneficial for tasks such as writing stories, dialogue systems, and more.
|
|
@ -0,0 +1,53 @@
|
|||
# Configuration
|
||||
|
||||
Configuration is controlled through the `Config` object. You can set configuration variables via the `.env` file. If you don't have a `.env` file, create a copy of `.env.template` in your `Auto-GPT` folder and name it `.env`.
|
||||
|
||||
## Environment Variables
|
||||
|
||||
- `AI_SETTINGS_FILE`: Location of AI Settings file. Default: ai_settings.yaml
|
||||
- `AUDIO_TO_TEXT_PROVIDER`: Audio To Text Provider. Only option currently is `huggingface`. Default: huggingface
|
||||
- `AUTHORISE_COMMAND_KEY`: Key response accepted when authorising commands. Default: y
|
||||
- `BROWSE_CHUNK_MAX_LENGTH`: When browsing website, define the length of chunks to summarize. Default: 3000
|
||||
- `BROWSE_SPACY_LANGUAGE_MODEL`: [spaCy language model](https://spacy.io/usage/models) to use when creating chunks. Default: en_core_web_sm
|
||||
- `CHAT_MESSAGES_ENABLED`: Enable chat messages. Optional
|
||||
- `DISABLED_COMMAND_CATEGORIES`: Command categories to disable. Command categories are Python module names, e.g. autogpt.commands.analyze_code. See the directory `autogpt/commands` in the source for all command modules. Default: None
|
||||
- `ELEVENLABS_API_KEY`: ElevenLabs API Key. Optional.
|
||||
- `ELEVENLABS_VOICE_ID`: ElevenLabs Voice ID. Optional.
|
||||
- `EMBEDDING_MODEL`: LLM Model to use for embedding tasks. Default: text-embedding-ada-002
|
||||
- `EXECUTE_LOCAL_COMMANDS`: If shell commands should be executed locally. Default: False
|
||||
- `EXIT_KEY`: Exit key accepted to exit. Default: n
|
||||
- `FAST_LLM_MODEL`: LLM Model to use for most tasks. Default: gpt-3.5-turbo
|
||||
- `GITHUB_API_KEY`: [Github API Key](https://github.com/settings/tokens). Optional.
|
||||
- `GITHUB_USERNAME`: GitHub Username. Optional.
|
||||
- `GOOGLE_API_KEY`: Google API key. Optional.
|
||||
- `GOOGLE_CUSTOM_SEARCH_ENGINE_ID`: [Google custom search engine ID](https://programmablesearchengine.google.com/controlpanel/all). Optional.
|
||||
- `HEADLESS_BROWSER`: Use a headless browser while Auto-GPT uses a web browser. Setting to `False` will allow you to see Auto-GPT operate the browser. Default: True
|
||||
- `HUGGINGFACE_API_TOKEN`: HuggingFace API token, to be used for both image generation and audio to text. Optional.
|
||||
- `HUGGINGFACE_AUDIO_TO_TEXT_MODEL`: HuggingFace audio to text model. Default: CompVis/stable-diffusion-v1-4
|
||||
- `HUGGINGFACE_IMAGE_MODEL`: HuggingFace model to use for image generation. Default: CompVis/stable-diffusion-v1-4
|
||||
- `IMAGE_PROVIDER`: Image provider. Options are `dalle`, `huggingface`, and `sdwebui`. Default: dalle
|
||||
- `IMAGE_SIZE`: Default size of image to generate. Default: 256
|
||||
- `MEMORY_BACKEND`: Memory back-end to use. Currently `json_file` is the only supported and enabled backend. Default: json_file
|
||||
- `MEMORY_INDEX`: Value used in the Memory backend for scoping, naming, or indexing. Default: auto-gpt
|
||||
- `OPENAI_API_KEY`: *REQUIRED* - Your [OpenAI API Key](https://platform.openai.com/account/api-keys).
|
||||
- `OPENAI_ORGANIZATION`: Organization ID in OpenAI. Optional.
|
||||
- `PLAIN_OUTPUT`: Plain output, which disables the spinner. Default: False
|
||||
- `PLUGINS_CONFIG_FILE`: Path of plugins_config.yaml file. Default: plugins_config.yaml
|
||||
- `PROMPT_SETTINGS_FILE`: Location of Prompt Settings file. Default: prompt_settings.yaml
|
||||
- `REDIS_HOST`: Redis Host. Default: localhost
|
||||
- `REDIS_PASSWORD`: Redis Password. Optional. Default:
|
||||
- `REDIS_PORT`: Redis Port. Default: 6379
|
||||
- `RESTRICT_TO_WORKSPACE`: Restrict file reading and writing to the workspace directory. Default: True
|
||||
- `SD_WEBUI_AUTH`: Stable Diffusion Web UI username:password pair. Optional.
|
||||
- `SD_WEBUI_URL`: Stable Diffusion Web UI URL. Default: http://localhost:7860
|
||||
- `SHELL_ALLOWLIST`: List of shell commands that ARE allowed to be executed by Auto-GPT. Only applies if `SHELL_COMMAND_CONTROL` is set to `allowlist`. Default: None
|
||||
- `SHELL_COMMAND_CONTROL`: Whether to use `allowlist` or `denylist` to determine what shell commands can be executed. Default: denylist
|
||||
- `SHELL_DENYLIST`: List of shell commands that ARE NOT allowed to be executed by Auto-GPT. Only applies if `SHELL_COMMAND_CONTROL` is set to `denylist`. Default: sudo,su
|
||||
- `SMART_LLM_MODEL`: LLM Model to use for "smart" tasks. Default: gpt-3.5-turbo
|
||||
- `STREAMELEMENTS_VOICE`: StreamElements voice to use. Default: Brian
|
||||
- `TEMPERATURE`: Value of temperature given to OpenAI. Value from 0 to 2. Lower is more deterministic, higher is more random. See https://platform.openai.com/docs/api-reference/completions/create#completions/create-temperature
|
||||
- `TEXT_TO_SPEECH_PROVIDER`: Text to Speech Provider. Options are `gtts`, `macos`, `elevenlabs`, and `streamelements`. Default: gtts
|
||||
- `USER_AGENT`: User-Agent given when browsing websites. Default: "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"
|
||||
- `USE_AZURE`: Use Azure's LLM. Default: False
|
||||
- `USE_WEB_BROWSER`: Which web browser to use. Options are `chrome`, `firefox`, `safari`, or `edge`. Default: chrome
|
||||
- `WIPE_REDIS_ON_START`: Wipes data / index on start. Default: True
|
|
@ -2,6 +2,18 @@
|
|||
|
||||
⚠️💀 **WARNING** 💀⚠️: Review the code of any plugin you use thoroughly, as plugins can execute any Python code, potentially leading to malicious activities, such as stealing your API keys.
|
||||
|
||||
To configure plugins, you can create or edit the `plugins_config.yaml` file in the root directory of Auto-GPT. This file allows you to enable or disable plugins as desired. For specific configuration instructions, please refer to the documentation provided for each plugin. The file should be formatted in YAML. Here is an example for your reference:
|
||||
|
||||
```yaml
|
||||
plugin_a:
|
||||
config:
|
||||
api_key: my-api-key
|
||||
enabled: false
|
||||
plugin_b:
|
||||
config: {}
|
||||
enabled: true
|
||||
```
|
||||
|
||||
See our [Plugins Repo](https://github.com/Significant-Gravitas/Auto-GPT-Plugins) for more info on how to install all the amazing plugins the community has built!
|
||||
|
||||
Alternatively, developers can use the [Auto-GPT Plugin Template](https://github.com/Significant-Gravitas/Auto-GPT-Plugin-Template) as a starting point for creating their own plugins.
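For plugin developers, the enabled/disabled state can also be inspected programmatically. A minimal sketch, assuming the `PluginsConfig` helper introduced in this release is importable from `autogpt.plugins.plugins_config` (the module path is an assumption):

```python
from autogpt.config import Config
from autogpt.plugins.plugins_config import PluginsConfig  # assumed module path

config = Config()
plugins_config = PluginsConfig.load_config(global_config=config)

# Starting with 0.4.1, a plugin is only loaded if it is enabled in plugins_config.yaml.
if plugins_config.is_enabled("plugin_a"):
    print("plugin_a is enabled and will be loaded")
```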
|
||||
|
|
|
@ -172,7 +172,7 @@ If you need to upgrade Docker Compose to a newer version, you can follow the ins
|
|||
|
||||
Once you have a recent version of docker-compose, run the commands below in your Auto-GPT folder.
|
||||
|
||||
1. Build the image. If you have pulled the image from Docker Hub, skip this step (NOTE: You *will* need to do this if you are modifying requirements.txt to add/remove depedencies like Python libs/frameworks)
|
||||
1. Build the image. If you have pulled the image from Docker Hub, skip this step (NOTE: You *will* need to do this if you are modifying requirements.txt to add/remove dependencies like Python libs/frameworks)
|
||||
|
||||
:::shell
|
||||
docker-compose build auto-gpt
|
||||
|
|
|
@ -25,6 +25,7 @@ nav:
|
|||
- Memory Challenge A: challenges/memory/challenge_a.md
|
||||
- Memory Challenge B: challenges/memory/challenge_b.md
|
||||
- Memory Challenge C: challenges/memory/challenge_c.md
|
||||
- Memory Challenge D: challenges/memory/challenge_d.md
|
||||
- Information retrieval:
|
||||
- Introduction: challenges/information_retrieval/introduction.md
|
||||
- Information Retrieval Challenge A: challenges/information_retrieval/challenge_a.md
|
||||
|
|
2
mypy.ini
2
mypy.ini
|
@ -2,7 +2,7 @@
|
|||
follow_imports = skip
|
||||
check_untyped_defs = True
|
||||
disallow_untyped_defs = True
|
||||
files = tests/integration/challenges/**/*.py
|
||||
files = tests/challenges/**/*.py
|
||||
|
||||
[mypy-requests.*]
|
||||
ignore_missing_imports = True
|
||||
|
|
|
@ -0,0 +1,6 @@
|
|||
# Netlify config for Auto-GPT docs
|
||||
|
||||
[build]
|
||||
publish = "public/"
|
||||
command = "mkdocs build -d public"
|
||||
ignore = "git diff --quiet HEAD^ HEAD docs mkdocs.yml CONTRIBUTING.md CODE_OF_CONDUCT.md LICENSE"
|
|
@ -14,6 +14,5 @@ performance_evaluations: [
|
|||
'Continuously review and analyze your actions to ensure you are performing to the best of your abilities.',
|
||||
'Constructively self-criticize your big-picture behavior constantly.',
|
||||
'Reflect on past decisions and strategies to refine your approach.',
|
||||
'Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.',
|
||||
'Write all code to a file.'
|
||||
'Every command has a cost, so be smart and efficient. Aim to complete tasks in the least number of steps.'
|
||||
]
|
||||
|
|
|
@ -4,7 +4,7 @@ build-backend = "hatchling.build"
|
|||
|
||||
[project]
|
||||
name = "agpt"
|
||||
version = "0.4.0"
|
||||
version = "0.4.1"
|
||||
authors = [
|
||||
{ name="Torantulino", email="support@agpt.co" },
|
||||
]
|
||||
|
|
|
@ -27,6 +27,7 @@ click
|
|||
charset-normalizer>=3.1.0
|
||||
spacy>=3.0.0,<4.0.0
|
||||
en-core-web-sm @ https://github.com/explosion/spacy-models/releases/download/en_core_web_sm-3.5.0/en_core_web_sm-3.5.0-py3-none-any.whl
|
||||
prompt_toolkit>=3.0.38
|
||||
|
||||
##Dev
|
||||
coverage
|
||||
|
@ -61,3 +62,4 @@ pytest-mock
|
|||
vcrpy @ git+https://github.com/Significant-Gravitas/vcrpy.git@master
|
||||
pytest-recording
|
||||
pytest-xdist
|
||||
flaky
|
||||
|
|
2
run.sh
2
run.sh
|
@ -1,4 +1,4 @@
|
|||
#!/bin/bash
|
||||
#!/usr/bin/env bash
|
||||
|
||||
function find_python_command() {
|
||||
if command -v python &> /dev/null
|
||||
|
|
|
@ -2,6 +2,7 @@ import os
|
|||
import subprocess
|
||||
import sys
|
||||
import zipfile
|
||||
from glob import glob
|
||||
from pathlib import Path
|
||||
|
||||
|
||||
|
@ -16,6 +17,8 @@ def install_plugin_dependencies():
|
|||
None
|
||||
"""
|
||||
plugins_dir = Path(os.getenv("PLUGINS_DIR", "plugins"))
|
||||
|
||||
# Install zip-based plugins
|
||||
for plugin in plugins_dir.glob("*.zip"):
|
||||
with zipfile.ZipFile(str(plugin), "r") as zfile:
|
||||
try:
|
||||
|
@ -30,6 +33,13 @@ def install_plugin_dependencies():
|
|||
except KeyError:
|
||||
continue
|
||||
|
||||
# Install directory-based plugins
|
||||
for requirements_file in glob(f"{plugins_dir}/*/requirements.txt"):
|
||||
subprocess.check_call(
|
||||
[sys.executable, "-m", "pip", "install", "-r", requirements_file],
|
||||
stdout=subprocess.DEVNULL,
|
||||
)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
install_plugin_dependencies()
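The installer can be pointed at a different plugins directory via the `PLUGINS_DIR` environment variable. A hypothetical invocation (the script path below is assumed, not taken from this diff):

```python
# Hypothetical usage sketch; the script path below is assumed.
import os
import subprocess
import sys

env = {**os.environ, "PLUGINS_DIR": "plugins"}  # zip archives and plugin folders
subprocess.check_call([sys.executable, "scripts/install_plugin_deps.py"], env=env)
```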
|
||||
|
|
|
@ -1 +1 @@
|
|||
Subproject commit be280df43d6a23b8074d9cba10d18ed8724a54c9
|
||||
Subproject commit 427de6721cb5209a7a34359a81b71d60e80a110a
|
|
@ -1,26 +1,24 @@
|
|||
import pytest
|
||||
|
||||
from autogpt.agent import Agent
|
||||
from tests.integration.challenges.challenge_decorator.challenge_decorator import (
|
||||
challenge,
|
||||
)
|
||||
from tests.integration.challenges.utils import run_interaction_loop
|
||||
from tests.utils import requires_api_key
|
||||
from tests.challenges.challenge_decorator.challenge_decorator import challenge
|
||||
from tests.challenges.utils import run_interaction_loop
|
||||
|
||||
CYCLE_COUNT = 2
|
||||
|
||||
|
||||
@requires_api_key("OPENAI_API_KEY")
|
||||
@pytest.mark.vcr
|
||||
@challenge
|
||||
@challenge()
|
||||
def test_browse_website(
|
||||
browser_agent: Agent,
|
||||
patched_api_requestor: None,
|
||||
monkeypatch: pytest.MonkeyPatch,
|
||||
level_to_run: int,
|
||||
challenge_name: str,
|
||||
) -> None:
|
||||
file_path = browser_agent.workspace.get_path("browse_website.txt")
|
||||
run_interaction_loop(monkeypatch, browser_agent, CYCLE_COUNT)
|
||||
run_interaction_loop(
|
||||
monkeypatch, browser_agent, CYCLE_COUNT, challenge_name, level_to_run
|
||||
)
|
||||
|
||||
# content = read_file(file_path, config)
|
||||
content = open(file_path, encoding="utf-8").read()
|
|
@ -0,0 +1,42 @@
|
|||
from typing import List
|
||||
|
||||
import pytest
|
||||
|
||||
from autogpt.agent import Agent
|
||||
from autogpt.commands.file_operations import read_file
|
||||
from tests.challenges.challenge_decorator.challenge_decorator import challenge
|
||||
from tests.challenges.utils import get_workspace_path, run_interaction_loop
|
||||
|
||||
CYCLE_COUNT_PER_LEVEL = [1, 1]
|
||||
EXPECTED_OUTPUTS_PER_LEVEL = [
|
||||
{"hello_world.txt": ["Hello World"]},
|
||||
{"hello_world_1.txt": ["Hello World"], "hello_world_2.txt": ["Hello World"]},
|
||||
]
|
||||
|
||||
|
||||
@challenge()
|
||||
def test_write_file(
|
||||
file_system_agents: List[Agent],
|
||||
patched_api_requestor: None,
|
||||
monkeypatch: pytest.MonkeyPatch,
|
||||
level_to_run: int,
|
||||
challenge_name: str,
|
||||
) -> None:
|
||||
file_system_agent = file_system_agents[level_to_run - 1]
|
||||
run_interaction_loop(
|
||||
monkeypatch,
|
||||
file_system_agent,
|
||||
CYCLE_COUNT_PER_LEVEL[level_to_run - 1],
|
||||
challenge_name,
|
||||
level_to_run,
|
||||
)
|
||||
|
||||
expected_outputs = EXPECTED_OUTPUTS_PER_LEVEL[level_to_run - 1]
|
||||
|
||||
for file_name, expected_lines in expected_outputs.items():
|
||||
file_path = get_workspace_path(file_system_agent, file_name)
|
||||
content = read_file(file_path, file_system_agent)
|
||||
for expected_line in expected_lines:
|
||||
assert (
|
||||
expected_line in content
|
||||
), f"Expected '{expected_line}' in file {file_name}, but it was not found"
|
|
@ -3,6 +3,7 @@ from typing import Optional
|
|||
|
||||
class Challenge:
|
||||
BEAT_CHALLENGES = False
|
||||
DEFAULT_CHALLENGE_NAME = "default_challenge_name"
|
||||
|
||||
def __init__(
|
||||
self,
|
||||
|
@ -10,7 +11,7 @@ class Challenge:
|
|||
category: str,
|
||||
max_level: int,
|
||||
is_new_challenge: bool,
|
||||
max_level_beaten: Optional[int],
|
||||
max_level_beaten: Optional[int] = None,
|
||||
level_to_run: Optional[int] = None,
|
||||
) -> None:
|
||||
self.name = name
|
|
@ -0,0 +1,89 @@
|
|||
import os
|
||||
from functools import wraps
|
||||
from typing import Any, Callable, Optional
|
||||
|
||||
import pytest
|
||||
from flaky import flaky # type: ignore
|
||||
|
||||
from tests.challenges.challenge_decorator.challenge import Challenge
|
||||
from tests.challenges.challenge_decorator.challenge_utils import create_challenge
|
||||
from tests.challenges.challenge_decorator.score_utils import (
|
||||
get_scores,
|
||||
update_new_score,
|
||||
)
|
||||
from tests.utils import requires_api_key
|
||||
|
||||
MAX_LEVEL_TO_IMPROVE_ON = (
|
||||
1 # we will attempt to beat 1 level above the current level for now.
|
||||
)
|
||||
|
||||
CHALLENGE_FAILED_MESSAGE = "Challenges can sometimes fail randomly, please run this test again and if it fails reach out to us on https://discord.gg/autogpt in the 'challenges' channel to let us know the challenge you're struggling with."
|
||||
|
||||
|
||||
def challenge(
|
||||
max_runs: int = 2, min_passes: int = 1, api_key: str = "OPENAI_API_KEY"
|
||||
) -> Callable[[Callable[..., Any]], Callable[..., None]]:
|
||||
def decorator(func: Callable[..., Any]) -> Callable[..., None]:
|
||||
@requires_api_key(api_key)
|
||||
@pytest.mark.vcr
|
||||
@flaky(max_runs=max_runs, min_passes=min_passes)
|
||||
@wraps(func)
|
||||
def wrapper(*args: Any, **kwargs: Any) -> None:
|
||||
run_remaining = MAX_LEVEL_TO_IMPROVE_ON if Challenge.BEAT_CHALLENGES else 1
|
||||
original_error: Optional[Exception] = None
|
||||
|
||||
while run_remaining > 0:
|
||||
current_score, new_score, new_score_location = get_scores()
|
||||
level_to_run = (
|
||||
kwargs["level_to_run"] if "level_to_run" in kwargs else None
|
||||
)
|
||||
challenge = create_challenge(
|
||||
func, current_score, Challenge.BEAT_CHALLENGES, level_to_run
|
||||
)
|
||||
if challenge.level_to_run is not None:
|
||||
kwargs["level_to_run"] = challenge.level_to_run
|
||||
kwargs["challenge_name"] = challenge.name
|
||||
try:
|
||||
func(*args, **kwargs)
|
||||
challenge.succeeded = True
|
||||
except AssertionError as err:
|
||||
original_error = AssertionError(
|
||||
f"{CHALLENGE_FAILED_MESSAGE}\n{err}"
|
||||
)
|
||||
challenge.succeeded = False
|
||||
except Exception as err:
|
||||
original_error = err
|
||||
challenge.succeeded = False
|
||||
else:
|
||||
challenge.skipped = True
|
||||
if os.environ.get("CI") == "true":
|
||||
new_max_level_beaten = get_new_max_level_beaten(
|
||||
challenge, Challenge.BEAT_CHALLENGES
|
||||
)
|
||||
update_new_score(
|
||||
new_score_location, new_score, challenge, new_max_level_beaten
|
||||
)
|
||||
if challenge.level_to_run is None:
|
||||
pytest.skip("This test has not been unlocked yet.")
|
||||
|
||||
if not challenge.succeeded:
|
||||
if Challenge.BEAT_CHALLENGES or challenge.is_new_challenge:
|
||||
pytest.xfail(str(original_error))
|
||||
if original_error:
|
||||
raise original_error
|
||||
run_remaining -= 1
|
||||
|
||||
return wrapper
|
||||
|
||||
return decorator
|
||||
|
||||
|
||||
def get_new_max_level_beaten(
|
||||
challenge: Challenge, beat_challenges: bool
|
||||
) -> Optional[int]:
|
||||
if challenge.succeeded:
|
||||
return challenge.level_to_run
|
||||
if challenge.skipped:
|
||||
return challenge.max_level_beaten
|
||||
# Challenge failed
|
||||
return challenge.max_level_beaten if beat_challenges else None
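As a usage reference, here is a hedged sketch of a test opting into the decorator above; the fixture names mirror the real challenge tests further down in this diff, while `example_agent` and the asserted text are made up:

```python
# Sketch of a challenge test using @challenge(); `example_agent` is a
# hypothetical fixture and the asserted text is illustrative only.
import pytest

from autogpt.agent import Agent
from tests.challenges.challenge_decorator.challenge_decorator import challenge
from tests.challenges.utils import get_workspace_path, run_interaction_loop

CYCLE_COUNT = 2


@challenge()  # wraps the test with requires_api_key, vcr, flaky and score tracking
def test_example_challenge(
    example_agent: Agent,
    patched_api_requestor: None,
    monkeypatch: pytest.MonkeyPatch,
    level_to_run: int,
    challenge_name: str,
) -> None:
    run_interaction_loop(
        monkeypatch, example_agent, CYCLE_COUNT, challenge_name, level_to_run
    )
    content = open(get_workspace_path(example_agent, "output.txt")).read()
    assert "expected text" in content
```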
|
|
@ -1,7 +1,7 @@
|
|||
import os
|
||||
from typing import Any, Callable, Dict, Optional, Tuple
|
||||
|
||||
from tests.integration.challenges.challenge_decorator.challenge import Challenge
|
||||
from tests.challenges.challenge_decorator.challenge import Challenge
|
||||
|
||||
CHALLENGE_PREFIX = "test_"
|
||||
|
|
@ -2,7 +2,7 @@ import json
|
|||
import os
|
||||
from typing import Any, Dict, Optional, Tuple
|
||||
|
||||
from tests.integration.challenges.challenge_decorator.challenge import Challenge
|
||||
from tests.challenges.challenge_decorator.challenge import Challenge
|
||||
|
||||
CURRENT_SCORE_LOCATION = "../current_score"
|
||||
NEW_SCORE_LOCATION = "../new_score"
|
|
@ -5,9 +5,8 @@ from _pytest.config import Config
|
|||
from _pytest.config.argparsing import Parser
|
||||
from _pytest.fixtures import FixtureRequest
|
||||
|
||||
from tests.integration.challenges.challenge_decorator.challenge import Challenge
|
||||
from tests.integration.conftest import BASE_VCR_CONFIG
|
||||
from tests.vcr.vcr_filter import before_record_response
|
||||
from tests.challenges.challenge_decorator.challenge import Challenge
|
||||
from tests.vcr import before_record_response
|
||||
|
||||
|
||||
def before_record_response_filter_errors(
|
||||
|
@ -21,9 +20,9 @@ def before_record_response_filter_errors(
|
|||
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def vcr_config() -> Dict[str, Any]:
|
||||
def vcr_config(get_base_vcr_config: Dict[str, Any]) -> Dict[str, Any]:
|
||||
# this fixture is called by the pytest-recording vcr decorator.
|
||||
return BASE_VCR_CONFIG | {
|
||||
return get_base_vcr_config | {
|
||||
"before_record_response": before_record_response_filter_errors,
|
||||
}
|
||||
|
||||
|
@ -52,6 +51,11 @@ def level_to_run(request: FixtureRequest) -> int:
|
|||
return request.config.option.level
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def challenge_name() -> str:
|
||||
return Challenge.DEFAULT_CHALLENGE_NAME
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def check_beat_challenges(request: FixtureRequest) -> None:
|
||||
Challenge.BEAT_CHALLENGES = request.config.getoption("--beat-challenges")
|
|
@ -5,20 +5,20 @@
|
|||
"max_level_beaten": 1
|
||||
},
|
||||
"write_file": {
|
||||
"max_level": 1,
|
||||
"max_level": 2,
|
||||
"max_level_beaten": 1
|
||||
}
|
||||
},
|
||||
"debug_code": {
|
||||
"debug_code_challenge_a": {
|
||||
"max_level": 1,
|
||||
"max_level": 2,
|
||||
"max_level_beaten": 1
|
||||
}
|
||||
},
|
||||
"information_retrieval": {
|
||||
"information_retrieval_challenge_a": {
|
||||
"max_level": 3,
|
||||
"max_level_beaten": 1
|
||||
"max_level_beaten": null
|
||||
},
|
||||
"information_retrieval_challenge_b": {
|
||||
"max_level": 1,
|
||||
|
@ -42,7 +42,11 @@
|
|||
},
|
||||
"memory_challenge_c": {
|
||||
"max_level": 5,
|
||||
"max_level_beaten": 1
|
||||
"max_level_beaten": null
|
||||
},
|
||||
"memory_challenge_d": {
|
||||
"max_level": 5,
|
||||
"max_level_beaten": null
|
||||
}
|
||||
}
|
||||
}
|
|
@ -2,18 +2,12 @@
|
|||
from typing import List, Optional
|
||||
|
||||
|
||||
def two_sum(nums: List, target: int) -> Optional[int]:
|
||||
def two_sum(nums: List, target: int) -> Optional[List[int]]:
|
||||
seen = {}
|
||||
for i, num in enumerate(nums):
|
||||
typo
|
||||
complement = target - num
|
||||
if complement in seen:
|
||||
return [seen[complement], i]
|
||||
seen[num] = i
|
||||
return None
|
||||
|
||||
|
||||
# Example usage:
|
||||
nums = [2, 7, 11, 15]
|
||||
target = 9
|
||||
result = two_sum(nums, target)
|
||||
print(result) # Output: [0, 1]
|
|
@ -0,0 +1,31 @@
|
|||
# mypy: ignore-errors
|
||||
from code import two_sum
|
||||
from typing import List
|
||||
|
||||
|
||||
def test_two_sum(nums: List, target: int, expected_result: List[int]) -> None:
|
||||
result = two_sum(nums, target)
|
||||
print(result)
|
||||
assert (
|
||||
result == expected_result
|
||||
), f"AssertionError: Expected the output to be {expected_result}"
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
# test the trivial case with the first two numbers
|
||||
nums = [2, 7, 11, 15]
|
||||
target = 9
|
||||
expected_result = [0, 1]
|
||||
test_two_sum(nums, target, expected_result)
|
||||
|
||||
# test for ability to use zero and the same number twice
|
||||
nums = [2, 7, 0, 15, 12, 0]
|
||||
target = 0
|
||||
expected_result = [2, 5]
|
||||
test_two_sum(nums, target, expected_result)
|
||||
|
||||
# test for first and last index usage and negative numbers
|
||||
nums = [-6, 7, 11, 4]
|
||||
target = -2
|
||||
expected_result = [0, 3]
|
||||
test_two_sum(nums, target, expected_result)
|
|
@ -0,0 +1,56 @@
|
|||
from pathlib import Path
|
||||
|
||||
import pytest
|
||||
from pytest_mock import MockerFixture
|
||||
|
||||
from autogpt.agent import Agent
|
||||
from autogpt.commands.execute_code import execute_python_file
|
||||
from tests.challenges.challenge_decorator.challenge_decorator import challenge
|
||||
from tests.challenges.utils import (
|
||||
copy_file_into_workspace,
|
||||
get_workspace_path,
|
||||
run_interaction_loop,
|
||||
)
|
||||
|
||||
CYCLE_COUNT = 5
|
||||
EXPECTED_VALUES = ["[0, 1]", "[2, 5]", "[0, 3]"]
|
||||
DIRECTORY_PATH = Path(__file__).parent / "data"
|
||||
CODE_FILE_PATH = "code.py"
|
||||
TEST_FILE_PATH = "test.py"
|
||||
|
||||
|
||||
@challenge()
|
||||
def test_debug_code_challenge_a(
|
||||
debug_code_agents: Agent,
|
||||
monkeypatch: pytest.MonkeyPatch,
|
||||
patched_api_requestor: MockerFixture,
|
||||
level_to_run: int,
|
||||
challenge_name: str,
|
||||
) -> None:
|
||||
"""
|
||||
Test whether the agent can debug a simple code snippet.
|
||||
|
||||
:param debug_code_agent: The agent to test.
|
||||
:param monkeypatch: pytest's monkeypatch utility for modifying builtins.
|
||||
:patched_api_requestor: Sends api requests to our API CI pipeline
|
||||
:level_to_run: The level to run.
|
||||
"""
|
||||
debug_code_agent = debug_code_agents[level_to_run - 1]
|
||||
|
||||
copy_file_into_workspace(debug_code_agent, DIRECTORY_PATH, CODE_FILE_PATH)
|
||||
copy_file_into_workspace(debug_code_agent, DIRECTORY_PATH, TEST_FILE_PATH)
|
||||
|
||||
run_interaction_loop(
|
||||
monkeypatch, debug_code_agent, CYCLE_COUNT, challenge_name, level_to_run
|
||||
)
|
||||
|
||||
output = execute_python_file(
|
||||
get_workspace_path(debug_code_agent, TEST_FILE_PATH), debug_code_agent
|
||||
)
|
||||
|
||||
assert "error" not in output.lower(), f"Errors found in output: {output}!"
|
||||
|
||||
for expected_value in EXPECTED_VALUES:
|
||||
assert (
|
||||
expected_value in output
|
||||
), f"Expected output to contain {expected_value}, but it was not found in {output}!"
|
|
@ -1,27 +1,24 @@
|
|||
import pytest
|
||||
from pytest_mock import MockerFixture
|
||||
|
||||
from autogpt.commands.file_operations import read_file
|
||||
from autogpt.config import Config
|
||||
from tests.integration.challenges.challenge_decorator.challenge_decorator import (
|
||||
challenge,
|
||||
)
|
||||
from tests.integration.challenges.utils import run_interaction_loop
|
||||
from tests.utils import requires_api_key
|
||||
from tests.challenges.challenge_decorator.challenge_decorator import challenge
|
||||
from tests.challenges.utils import get_workspace_path, run_interaction_loop
|
||||
|
||||
CYCLE_COUNT = 3
|
||||
EXPECTED_REVENUES = [["81"], ["81"], ["81", "53", "24", "21", "11", "7", "4", "3", "2"]]
|
||||
from autogpt.agent import Agent
|
||||
|
||||
OUTPUT_LOCATION = "output.txt"
|
||||
|
||||
@pytest.mark.vcr
|
||||
@requires_api_key("OPENAI_API_KEY")
|
||||
@challenge
|
||||
|
||||
@challenge()
|
||||
def test_information_retrieval_challenge_a(
|
||||
information_retrieval_agents: Agent,
|
||||
monkeypatch: pytest.MonkeyPatch,
|
||||
patched_api_requestor: None,
|
||||
config: Config,
|
||||
patched_api_requestor: MockerFixture,
|
||||
level_to_run: int,
|
||||
challenge_name: str,
|
||||
) -> None:
|
||||
"""
|
||||
Test the challenge_a function in a given agent by mocking user inputs and checking the output file content.
|
||||
|
@ -30,10 +27,16 @@ def test_information_retrieval_challenge_a(
|
|||
:param monkeypatch: pytest's monkeypatch utility for modifying builtins.
|
||||
"""
|
||||
information_retrieval_agent = information_retrieval_agents[level_to_run - 1]
|
||||
run_interaction_loop(monkeypatch, information_retrieval_agent, CYCLE_COUNT)
|
||||
run_interaction_loop(
|
||||
monkeypatch,
|
||||
information_retrieval_agent,
|
||||
CYCLE_COUNT,
|
||||
challenge_name,
|
||||
level_to_run,
|
||||
)
|
||||
|
||||
file_path = str(information_retrieval_agent.workspace.get_path("output.txt"))
|
||||
content = read_file(file_path, config)
|
||||
file_path = get_workspace_path(information_retrieval_agent, OUTPUT_LOCATION)
|
||||
content = read_file(file_path, information_retrieval_agent)
|
||||
expected_revenues = EXPECTED_REVENUES[level_to_run - 1]
|
||||
for revenue in expected_revenues:
|
||||
assert (
|
|
@ -1,28 +1,24 @@
|
|||
import contextlib
|
||||
|
||||
import pytest
|
||||
from pytest_mock import MockerFixture
|
||||
|
||||
from autogpt.agent import Agent
|
||||
from autogpt.commands.file_operations import read_file
|
||||
from autogpt.config import Config
|
||||
from tests.integration.challenges.challenge_decorator.challenge_decorator import (
|
||||
challenge,
|
||||
)
|
||||
from tests.integration.challenges.utils import run_interaction_loop
|
||||
from tests.utils import requires_api_key
|
||||
from tests.challenges.challenge_decorator.challenge_decorator import challenge
|
||||
from tests.challenges.utils import get_workspace_path, run_interaction_loop
|
||||
|
||||
CYCLE_COUNT = 3
|
||||
OUTPUT_LOCATION = "2010_nobel_prize_winners.txt"
|
||||
|
||||
|
||||
@pytest.mark.vcr
|
||||
@requires_api_key("OPENAI_API_KEY")
|
||||
@challenge
|
||||
@challenge()
|
||||
def test_information_retrieval_challenge_b(
|
||||
get_nobel_prize_agent: Agent,
|
||||
monkeypatch: pytest.MonkeyPatch,
|
||||
patched_api_requestor: None,
|
||||
patched_api_requestor: MockerFixture,
|
||||
level_to_run: int,
|
||||
config: Config,
|
||||
challenge_name: str,
|
||||
) -> None:
|
||||
"""
|
||||
Test the challenge_b function in a given agent by mocking user inputs and checking the output file content.
|
||||
|
@ -31,16 +27,19 @@ def test_information_retrieval_challenge_b(
|
|||
:param monkeypatch: pytest's monkeypatch utility for modifying builtins.
|
||||
:param patched_api_requestor: APIRequestor Patch to override the openai.api_requestor module for testing.
|
||||
:param level_to_run: The level to run.
|
||||
:param config: The config object.
|
||||
"""
|
||||
|
||||
with contextlib.suppress(SystemExit):
|
||||
run_interaction_loop(monkeypatch, get_nobel_prize_agent, CYCLE_COUNT)
|
||||
|
||||
file_path = str(
|
||||
get_nobel_prize_agent.workspace.get_path("2010_nobel_prize_winners.txt")
|
||||
run_interaction_loop(
|
||||
monkeypatch,
|
||||
get_nobel_prize_agent,
|
||||
CYCLE_COUNT,
|
||||
challenge_name,
|
||||
level_to_run,
|
||||
)
|
||||
content = read_file(file_path, config)
|
||||
file_path = get_workspace_path(get_nobel_prize_agent, OUTPUT_LOCATION)
|
||||
|
||||
content = read_file(file_path, get_nobel_prize_agent)
|
||||
assert "Andre Geim" in content, "Expected the file to contain Andre Geim"
|
||||
assert (
|
||||
"Konstantin Novoselov" in content
|
|
@ -1,26 +1,23 @@
|
|||
import pytest
|
||||
import yaml
|
||||
from pytest_mock import MockerFixture
|
||||
|
||||
from autogpt.agent import Agent
|
||||
from autogpt.commands.file_operations import read_file
|
||||
from autogpt.config import Config
|
||||
from tests.integration.challenges.challenge_decorator.challenge_decorator import (
|
||||
challenge,
|
||||
)
|
||||
from tests.integration.challenges.utils import run_interaction_loop
|
||||
from tests.utils import requires_api_key
|
||||
from tests.challenges.challenge_decorator.challenge_decorator import challenge
|
||||
from tests.challenges.utils import get_workspace_path, run_interaction_loop
|
||||
|
||||
CYCLE_COUNT = 3
|
||||
OUTPUT_LOCATION = "kube.yaml"
|
||||
|
||||
|
||||
@pytest.mark.vcr
|
||||
@requires_api_key("OPENAI_API_KEY")
|
||||
@challenge
|
||||
@challenge()
|
||||
def test_kubernetes_template_challenge_a(
|
||||
kubernetes_agent: Agent,
|
||||
monkeypatch: pytest.MonkeyPatch,
|
||||
config: Config,
|
||||
patched_api_requestor: MockerFixture,
|
||||
level_to_run: int,
|
||||
challenge_name: str,
|
||||
) -> None:
|
||||
"""
|
||||
Test the challenge_a function in a given agent by mocking user inputs
|
||||
|
@ -29,13 +26,14 @@ def test_kubernetes_template_challenge_a(
|
|||
Args:
|
||||
kubernetes_agent (Agent)
|
||||
monkeypatch (pytest.MonkeyPatch)
|
||||
config (Config)
|
||||
level_to_run (int)
|
||||
"""
|
||||
run_interaction_loop(monkeypatch, kubernetes_agent, CYCLE_COUNT)
|
||||
run_interaction_loop(
|
||||
monkeypatch, kubernetes_agent, CYCLE_COUNT, challenge_name, level_to_run
|
||||
)
|
||||
|
||||
file_path = str(kubernetes_agent.workspace.get_path("kube.yaml"))
|
||||
content = read_file(file_path, config)
|
||||
file_path = get_workspace_path(kubernetes_agent, OUTPUT_LOCATION)
|
||||
content = read_file(file_path, kubernetes_agent)
|
||||
|
||||
for word in ["apiVersion", "kind", "metadata", "spec"]:
|
||||
assert word in content, f"Expected the file to contain {word}"
|
|
@ -1,24 +1,21 @@
|
|||
import pytest
|
||||
from pytest_mock import MockerFixture
|
||||
|
||||
from autogpt.agent import Agent
|
||||
from autogpt.commands.file_operations import read_file, write_to_file
|
||||
from autogpt.config import Config
|
||||
from tests.integration.challenges.challenge_decorator.challenge_decorator import (
|
||||
challenge,
|
||||
)
|
||||
from tests.integration.challenges.utils import run_interaction_loop
|
||||
from tests.utils import requires_api_key
|
||||
from tests.challenges.challenge_decorator.challenge_decorator import challenge
|
||||
from tests.challenges.utils import get_workspace_path, run_interaction_loop
|
||||
|
||||
OUTPUT_LOCATION = "output.txt"
|
||||
|
||||
|
||||
@pytest.mark.vcr
|
||||
@requires_api_key("OPENAI_API_KEY")
|
||||
@challenge
|
||||
@challenge()
|
||||
def test_memory_challenge_a(
|
||||
memory_management_agent: Agent,
|
||||
patched_api_requestor: None,
|
||||
patched_api_requestor: MockerFixture,
|
||||
monkeypatch: pytest.MonkeyPatch,
|
||||
config: Config,
|
||||
level_to_run: int,
|
||||
challenge_name: str,
|
||||
) -> None:
|
||||
"""
|
||||
The agent reads a file containing a task_id. Then, it reads a series of other files.
|
||||
|
@ -27,17 +24,21 @@ def test_memory_challenge_a(
|
|||
memory_management_agent (Agent)
|
||||
patched_api_requestor (MockerFixture)
|
||||
monkeypatch (pytest.MonkeyPatch)
|
||||
config (Config)
|
||||
level_to_run (int)
|
||||
"""
|
||||
|
||||
task_id = "2314"
|
||||
create_instructions_files(memory_management_agent, level_to_run, task_id, config)
|
||||
create_instructions_files(memory_management_agent, level_to_run, task_id)
|
||||
|
||||
run_interaction_loop(monkeypatch, memory_management_agent, level_to_run + 2)
|
||||
run_interaction_loop(
|
||||
monkeypatch,
|
||||
memory_management_agent,
|
||||
level_to_run + 2,
|
||||
challenge_name,
|
||||
level_to_run,
|
||||
)
|
||||
|
||||
file_path = str(memory_management_agent.workspace.get_path("output.txt"))
|
||||
content = read_file(file_path, config)
|
||||
file_path = get_workspace_path(memory_management_agent, OUTPUT_LOCATION)
|
||||
content = read_file(file_path, memory_management_agent)
|
||||
assert task_id in content, f"Expected the file to contain {task_id}"
|
||||
|
||||
|
||||
|
@ -45,7 +46,6 @@ def create_instructions_files(
|
|||
memory_management_agent: Agent,
|
||||
num_files: int,
|
||||
task_id: str,
|
||||
config: Config,
|
||||
base_filename: str = "instructions_",
|
||||
) -> None:
|
||||
"""
|
||||
|
@ -59,8 +59,8 @@ def create_instructions_files(
|
|||
for i in range(1, num_files + 1):
|
||||
content = generate_content(i, task_id, base_filename, num_files)
|
||||
file_name = f"{base_filename}{i}.txt"
|
||||
file_path = str(memory_management_agent.workspace.get_path(file_name))
|
||||
write_to_file(file_path, content, config)
|
||||
file_path = get_workspace_path(memory_management_agent, file_name)
|
||||
write_to_file(file_path, content, memory_management_agent)
|
||||
|
||||
|
||||
def generate_content(
|
|
@ -1,26 +1,26 @@
|
|||
import pytest
|
||||
from pytest_mock import MockerFixture
|
||||
|
||||
from autogpt.agent import Agent
|
||||
from autogpt.commands.file_operations import read_file, write_to_file
|
||||
from autogpt.config import Config
|
||||
from tests.integration.challenges.challenge_decorator.challenge_decorator import (
|
||||
challenge,
|
||||
from tests.challenges.challenge_decorator.challenge_decorator import challenge
|
||||
from tests.challenges.utils import (
|
||||
generate_noise,
|
||||
get_workspace_path,
|
||||
run_interaction_loop,
|
||||
)
|
||||
from tests.integration.challenges.utils import generate_noise, run_interaction_loop
|
||||
from tests.utils import requires_api_key
|
||||
|
||||
NOISE = 1000
|
||||
OUTPUT_LOCATION = "output.txt"
|
||||
|
||||
|
||||
@pytest.mark.vcr
|
||||
@requires_api_key("OPENAI_API_KEY")
|
||||
@challenge
|
||||
@challenge()
|
||||
def test_memory_challenge_b(
|
||||
memory_management_agent: Agent,
|
||||
patched_api_requestor: None,
|
||||
patched_api_requestor: MockerFixture,
|
||||
monkeypatch: pytest.MonkeyPatch,
|
||||
config: Config,
|
||||
level_to_run: int,
|
||||
challenge_name: str,
|
||||
) -> None:
|
||||
"""
|
||||
The agent reads a series of files, each containing a task_id and noise. After reading 'n' files,
|
||||
|
@ -33,12 +33,18 @@ def test_memory_challenge_b(
|
|||
level_to_run (int)
|
||||
"""
|
||||
task_ids = [str(i * 1111) for i in range(1, level_to_run + 1)]
|
||||
create_instructions_files(memory_management_agent, level_to_run, task_ids, config)
|
||||
create_instructions_files(memory_management_agent, level_to_run, task_ids)
|
||||
|
||||
run_interaction_loop(monkeypatch, memory_management_agent, level_to_run + 2)
|
||||
run_interaction_loop(
|
||||
monkeypatch,
|
||||
memory_management_agent,
|
||||
level_to_run + 2,
|
||||
challenge_name,
|
||||
level_to_run,
|
||||
)
|
||||
|
||||
file_path = str(memory_management_agent.workspace.get_path("output.txt"))
|
||||
content = read_file(file_path, config)
|
||||
file_path = get_workspace_path(memory_management_agent, OUTPUT_LOCATION)
|
||||
content = read_file(file_path, memory_management_agent)
|
||||
for task_id in task_ids:
|
||||
assert task_id in content, f"Expected the file to contain {task_id}"
|
||||
|
||||
|
@ -47,7 +53,6 @@ def create_instructions_files(
|
|||
memory_management_agent: Agent,
|
||||
level: int,
|
||||
task_ids: list,
|
||||
config: Config,
|
||||
base_filename: str = "instructions_",
|
||||
) -> None:
|
||||
"""
|
||||
|
@ -62,8 +67,9 @@ def create_instructions_files(
|
|||
for i in range(1, level + 1):
|
||||
content = generate_content(i, task_ids, base_filename, level)
|
||||
file_name = f"{base_filename}{i}.txt"
|
||||
file_path = str(memory_management_agent.workspace.get_path(file_name))
|
||||
write_to_file(file_path, content, config)
|
||||
file_path = get_workspace_path(memory_management_agent, file_name)
|
||||
|
||||
write_to_file(file_path, content, memory_management_agent)
|
||||
|
||||
|
||||
def generate_content(index: int, task_ids: list, base_filename: str, level: int) -> str:
|
|
@ -1,27 +1,26 @@
|
|||
import pytest
|
||||
from pytest_mock import MockerFixture
|
||||
|
||||
from autogpt.agent import Agent
|
||||
from autogpt.commands.file_operations import read_file, write_to_file
|
||||
from autogpt.config import Config
|
||||
from tests.integration.challenges.challenge_decorator.challenge_decorator import (
|
||||
challenge,
|
||||
from tests.challenges.challenge_decorator.challenge_decorator import challenge
|
||||
from tests.challenges.utils import (
|
||||
generate_noise,
|
||||
get_workspace_path,
|
||||
run_interaction_loop,
|
||||
)
|
||||
from tests.integration.challenges.utils import generate_noise, run_interaction_loop
|
||||
from tests.utils import requires_api_key
|
||||
|
||||
NOISE = 1000
|
||||
NOISE = 1200
|
||||
OUTPUT_LOCATION = "output.txt"
|
||||
|
||||
|
||||
# @pytest.mark.vcr
|
||||
@pytest.mark.vcr
|
||||
@requires_api_key("OPENAI_API_KEY")
|
||||
@challenge
|
||||
@challenge()
|
||||
def test_memory_challenge_c(
|
||||
memory_management_agent: Agent,
|
||||
patched_api_requestor: None,
|
||||
patched_api_requestor: MockerFixture,
|
||||
monkeypatch: pytest.MonkeyPatch,
|
||||
config: Config,
|
||||
level_to_run: int,
|
||||
challenge_name: str,
|
||||
) -> None:
|
||||
"""
|
||||
Instead of reading task Ids from files as with the previous challenges, the agent now must remember
|
||||
|
@ -32,31 +31,37 @@ def test_memory_challenge_c(
|
|||
memory_management_agent (Agent)
|
||||
patched_api_requestor (MockerFixture)
|
||||
monkeypatch (pytest.MonkeyPatch)
|
||||
config (Config)
|
||||
level_to_run (int)
|
||||
"""
|
||||
silly_phrases = [
|
||||
"The purple elephant danced on a rainbow while eating a taco.",
|
||||
"The sneaky toaster stole my socks and ran away to Hawaii.",
|
||||
"My pet rock sings better than Beyoncé on Tuesdays.",
|
||||
"The giant hamster rode a unicycle through the crowded mall.",
|
||||
"The talking tree gave me a high-five and then flew away.",
|
||||
"I have a collection of invisible hats that I wear on special occasions.",
|
||||
"The flying spaghetti monster stole my sandwich and left a note saying 'thanks for the snack!'",
|
||||
"My imaginary friend is a dragon who loves to play video games.",
|
||||
"I once saw a cloud shaped like a giant chicken eating a pizza.",
|
||||
"The ninja unicorn disguised itself as a potted plant and infiltrated the office.",
|
||||
"The purple elephant danced on a rainbow while eating a taco",
|
||||
"The sneaky toaster stole my socks and ran away to Hawaii",
|
||||
"My pet rock sings better than Beyoncé on Tuesdays",
|
||||
"The giant hamster rode a unicycle through the crowded mall",
|
||||
"The talking tree gave me a high-five and then flew away",
|
||||
"I have a collection of invisible hats that I wear on special occasions",
|
||||
"The flying spaghetti monster stole my sandwich and left a note saying 'thanks for the snack'",
|
||||
"My imaginary friend is a dragon who loves to play video games",
|
||||
"I once saw a cloud shaped like a giant chicken eating a pizza",
|
||||
"The ninja unicorn disguised itself as a potted plant and infiltrated the office",
|
||||
]
|
||||
|
||||
level_silly_phrases = silly_phrases[:level_to_run]
|
||||
create_instructions_files(
|
||||
memory_management_agent, level_to_run, level_silly_phrases, config=config
|
||||
memory_management_agent,
|
||||
level_to_run,
|
||||
level_silly_phrases,
|
||||
)
|
||||
|
||||
run_interaction_loop(monkeypatch, memory_management_agent, level_to_run + 2)
|
||||
|
||||
file_path = str(memory_management_agent.workspace.get_path("output.txt"))
|
||||
content = read_file(file_path, config)
|
||||
run_interaction_loop(
|
||||
monkeypatch,
|
||||
memory_management_agent,
|
||||
level_to_run + 2,
|
||||
challenge_name,
|
||||
level_to_run,
|
||||
)
|
||||
file_path = get_workspace_path(memory_management_agent, OUTPUT_LOCATION)
|
||||
content = read_file(file_path, agent=memory_management_agent)
|
||||
for phrase in level_silly_phrases:
|
||||
assert phrase in content, f"Expected the file to contain {phrase}"
|
||||
|
||||
|
@ -65,7 +70,6 @@ def create_instructions_files(
|
|||
memory_management_agent: Agent,
|
||||
level: int,
|
||||
task_ids: list,
|
||||
config: Config,
|
||||
base_filename: str = "instructions_",
|
||||
) -> None:
|
||||
"""
|
||||
|
@ -80,8 +84,8 @@ def create_instructions_files(
|
|||
for i in range(1, level + 1):
|
||||
content = generate_content(i, task_ids, base_filename, level)
|
||||
file_name = f"{base_filename}{i}.txt"
|
||||
file_path = str(memory_management_agent.workspace.get_path(file_name))
|
||||
write_to_file(file_path, content, config)
|
||||
file_path = get_workspace_path(memory_management_agent, file_name)
|
||||
write_to_file(file_path, content, memory_management_agent)
|
||||
|
||||
|
||||
def generate_content(
|
|
@ -0,0 +1,241 @@
|
|||
import json
|
||||
from typing import Dict
|
||||
|
||||
import pytest
|
||||
from pytest_mock import MockerFixture
|
||||
|
||||
from autogpt.agent import Agent
|
||||
from autogpt.commands.file_operations import read_file, write_to_file
|
||||
from tests.challenges.challenge_decorator.challenge_decorator import challenge
|
||||
from tests.challenges.utils import get_workspace_path, run_interaction_loop
|
||||
|
||||
LEVEL_CURRENTLY_BEATEN = 1
|
||||
MAX_LEVEL = 5
|
||||
OUTPUT_LOCATION = "output.txt"
|
||||
|
||||
|
||||
@challenge()
|
||||
def test_memory_challenge_d(
|
||||
memory_management_agent: Agent,
|
||||
patched_api_requestor: MockerFixture,
|
||||
monkeypatch: pytest.MonkeyPatch,
|
||||
level_to_run: int,
|
||||
challenge_name: str,
|
||||
) -> None:
|
||||
"""
|
||||
The agent is given a series of events and must remember the respective beliefs of the characters.
|
||||
Args:
|
||||
memory_management_agent (Agent)
|
||||
user_selected_level (int)
|
||||
"""
|
||||
sally_anne_test_phrases = [
|
||||
"Sally has a marble (marble A) and she puts it in her basket (basket S), then leaves the room. Anne moves marble A from Sally's basket (basket S) to her own basket (basket A).",
|
||||
"Sally gives a new marble (marble B) to Bob who is outside with her. Bob goes into the room and places marble B into Anne's basket (basket A). Anne tells Bob to tell Sally that he lost the marble b. Bob leaves the room and speaks to Sally about the marble B. Meanwhile, after Bob left the room, Anne moves marble A into the green box, but tells Charlie to tell Sally that marble A is under the sofa. Charlie leaves the room and speaks to Sally about the marble A as instructed by Anne.",
|
||||
"Sally gives a new marble (marble C) to Charlie who is outside with her. Charlie enters the room and exchanges marble C with marble B in Anne's basket (basket A). Anne tells Charlie to tell Sally that he put marble C into the red box. Charlie leaves the room and speak to Sally about marble C as instructed by Anne. Meanwhile, after Charlie leaves the room, Bob enters into the room and moves marble A from the green box to under the sofa, but tells Anne to tell Sally that marble A is in the green box. Anne leaves the room and speak to Sally about the marble A as instructed by Bob",
|
||||
"Sally gives a new marble (marble D) to Anne. Anne gives the marble to Charlie. Charlie enters the room and gives marble D to Bob. Bob tells Charlie to tell Sally that he put marble D under the sofa. Bob put marble D under the sofa Charlie leaves the room and speaks to Sally about marble D. Meanwhile, after Charlie leaves the room, Bob takes marble A from under the sofa and places it in the blue box.",
|
||||
"Sally gives a new marble (marble E) to Charlie who is outside with her. Charlie enters the room and places marble E in the red box. Anne, who is already in the room, takes marble E from the red box, and hides it under the sofa. Then Anne leaves the room and tells Sally that marble E is in the green box. Meanwhile, after Anne leaves the room, Charlie who re-enters the room takes marble D from under the sofa and places it in his own basket (basket C).",
|
||||
]
|
||||
level_sally_anne_test_phrases = sally_anne_test_phrases[:level_to_run]
|
||||
create_instructions_files(
|
||||
memory_management_agent, level_to_run, level_sally_anne_test_phrases
|
||||
)
|
||||
run_interaction_loop(
|
||||
monkeypatch,
|
||||
memory_management_agent,
|
||||
level_to_run + 2,
|
||||
challenge_name,
|
||||
level_to_run,
|
||||
)
|
||||
file_path = get_workspace_path(memory_management_agent, OUTPUT_LOCATION)
|
||||
|
||||
content = read_file(file_path, memory_management_agent)
|
||||
check_beliefs(content, level_to_run)
|
||||
|
||||
|
||||
def check_beliefs(content: str, level: int) -> None:
|
||||
# Define the expected beliefs for each level
|
||||
expected_beliefs = {
|
||||
1: {
|
||||
"Sally": {
|
||||
"marble A": "basket S",
|
||||
},
|
||||
"Anne": {
|
||||
"marble A": "basket A",
|
||||
},
|
||||
},
|
||||
2: {
|
||||
"Sally": {
|
||||
"marble A": "sofa", # Because Charlie told her
|
||||
"marble B": "lost", # Because Bob told her
|
||||
},
|
||||
"Anne": {
|
||||
"marble A": "green box", # Because she moved it there
|
||||
"marble B": "basket A", # Because Bob put it there and she was in the room
|
||||
},
|
||||
"Bob": {
|
||||
"marble B": "basket A", # Last place he put it
|
||||
},
|
||||
"Charlie": {
|
||||
"marble A": "sofa", # Because Anne told him to tell Sally so
|
||||
},
|
||||
},
|
||||
3: {
|
||||
"Sally": {
|
||||
"marble A": "green box", # Because Anne told her
|
||||
"marble C": "red box", # Because Charlie told her
|
||||
},
|
||||
"Anne": {
|
||||
"marble A": "sofa", # Because Bob moved it there and told her
|
||||
"marble B": "basket A", # Because Charlie exchanged marble C with marble B in her basket
|
||||
"marble C": "basket A", # Because Charlie exchanged marble C with marble B in her basket
|
||||
},
|
||||
"Bob": {
|
||||
"marble A": "sofa", # Because he moved it there
|
||||
"marble B": "basket A",
|
||||
# Because Charlie exchanged marble C with marble B in Anne's basket, and he was in the room
|
||||
"marble C": "basket A",
|
||||
# Because Charlie exchanged marble C with marble B in Anne's basket, and he was in the room
|
||||
},
|
||||
"Charlie": {
|
||||
"marble A": "sofa", # Last place he knew it was
|
||||
"marble B": "basket A", # Because he exchanged marble C with marble B in Anne's basket
|
||||
"marble C": "red box", # Because Anne told him to tell Sally so
|
||||
},
|
||||
},
|
||||
4: {
|
||||
"Sally": {
|
||||
"marble A": "green box", # Because Anne told her in the last conversation
|
||||
"marble C": "red box", # Because Charlie told her
|
||||
"marble D": "sofa", # Because Charlie told her
|
||||
},
|
||||
"Anne": {
|
||||
"marble A": "blue box", # Because Bob moved it there, and she was not in the room to see
|
||||
"marble B": "basket A", # Last place she knew it was
|
||||
"marble C": "basket A", # Last place she knew it was
|
||||
"marble D": "sofa", # Because Bob moved it there, and she was in the room to see
|
||||
},
|
||||
"Bob": {
|
||||
"marble A": "blue box", # Because he moved it there
|
||||
"marble B": "basket A", # Last place he knew it was
|
||||
"marble C": "basket A", # Last place he knew it was
|
||||
"marble D": "sofa", # Because he moved it there
|
||||
},
|
||||
"Charlie": {
|
||||
"marble A": "sofa", # Last place he knew it was
|
||||
"marble B": "basket A", # Last place he knew it was
|
||||
"marble C": "red box", # Last place he knew it was
|
||||
"marble D": "sofa", # Because Bob told him to tell Sally so
|
||||
},
|
||||
},
|
||||
5: {
|
||||
"Sally": {
|
||||
"marble A": "green box", # Because Anne told her in the last level
|
||||
"marble C": "red box", # Because Charlie told her
|
||||
"marble D": "sofa", # Because Charlie told her
|
||||
"marble E": "green box", # Because Anne told her
|
||||
},
|
||||
"Anne": {
|
||||
"marble A": "blue box", # Last place she knew it was
|
||||
"marble B": "basket A", # Last place she knew it was
|
||||
"marble C": "basket A", # Last place she knew it was
|
||||
"marble D": "basket C", # Last place she knew it was
|
||||
"marble E": "sofa", # Because she moved it there
|
||||
},
|
||||
"Charlie": {
|
||||
"marble A": "blue box", # Last place he knew it was
|
||||
"marble B": "basket A", # Last place he knew it was
|
||||
"marble C": "basket A", # Last place he knew it was
|
||||
"marble D": "basket C", # Because he moved it there
|
||||
"marble E": "red box", # Last place he knew it was
|
||||
},
|
||||
"Bob": {
|
||||
"marble A": "blue box", # Last place he knew it was
|
||||
"marble C": "red box", # Last place he knew it was
|
||||
"marble D": "sofa", # Last place he knew it was
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
# Extract the beliefs from the AI's response
|
||||
ai_beliefs = extract_beliefs(content)
|
||||
# Check the AI's beliefs against the expected beliefs
|
||||
for character, belief in expected_beliefs[level].items():
|
||||
for marble, location in belief.items():
|
||||
ai_belief = ai_beliefs.get(character, {}).get(marble, "")
|
||||
assert (
|
||||
location in ai_belief
|
||||
), f"For {character}'s {marble}, expected '{location}' to be in '{ai_belief}'"
|
||||
|
||||
|
||||
def extract_beliefs(content: str) -> Dict[str, Dict[str, str]]:
|
||||
"""Extract the beliefs of each character from the AI's output."""
|
||||
# Parse the JSON content
|
||||
content_dict = json.loads(content)
|
||||
beliefs = content_dict.get("beliefs", {})
|
||||
return beliefs
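For clarity, a quick example of the shape `extract_beliefs` expects (the sample JSON is made up for illustration):

```python
# Example only; the JSON string below is made-up sample data in the format
# the challenge asks the agent to write to output.txt.
sample_output = (
    '{"beliefs": {"Sally": {"marble A": "basket S"}, '
    '"Anne": {"marble A": "basket A"}}}'
)
print(extract_beliefs(sample_output))
# -> {'Sally': {'marble A': 'basket S'}, 'Anne': {'marble A': 'basket A'}}
```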
|
||||
|
||||
|
||||
def create_instructions_files(
|
||||
memory_management_agent: Agent,
|
||||
level: int,
|
||||
test_phrases: list,
|
||||
base_filename: str = "instructions_",
|
||||
) -> None:
|
||||
"""
|
||||
Creates a series of instructions files for the memory challenge.
|
||||
Args:
|
||||
level:
|
||||
memory_management_agent (Agent)
|
||||
test_phrases (list)
|
||||
base_filename (str, optional)
|
||||
"""
|
||||
for i in range(1, level + 1):
|
||||
content = generate_content(i, test_phrases, base_filename, level)
|
||||
file_name = f"{base_filename}{i}.txt"
|
||||
file_path = get_workspace_path(memory_management_agent, file_name)
|
||||
write_to_file(file_path, content, memory_management_agent)
|
||||
|
||||
|
||||
def generate_content(
|
||||
index: int, test_phrases: list, base_filename: str, level: int
|
||||
) -> str:
|
||||
"""
|
||||
Args:
|
||||
index: int
|
||||
test_phrases: list
|
||||
base_filename: str
|
||||
level: int
|
||||
Returns: str
|
||||
"""
|
||||
phrase = test_phrases[index - 1]
|
||||
|
||||
if index != level:
|
||||
return f"The current event is '{phrase}'.\nRead the file {base_filename}{index + 1}.txt using the read_file command."
|
||||
else:
|
||||
return f"""
|
||||
This event is '{phrase}'
|
||||
rules:
|
||||
1. Everyone inside the room sees and knows the actions of the people inside the room.
|
||||
2. Everyone outside the room does not see and does not know the actions of the people inside the room.
|
||||
3. Only write about the marbles that are present in the level
|
||||
4. Marble names are marble A, marble B, marble C, marble D, ...
|
||||
5. Believed_location_of_the_specific_marble describes where the marble is, e.g. drawer, basket S, sofa, ...
|
||||
6. Do not use expressions like <Anne's basket>; use <basket A> instead.
|
||||
7. All characters believe what they're being told by other characters.
|
||||
Instructions:
|
||||
I) Write the following information in the file output.txt in JSON format:
|
||||
1. The respective beliefs of the characters (i.e. where every marble is according to characters x, y, z. Character x should state where it believes every marble it is aware of is located.)
|
||||
The format should be as follows:
|
||||
{{
|
||||
"beliefs": {{
|
||||
"<character_name>": {{
|
||||
"<marble_name>": "<believed_location_of_the_specific_marble>",
|
||||
...
|
||||
}},
|
||||
...
|
||||
}},
|
||||
}}
|
||||
Example of output (only use this to understand and learn how to use the format. Nothing else):
|
||||
{{"beliefs": {{"Sally": {{"marble A": "basket A"}}, "Bob": {{"marble B": "basket S"}}, "Anne": {{"marble A": "green box"}}, "Charlie": {{"marble B": "sofa"}}}}
|
||||
II) The file output.txt has not been created yet. You need to create it. After that, use the task_complete command.
|
||||
"""
|
|
@ -0,0 +1,76 @@
|
|||
import contextlib
|
||||
import random
|
||||
import shutil
|
||||
from pathlib import Path
|
||||
from typing import Any, Generator
|
||||
|
||||
import pytest
|
||||
|
||||
from autogpt.agent import Agent
|
||||
from autogpt.log_cycle.log_cycle import LogCycleHandler
|
||||
|
||||
|
||||
def generate_noise(noise_size: int) -> str:
|
||||
random.seed(42)
|
||||
return "".join(
|
||||
random.choices(
|
||||
"ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789",
|
||||
k=noise_size,
|
||||
)
|
||||
)
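Note that because `random.seed(42)` is called on every invocation, the noise is deterministic, which keeps recorded cassettes stable; for example:

```python
# generate_noise reseeds with 42 on each call, so the output is reproducible.
assert generate_noise(8) == generate_noise(8)
print(len(generate_noise(1000)))  # -> 1000
```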
|
||||
|
||||
|
||||
def setup_mock_input(monkeypatch: pytest.MonkeyPatch, cycle_count: int) -> None:
|
||||
"""
|
||||
Sets up the mock input for testing.
|
||||
|
||||
:param monkeypatch: pytest's monkeypatch utility for modifying builtins.
|
||||
:param cycle_count: The number of cycles to mock.
|
||||
"""
|
||||
input_sequence = ["y"] * (cycle_count) + ["EXIT"]
|
||||
|
||||
def input_generator() -> Generator[str, None, None]:
|
||||
"""
|
||||
Creates a generator that yields input strings from the given sequence.
|
||||
"""
|
||||
yield from input_sequence
|
||||
|
||||
gen = input_generator()
|
||||
monkeypatch.setattr("autogpt.utils.session.prompt", lambda _: next(gen))
|
||||
|
||||
|
||||
def run_interaction_loop(
|
||||
monkeypatch: pytest.MonkeyPatch,
|
||||
agent: Agent,
|
||||
cycle_count: int,
|
||||
challenge_name: str,
|
||||
level_to_run: int,
|
||||
) -> None:
|
||||
setup_mock_input(monkeypatch, cycle_count)
|
||||
|
||||
setup_mock_log_cycle_agent_name(monkeypatch, challenge_name, level_to_run)
|
||||
with contextlib.suppress(SystemExit):
|
||||
agent.start_interaction_loop()
|
||||
|
||||
|
||||
def setup_mock_log_cycle_agent_name(
|
||||
monkeypatch: pytest.MonkeyPatch, challenge_name: str, level_to_run: int
|
||||
) -> None:
|
||||
def mock_get_agent_short_name(*args: Any, **kwargs: Any) -> str:
|
||||
return f"{challenge_name}_level_{level_to_run}"
|
||||
|
||||
monkeypatch.setattr(
|
||||
LogCycleHandler, "get_agent_short_name", mock_get_agent_short_name
|
||||
)
|
||||
|
||||
|
||||
def get_workspace_path(agent: Agent, file_name: str) -> str:
|
||||
return str(agent.workspace.get_path(file_name))
|
||||
|
||||
|
||||
def copy_file_into_workspace(
|
||||
agent: Agent, directory_path: Path, file_path: str
|
||||
) -> None:
|
||||
workspace_code_file_path = get_workspace_path(agent, file_path)
|
||||
code_file_path = directory_path / file_path
|
||||
shutil.copy(code_file_path, workspace_code_file_path)
|
|
@ -26,12 +26,8 @@ def recursive_sort_dict(data: dict) -> dict:
|
|||
|
||||
|
||||
cwd = os.getcwd() # get current working directory
|
||||
new_score_filename_pattern = os.path.join(
|
||||
cwd, "tests/integration/challenges/new_score_*.json"
|
||||
)
|
||||
current_score_filename = os.path.join(
|
||||
cwd, "tests/integration/challenges/current_score.json"
|
||||
)
|
||||
new_score_filename_pattern = os.path.join(cwd, "tests/challenges/new_score_*.json")
|
||||
current_score_filename = os.path.join(cwd, "tests/challenges/current_score.json")
|
||||
|
||||
merged_data: Dict[str, Any] = {}
|
||||
for filename in glob.glob(new_score_filename_pattern):
|
||||
|
@ -44,4 +40,5 @@ for filename in glob.glob(new_score_filename_pattern):
|
|||
sorted_data = recursive_sort_dict(merged_data)
|
||||
|
||||
with open(current_score_filename, "w") as f_current:
|
||||
json.dump(sorted_data, f_current, indent=4)
|
||||
json_data = json.dumps(sorted_data, indent=4)
|
||||
f_current.write(json_data + "\n")
|
|
@ -1,22 +1,26 @@
|
|||
import os
|
||||
from pathlib import Path
|
||||
from tempfile import TemporaryDirectory
|
||||
|
||||
import pytest
|
||||
import yaml
|
||||
from pytest_mock import MockerFixture
|
||||
|
||||
from autogpt.agent.agent import Agent
|
||||
from autogpt.commands.command import CommandRegistry
|
||||
from autogpt.config.ai_config import AIConfig
|
||||
from autogpt.config.config import Config
|
||||
from autogpt.llm.api_manager import ApiManager
|
||||
from autogpt.logs import TypingConsoleHandler
|
||||
from autogpt.memory.vector import get_memory
|
||||
from autogpt.prompts.prompt import DEFAULT_TRIGGERING_PROMPT
|
||||
from autogpt.workspace import Workspace
|
||||
|
||||
pytest_plugins = ["tests.integration.agent_factory", "tests.integration.memory.utils"]
|
||||
|
||||
PROXY = os.environ.get("PROXY")
|
||||
|
||||
|
||||
@pytest.fixture()
|
||||
def vcr_cassette_dir(request):
|
||||
test_name = os.path.splitext(request.node.name)[0]
|
||||
return os.path.join("tests/Auto-GPT-test-cassettes", test_name)
|
||||
pytest_plugins = [
|
||||
"tests.integration.agent_factory",
|
||||
"tests.integration.memory.utils",
|
||||
"tests.vcr",
|
||||
]
|
||||
|
||||
|
||||
@pytest.fixture()
|
||||
|
@ -30,9 +34,25 @@ def workspace(workspace_root: Path) -> Workspace:
|
|||
return Workspace(workspace_root, restrict_to_workspace=True)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def temp_plugins_config_file():
|
||||
"""Create a plugins_config.yaml file in a temp directory so that it doesn't mess with existing ones"""
|
||||
config_directory = TemporaryDirectory()
|
||||
config_file = os.path.join(config_directory.name, "plugins_config.yaml")
|
||||
with open(config_file, "w+") as f:
|
||||
f.write(yaml.dump({}))
|
||||
|
||||
yield config_file
|
||||
|
||||
|
||||
@pytest.fixture()
|
||||
def config(mocker: MockerFixture, workspace: Workspace) -> Config:
|
||||
def config(
|
||||
temp_plugins_config_file: str, mocker: MockerFixture, workspace: Workspace
|
||||
) -> Config:
|
||||
config = Config()
|
||||
config.plugins_dir = "tests/unit/data/test_plugins"
|
||||
config.plugins_config_file = temp_plugins_config_file
|
||||
config.load_plugins_config()
|
||||
|
||||
# Do a little setup and teardown since the config object is a singleton
|
||||
mocker.patch.multiple(
|
||||
|
@ -48,3 +68,44 @@ def api_manager() -> ApiManager:
|
|||
if ApiManager in ApiManager._instances:
|
||||
del ApiManager._instances[ApiManager]
|
||||
return ApiManager()
|
||||
|
||||
|
||||
@pytest.fixture(autouse=True)
|
||||
def patch_emit(monkeypatch):
|
||||
# convert plain_output to a boolean
|
||||
|
||||
if bool(os.environ.get("PLAIN_OUTPUT")):
|
||||
|
||||
def quick_emit(self, record: str):
|
||||
print(self.format(record))
|
||||
|
||||
monkeypatch.setattr(TypingConsoleHandler, "emit", quick_emit)
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def agent(config: Config, workspace: Workspace) -> Agent:
|
||||
ai_config = AIConfig(
|
||||
ai_name="Base",
|
||||
ai_role="A base AI",
|
||||
ai_goals=[],
|
||||
)
|
||||
|
||||
command_registry = CommandRegistry()
|
||||
ai_config.command_registry = command_registry
|
||||
|
||||
config.set_memory_backend("json_file")
|
||||
memory_json_file = get_memory(config, init=True)
|
||||
|
||||
system_prompt = ai_config.construct_full_prompt()
|
||||
|
||||
return Agent(
|
||||
ai_name=ai_config.ai_name,
|
||||
memory=memory_json_file,
|
||||
command_registry=command_registry,
|
||||
ai_config=ai_config,
|
||||
config=config,
|
||||
next_action_count=0,
|
||||
system_prompt=system_prompt,
|
||||
triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
|
||||
workspace_directory=workspace.root,
|
||||
)
|
||||
|
|
|
@ -59,7 +59,8 @@ def browser_agent(agent_test_config, memory_none: NoMemory, workspace: Workspace
|
|||
ai_name="",
|
||||
memory=memory_none,
|
||||
command_registry=command_registry,
|
||||
config=ai_config,
|
||||
ai_config=ai_config,
|
||||
config=agent_test_config,
|
||||
next_action_count=0,
|
||||
system_prompt=system_prompt,
|
||||
triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
|
||||
|
@ -70,49 +71,45 @@ def browser_agent(agent_test_config, memory_none: NoMemory, workspace: Workspace
|
|||
|
||||
|
||||
@pytest.fixture
|
||||
def writer_agent(agent_test_config, memory_none: NoMemory, workspace: Workspace):
|
||||
command_registry = CommandRegistry()
|
||||
command_registry.import_commands("autogpt.commands.file_operations")
|
||||
command_registry.import_commands("autogpt.app")
|
||||
command_registry.import_commands("autogpt.commands.task_statuses")
|
||||
def file_system_agents(
|
||||
agent_test_config, memory_json_file: NoMemory, workspace: Workspace
|
||||
):
|
||||
agents = []
|
||||
command_registry = get_command_registry(agent_test_config)
|
||||
|
||||
ai_goals = [
|
||||
"Write 'Hello World' into a file named \"hello_world.txt\".",
|
||||
'Write \'Hello World\' into 2 files named "hello_world_1.txt"and "hello_world_2.txt".',
|
||||
]
|
||||
|
||||
for ai_goal in ai_goals:
|
||||
ai_config = AIConfig(
|
||||
ai_name="write_to_file-GPT",
|
||||
ai_role="an AI designed to use the write_to_file command to write 'Hello World' into a file named \"hello_world.txt\" and then use the task_complete command to complete the task.",
|
||||
ai_goals=[
|
||||
"Use the write_to_file command to write 'Hello World' into a file named \"hello_world.txt\".",
|
||||
"Use the task_complete command to complete the task.",
|
||||
"Do not use any other commands.",
|
||||
],
|
||||
ai_name="File System Agent",
|
||||
ai_role="an AI designed to manage a file system.",
|
||||
ai_goals=[ai_goal],
|
||||
)
|
||||
ai_config.command_registry = command_registry
|
||||
|
||||
triggering_prompt = (
|
||||
"Determine which next command to use, and respond using the"
|
||||
" format specified above:"
|
||||
)
|
||||
system_prompt = ai_config.construct_full_prompt()
|
||||
|
||||
agent = Agent(
|
||||
ai_name="",
|
||||
memory=memory_none,
|
||||
Config().set_continuous_mode(False)
|
||||
agents.append(
|
||||
Agent(
|
||||
ai_name="File System Agent",
|
||||
memory=memory_json_file,
|
||||
command_registry=command_registry,
|
||||
config=ai_config,
|
||||
ai_config=ai_config,
|
||||
config=agent_test_config,
|
||||
next_action_count=0,
|
||||
system_prompt=system_prompt,
|
||||
triggering_prompt=triggering_prompt,
|
||||
triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
|
||||
workspace_directory=workspace.root,
|
||||
)
|
||||
|
||||
return agent
|
||||
)
|
||||
return agents
|
||||
|
||||
|
||||
@pytest.fixture
|
||||
def memory_management_agent(agent_test_config, memory_json_file, workspace: Workspace):
|
||||
command_registry = CommandRegistry()
|
||||
command_registry.import_commands("autogpt.commands.file_operations")
|
||||
command_registry.import_commands("autogpt.app")
|
||||
command_registry.import_commands("autogpt.commands.task_statuses")
|
||||
command_registry = get_command_registry(agent_test_config)
|
||||
|
||||
ai_config = AIConfig(
|
||||
ai_name="Follow-Instructions-GPT",
|
||||
|
@@ -127,10 +124,11 @@ def memory_management_agent(agent_test_config, memory_json_file, workspace: Workspace):
    system_prompt = ai_config.construct_full_prompt()

    agent = Agent(
        ai_name="",
        ai_name="Follow-Instructions-GPT",
        memory=memory_json_file,
        command_registry=command_registry,
        config=ai_config,
        ai_config=ai_config,
        config=agent_test_config,
        next_action_count=0,
        system_prompt=system_prompt,
        triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
@@ -145,19 +143,12 @@ def information_retrieval_agents(
    agent_test_config, memory_json_file, workspace: Workspace
):
    agents = []
    command_registry = CommandRegistry()
    enabled_command_categories = [
        x
        for x in COMMAND_CATEGORIES
        if x not in agent_test_config.disabled_command_categories
    ]
    command_registry = get_command_registry(agent_test_config)

    for command_category in enabled_command_categories:
        command_registry.import_commands(command_category)
    ai_goals = [
        "Write to a file called output.txt tesla's revenue in 2022 after searching for 'tesla revenue 2022'.",
        "Write to a file called output.txt tesla's revenue in 2022.",
        "Write to a file called output.txt tesla's revenue every year since its creation.",
        "Write to a file called output.txt containing tesla's revenue in 2022 after searching for 'tesla revenue 2022'.",
        "Write to a file called output.txt containing tesla's revenue in 2022.",
        "Write to a file called output.txt containing tesla's revenue every year since its creation.",
    ]
    for ai_goal in ai_goals:
        ai_config = AIConfig(
@@ -173,7 +164,8 @@ def information_retrieval_agents(
                ai_name="Information Retrieval Agent",
                memory=memory_json_file,
                command_registry=command_registry,
                config=ai_config,
                ai_config=ai_config,
                config=agent_test_config,
                next_action_count=0,
                system_prompt=system_prompt,
                triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
@@ -184,7 +176,9 @@ def information_retrieval_agents(

@pytest.fixture
def kubernetes_agent(memory_json_file, workspace: Workspace):
def kubernetes_agent(
    agent_test_config: Config, memory_json_file: NoMemory, workspace: Workspace
) -> Agent:
    command_registry = CommandRegistry()
    command_registry.import_commands("autogpt.commands.file_operations")
    command_registry.import_commands("autogpt.app")
@@ -205,7 +199,8 @@ def kubernetes_agent(memory_json_file, workspace: Workspace):
        ai_name="Kubernetes-Demo",
        memory=memory_json_file,
        command_registry=command_registry,
        config=ai_config,
        ai_config=ai_config,
        config=agent_test_config,
        next_action_count=0,
        system_prompt=system_prompt,
        triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
@@ -238,7 +233,8 @@ def get_nobel_prize_agent(agent_test_config, memory_json_file, workspace: Workspace):
        ai_name="Get-PhysicsNobelPrize",
        memory=memory_json_file,
        command_registry=command_registry,
        config=ai_config,
        ai_config=ai_config,
        config=agent_test_config,
        next_action_count=0,
        system_prompt=system_prompt,
        triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
@@ -249,38 +245,57 @@ def get_nobel_prize_agent(agent_test_config, memory_json_file, workspace: Workspace):

@pytest.fixture
def debug_code_agent(agent_test_config, memory_json_file, workspace: Workspace):
    command_registry = CommandRegistry()
    command_registry.import_commands("autogpt.commands.file_operations")
    command_registry.import_commands("autogpt.commands.execute_code")
    command_registry.import_commands("autogpt.commands.improve_code")
    command_registry.import_commands("autogpt.app")
    command_registry.import_commands("autogpt.commands.task_statuses")
def debug_code_agents(agent_test_config, memory_json_file, workspace: Workspace):
    agents = []
    goals = [
        [
            "1- Run test.py using the execute_python_file command.",
            "2- Read code.py using the read_file command.",
            "3- Modify code.py using the write_to_file command."
            "Repeat step 1, 2 and 3 until test.py runs without errors.",
        ],
        [
            "1- Run test.py.",
            "2- Read code.py.",
            "3- Modify code.py."
            "Repeat step 1, 2 and 3 until test.py runs without errors.",
        ],
        ["1- Make test.py run without errors."],
    ]

    for goal in goals:
        ai_config = AIConfig(
            ai_name="Debug Code Agent",
            ai_role="an autonomous agent that specializes in debugging python code",
            ai_goals=[
                "1-Run the code in the file named 'code.py' using the execute_code command.",
                "2-Read code.py to understand why the code is not working as expected.",
                "3-Modify code.py to fix the error.",
                "Repeat step 1, 2 and 3 until the code is working as expected. When you're done use the task_complete command.",
                "Do not use any other commands than execute_python_file and write_file",
            ],
            ai_goals=goal,
        )
        command_registry = get_command_registry(agent_test_config)
        ai_config.command_registry = command_registry

        system_prompt = ai_config.construct_full_prompt()
        Config().set_continuous_mode(False)
    agent = Agent(
        agents.append(
            Agent(
                ai_name="Debug Code Agent",
                memory=memory_json_file,
                command_registry=command_registry,
                config=ai_config,
                ai_config=ai_config,
                config=agent_test_config,
                next_action_count=0,
                system_prompt=system_prompt,
                triggering_prompt=DEFAULT_TRIGGERING_PROMPT,
                workspace_directory=workspace.root,
            )
        )
    return agents

    return agent

def get_command_registry(agent_test_config):
    command_registry = CommandRegistry()
    enabled_command_categories = [
        x
        for x in COMMAND_CATEGORIES
        if x not in agent_test_config.disabled_command_categories
    ]
    for command_category in enabled_command_categories:
        command_registry.import_commands(command_category)
    return command_registry
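The new get_command_registry helper centralizes the per-category imports that each fixture previously did by hand: it filters COMMAND_CATEGORIES by the test Config's disabled_command_categories and imports the rest. A rough standalone illustration of the same filtering idea; the category strings and the FakeTestConfig stand-in here are invented for demonstration, only the attribute names come from the diff:

# Illustration only; not the project's code.
COMMAND_CATEGORIES = [
    "autogpt.commands.file_operations",
    "autogpt.commands.execute_code",
    "autogpt.app",
]

class FakeTestConfig:
    disabled_command_categories = ["autogpt.commands.execute_code"]

enabled = [
    category
    for category in COMMAND_CATEGORIES
    if category not in FakeTestConfig.disabled_command_categories
]
# enabled == ["autogpt.commands.file_operations", "autogpt.app"]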
@@ -1,29 +0,0 @@
import pytest

from autogpt.agent import Agent
from autogpt.commands.file_operations import read_file
from autogpt.config import Config
from tests.integration.challenges.challenge_decorator.challenge_decorator import (
    challenge,
)
from tests.integration.challenges.utils import run_interaction_loop
from tests.utils import requires_api_key

CYCLE_COUNT = 3


@requires_api_key("OPENAI_API_KEY")
@pytest.mark.vcr
@challenge
def test_write_file(
    writer_agent: Agent,
    patched_api_requestor: None,
    monkeypatch: pytest.MonkeyPatch,
    config: Config,
    level_to_run: int,
) -> None:
    file_path = str(writer_agent.workspace.get_path("hello_world.txt"))
    run_interaction_loop(monkeypatch, writer_agent, CYCLE_COUNT)

    content = read_file(file_path, config)
    assert content == "Hello World", f"Expected 'Hello World', got {content}"
@@ -1,73 +0,0 @@
import os
from functools import wraps
from typing import Any, Callable, Optional

import pytest

from tests.integration.challenges.challenge_decorator.challenge import Challenge
from tests.integration.challenges.challenge_decorator.challenge_utils import (
    create_challenge,
)
from tests.integration.challenges.challenge_decorator.score_utils import (
    get_scores,
    update_new_score,
)

MAX_LEVEL_TO_IMPROVE_ON = (
    1  # we will attempt to beat 1 level above the current level for now.
)


def challenge(func: Callable[..., Any]) -> Callable[..., None]:
    @wraps(func)
    def wrapper(*args: Any, **kwargs: Any) -> None:
        run_remaining = MAX_LEVEL_TO_IMPROVE_ON if Challenge.BEAT_CHALLENGES else 1
        original_error = None

        while run_remaining > 0:
            current_score, new_score, new_score_location = get_scores()
            level_to_run = kwargs["level_to_run"] if "level_to_run" in kwargs else None
            challenge = create_challenge(
                func, current_score, Challenge.BEAT_CHALLENGES, level_to_run
            )
            if challenge.level_to_run is not None:
                kwargs["level_to_run"] = challenge.level_to_run
                try:
                    func(*args, **kwargs)
                    challenge.succeeded = True
                except AssertionError as err:
                    original_error = err
                    challenge.succeeded = False
            else:
                challenge.skipped = True
            if os.environ.get("CI") == "true":
                new_max_level_beaten = get_new_max_level_beaten(
                    challenge, Challenge.BEAT_CHALLENGES
                )
                update_new_score(
                    new_score_location, new_score, challenge, new_max_level_beaten
                )
            if challenge.level_to_run is None:
                pytest.skip("This test has not been unlocked yet.")

            if not challenge.succeeded:
                if Challenge.BEAT_CHALLENGES or challenge.is_new_challenge:
                    # xfail
                    pytest.xfail("Challenge failed")
                if original_error:
                    raise original_error
                raise AssertionError("Challenge failed")
            run_remaining -= 1

    return wrapper


def get_new_max_level_beaten(
    challenge: Challenge, beat_challenges: bool
) -> Optional[int]:
    if challenge.succeeded:
        return challenge.level_to_run
    if challenge.skipped:
        return challenge.max_level_beaten
    # Challenge failed
    return challenge.max_level_beaten if beat_challenges else None
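For reference, the decorator removed above skipped tests whose level is still locked, turned failures into xfail while BEAT_CHALLENGES or a new challenge is being attempted, and wrote scores back only in CI. The small helper at the end encodes the scoring rule; the following mimic restates that rule with a plain dict in place of the Challenge object, purely as a worked example (assumed shape, not the project's code):

# Illustrative mimic of get_new_max_level_beaten's rule.
def new_max_level_beaten(challenge: dict, beat_challenges: bool):
    if challenge["succeeded"]:
        return challenge["level_to_run"]      # pass: record the level just run
    if challenge["skipped"]:
        return challenge["max_level_beaten"]  # skip: keep the previous best
    # failure: keep the previous best only while trying to beat challenges
    return challenge["max_level_beaten"] if beat_challenges else None

state = {"succeeded": False, "skipped": False, "level_to_run": 3, "max_level_beaten": 2}
print(new_max_level_beaten({**state, "succeeded": True}, False))  # 3
print(new_max_level_beaten({**state, "skipped": True}, False))    # 2
print(new_max_level_beaten(state, True))                          # 2
print(new_max_level_beaten(state, False))                         # None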