Merge branch 'Significant-Gravitas:master' into master
commit 5b3afeccc1
@@ -21,7 +21,7 @@ AI_SETTINGS_FILE=ai_settings.yaml
### OPENAI
# OPENAI_API_KEY - OpenAI API Key (Example: my-openai-api-key)
# TEMPERATURE - Sets temperature in OpenAI (Default: 1)
# TEMPERATURE - Sets temperature in OpenAI (Default: 0)
# USE_AZURE - Use Azure OpenAI or not (Default: False)
OPENAI_API_KEY=your-openai-api-key
TEMPERATURE=0
@@ -74,6 +74,27 @@ REDIS_PASSWORD=
WIPE_REDIS_ON_START=False
MEMORY_INDEX=auto-gpt

### WEAVIATE
# MEMORY_BACKEND - Use 'weaviate' to use Weaviate vector storage
# WEAVIATE_HOST - Weaviate host IP
# WEAVIATE_PORT - Weaviate host port
# WEAVIATE_PROTOCOL - Weaviate host protocol (e.g. 'http')
# USE_WEAVIATE_EMBEDDED - Whether to use Embedded Weaviate
# WEAVIATE_EMBEDDED_PATH - File system path where to persist data when running Embedded Weaviate
# WEAVIATE_USERNAME - Weaviate username
# WEAVIATE_PASSWORD - Weaviate password
# WEAVIATE_API_KEY - Weaviate API key if using API-key-based authentication
# MEMORY_INDEX - Name of index to create in Weaviate
WEAVIATE_HOST="127.0.0.1"
WEAVIATE_PORT=8080
WEAVIATE_PROTOCOL="http"
USE_WEAVIATE_EMBEDDED=False
WEAVIATE_EMBEDDED_PATH="/home/me/.local/share/weaviate"
WEAVIATE_USERNAME=
WEAVIATE_PASSWORD=
WEAVIATE_API_KEY=
MEMORY_INDEX=AutoGpt

### MILVUS
# MILVUS_ADDR - Milvus remote address (e.g. localhost:19530)
# MILVUS_COLLECTION - Milvus collection,
@@ -2,6 +2,15 @@ name: Bug report 🐛
description: Create a bug report for Auto-GPT.
labels: ['status: needs triage']
body:
- type: checkboxes
  attributes:
    label: ⚠️ Search for existing issues first ⚠️
    description: >
      Please [search the history](https://github.com/Torantulino/Auto-GPT/issues)
      to see if an issue already exists for the same problem.
    options:
    - label: I have searched the existing issues, and there is no existing issue for my problem
      required: true
- type: markdown
  attributes:
    value: |

@@ -19,14 +28,14 @@ body:
- Provide commit-hash (`git rev-parse HEAD` gets it)
- If it's a pip/packages issue, provide pip version, python version
- If it's a crash, provide traceback.

- type: checkboxes
  attributes:
    label: Duplicates
    description: Please [search the history](https://github.com/Torantulino/Auto-GPT/issues) to see if an issue already exists for the same problem.
    label: GPT-3 or GPT-4
    description: >
      If you are using Auto-GPT with `--gpt3only`, your problems may be caused by
      the limitations of GPT-3.5
    options:
    - label: I have searched the existing issues
      required: true
    - label: I am using Auto-GPT with GPT-3 (GPT-3.5)
- type: textarea
  attributes:
    label: Steps to reproduce 🕹
@@ -0,0 +1,24 @@
name: Push Docker Image on Release

on:
  push:
    branches: [ "stable" ]

jobs:

  build:

    runs-on: ubuntu-latest

    steps:
    - uses: actions/checkout@v3
    - name: Log in to Docker hub
      env:
        DOCKER_USER: ${{secrets.DOCKER_USER}}
        DOCKER_PASSWORD: ${{secrets.DOCKER_PASSWORD}}
      run: |
        docker login -u $DOCKER_USER -p $DOCKER_PASSWORD
    - name: Build the Docker image
      run: docker build . --file Dockerfile --tag ${{secrets.DOCKER_USER}}/auto-gpt:$(git describe --tags `git rev-list --tags --max-count=1`)
    - name: Docker Push
      run: docker push ${{secrets.DOCKER_USER}}/auto-gpt
@@ -0,0 +1,40 @@
# Code of Conduct for auto-gpt

## 1. Purpose

The purpose of this Code of Conduct is to provide guidelines for contributors to the auto-gpt project on GitHub. We aim to create a positive and inclusive environment where all participants can contribute and collaborate effectively. By participating in this project, you agree to abide by this Code of Conduct.

## 2. Scope

This Code of Conduct applies to all contributors, maintainers, and users of the auto-gpt project. It extends to all project spaces, including but not limited to issues, pull requests, code reviews, comments, and other forms of communication within the project.

## 3. Our Standards

We encourage the following behavior:

* Being respectful and considerate to others
* Actively seeking diverse perspectives
* Providing constructive feedback and assistance
* Demonstrating empathy and understanding

We discourage the following behavior:

* Harassment or discrimination of any kind
* Disrespectful, offensive, or inappropriate language or content
* Personal attacks or insults
* Unwarranted criticism or negativity

## 4. Reporting and Enforcement

If you witness or experience any violations of this Code of Conduct, please report them to the project maintainers by email or other appropriate means. The maintainers will investigate and take appropriate action, which may include warnings, temporary or permanent bans, or other measures as necessary.

Maintainers are responsible for ensuring compliance with this Code of Conduct and may take action to address any violations.

## 5. Acknowledgements

This Code of Conduct is adapted from the [Contributor Covenant](https://www.contributor-covenant.org/version/2/0/code_of_conduct.html).

## 6. Contact

If you have any questions or concerns, please contact the project maintainers.
CONTRIBUTING.md
@@ -1,64 +1,99 @@
# Contributing to ProjectName

To contribute to this GitHub project, you can follow these steps:
First of all, thank you for considering contributing to our project! We appreciate your time and effort, and we value any contribution, whether it's reporting a bug, suggesting a new feature, or submitting a pull request.

1. Fork the repository you want to contribute to by clicking the "Fork" button on the project page.
This document provides guidelines and best practices to help you contribute effectively.

2. Clone the repository to your local machine using the following command:
## Table of Contents

- [Code of Conduct](#code-of-conduct)
- [Getting Started](#getting-started)
- [How to Contribute](#how-to-contribute)
  - [Reporting Bugs](#reporting-bugs)
  - [Suggesting Enhancements](#suggesting-enhancements)
  - [Submitting Pull Requests](#submitting-pull-requests)
- [Style Guidelines](#style-guidelines)
  - [Code Formatting](#code-formatting)
  - [Pre-Commit Hooks](#pre-commit-hooks)

## Code of Conduct

By participating in this project, you agree to abide by our [Code of Conduct](CODE_OF_CONDUCT.md). Please read it to understand the expectations we have for everyone who contributes to this project.

## Getting Started

To start contributing, follow these steps:

1. Fork the repository and clone your fork.
2. Create a new branch for your changes (use a descriptive name, such as `fix-bug-123` or `add-new-feature`).
3. Make your changes in the new branch.
4. Test your changes thoroughly.
5. Commit and push your changes to your fork.
6. Create a pull request following the guidelines in the [Submitting Pull Requests](#submitting-pull-requests) section.

## How to Contribute

### Reporting Bugs

If you find a bug in the project, please create an issue on GitHub with the following information:

- A clear, descriptive title for the issue.
- A description of the problem, including steps to reproduce the issue.
- Any relevant logs, screenshots, or other supporting information.

### Suggesting Enhancements

If you have an idea for a new feature or improvement, please create an issue on GitHub with the following information:

- A clear, descriptive title for the issue.
- A detailed description of the proposed enhancement, including any benefits and potential drawbacks.
- Any relevant examples, mockups, or supporting information.

### Submitting Pull Requests

When submitting a pull request, please ensure that your changes meet the following criteria:

- Your pull request should be atomic and focus on a single change.
- Your pull request should include tests for your change.
- You should have thoroughly tested your changes with multiple different prompts.
- You should have considered potential risks and mitigations for your changes.
- You should have documented your changes clearly and comprehensively.
- You should not include any unrelated or "extra" small tweaks or changes.

## Style Guidelines

### Code Formatting

We use the `black` code formatter to maintain a consistent coding style across the project. Please ensure that your code is formatted using `black` before submitting a pull request. You can install `black` using `pip`:

```bash
pip install black
```
git clone https://github.com/<YOUR-GITHUB-USERNAME>/Auto-GPT

To format your code, run the following command in the project's root directory:

```bash
black .
```
3. Install the project requirements
```
pip install -r requirements.txt
```
4. Install pre-commit hooks
### Pre-Commit Hooks
We use pre-commit hooks to ensure that code formatting and other checks are performed automatically before each commit. To set up pre-commit hooks for this project, follow these steps:

Install the pre-commit package using pip:
```bash
pip install pre-commit
```

Run the following command in the project's root directory to install the pre-commit hooks:
```bash
pre-commit install
```
5. Create a new branch for your changes using the following command:

```
git checkout -b "branch-name"
```
6. Make your changes to the code or documentation.
- Example: Improve User Interface or Add Documentation.
Now, the pre-commit hooks will run automatically before each commit, checking your code formatting and other requirements.

If you encounter any issues or have questions, feel free to reach out to the maintainers or open a new issue on GitHub. We're here to help and appreciate your efforts to contribute to the project.

7. Add the changes to the staging area using the following command:
```
git add .
```
Happy coding, and once again, thank you for your contributions!

8. Commit the changes with a meaningful commit message using the following command:
```
git commit -m "your commit message"
```
9. Push the changes to your forked repository using the following command:
```
git push origin branch-name
```
10. Go to the GitHub website and navigate to your forked repository.
Maintainers will look at PRs that have no merge conflicts when deciding what to add to the project. Make sure your PR shows up here:

11. Click the "New pull request" button.

12. Select the branch you just pushed to and the branch you want to merge into on the original repository.

13. Add a description of your changes and click the "Create pull request" button.

14. Wait for the project maintainer to review your changes and provide feedback.

15. Make any necessary changes based on feedback and repeat steps 5-12 until your changes are accepted and merged into the main project.

16. Once your changes are merged, you can update your forked repository and local copy of the repository with the following commands:

```
git fetch upstream
git checkout master
git merge upstream/master
```
Finally, delete the branch you created with the following command:
```
git branch -d branch-name
```
That's it you made it 🐣⭐⭐
https://github.com/Torantulino/Auto-GPT/pulls?q=is%3Apr+is%3Aopen+-is%3Aconflict+
@@ -3,7 +3,7 @@ FROM python:3.11-slim

# Install git
RUN apt-get -y update
RUN apt-get -y install git
RUN apt-get -y install git chromium-driver

# Set environment variables
ENV PIP_NO_CACHE_DIR=yes \
@@ -1,5 +1,6 @@
"""Agent manager for managing GPT agents"""
from typing import List, Tuple, Union
from __future__ import annotations

from autogpt.llm_utils import create_chat_completion
from autogpt.config.config import Singleton

@@ -14,7 +15,7 @@ class AgentManager(metaclass=Singleton):
# Create new GPT agent
# TODO: Centralise use of create_chat_completion() to globally enforce token limit

def create_agent(self, task: str, prompt: str, model: str) -> Tuple[int, str]:
def create_agent(self, task: str, prompt: str, model: str) -> tuple[int, str]:
"""Create a new agent and return its key

Args:

@@ -47,7 +48,7 @@ class AgentManager(metaclass=Singleton):

return key, agent_reply

def message_agent(self, key: Union[str, int], message: str) -> str:
def message_agent(self, key: str | int, message: str) -> str:
"""Send a message to an agent and return its response

Args:

@@ -73,7 +74,7 @@ class AgentManager(metaclass=Singleton):

return agent_reply

def list_agents(self) -> List[Tuple[Union[str, int], str]]:
def list_agents(self) -> list[tuple[str | int, str]]:
"""Return a list of all agents

Returns:
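These hunks are part of a repo-wide move from `typing.List`/`Tuple`/`Union`/`Optional` to the built-in generics and the `X | Y` union syntax, enabled on the Python versions Auto-GPT targets by adding `from __future__ import annotations`. A minimal sketch of the pattern, using a hypothetical function rather than code from the repository:

```python
from __future__ import annotations  # annotations are stored as strings and never evaluated at runtime


# With the future import, built-in generics and "X | Y" unions are valid in
# annotations even on Python 3.8, because the interpreter no longer evaluates them.
def list_agents(agents: dict[int, str]) -> list[tuple[int | str, str]]:
    """Return (key, task) pairs for every registered agent."""
    return list(agents.items())


print(list_agents({0: "summarise the README"}))  # [(0, 'summarise the README')]
```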
@@ -125,9 +125,16 @@ def execute_command(command_name: str, arguments):
key = CFG.google_api_key
if key and key.strip() and key != "your-google-api-key":
google_result = google_official_search(arguments["input"])
return google_result
else:
google_result = google_search(arguments["input"])
safe_message = google_result.encode("utf-8", "ignore")

# google_result can be a list or a string depending on the search results
if isinstance(google_result, list):
safe_message = [google_result_single.encode('utf-8', 'ignore') for google_result_single in google_result]
else:
safe_message = google_result.encode('utf-8', 'ignore')

return str(safe_message)
elif command_name == "memory_add":
return memory.add(arguments["string"])
@@ -96,7 +96,7 @@ def chat_with_ai(

while current_tokens_used > 2500:
# remove memories until we are under 2500 tokens
relevant_memory = relevant_memory[1:]
relevant_memory = relevant_memory[:-1]
(
next_message_to_add_index,
current_tokens_used,
@@ -2,15 +2,13 @@ import requests
import json

from autogpt.config import Config
from autogpt.commands.file_operations import safe_join
from autogpt.workspace import path_in_workspace

cfg = Config()

working_directory = "auto_gpt_workspace"


def read_audio_from_file(audio_path):
audio_path = safe_join(working_directory, audio_path)
audio_path = path_in_workspace(audio_path)
with open(audio_path, "rb") as audio_file:
audio = audio_file.read()
return read_audio(audio)
@@ -1,10 +1,10 @@
"""Code evaluation module."""
from typing import List
from __future__ import annotations

from autogpt.llm_utils import call_ai_function


def evaluate_code(code: str) -> List[str]:
def evaluate_code(code: str) -> list[str]:
"""
A function that takes in a string and returns a response from create chat
completion api call.
@@ -1,12 +1,11 @@
"""Execute code in a Docker container"""
import os
from pathlib import Path
import subprocess

import docker
from docker.errors import ImageNotFound

WORKING_DIRECTORY = Path(__file__).parent.parent / "auto_gpt_workspace"
from autogpt.workspace import path_in_workspace, WORKSPACE_PATH


def execute_python_file(file: str):

@@ -19,12 +18,12 @@ def execute_python_file(file: str):
str: The output of the file
"""

print(f"Executing file '{file}' in workspace '{WORKING_DIRECTORY}'")
print(f"Executing file '{file}' in workspace '{WORKSPACE_PATH}'")

if not file.endswith(".py"):
return "Error: Invalid file type. Only .py files are allowed."

file_path = os.path.join(WORKING_DIRECTORY, file)
file_path = path_in_workspace(file)

if not os.path.isfile(file_path):
return f"Error: File '{file}' does not exist."

@@ -65,7 +64,7 @@ def execute_python_file(file: str):
image_name,
f"python {file}",
volumes={
os.path.abspath(WORKING_DIRECTORY): {
os.path.abspath(WORKSPACE_PATH): {
"bind": "/workspace",
"mode": "ro",
}

@@ -100,9 +99,8 @@ def execute_shell(command_line: str) -> str:
"""
current_dir = os.getcwd()
# Change dir into workspace if necessary
if str(WORKING_DIRECTORY) not in current_dir:
work_dir = os.path.join(os.getcwd(), WORKING_DIRECTORY)
os.chdir(work_dir)
if str(WORKSPACE_PATH) not in current_dir:
os.chdir(WORKSPACE_PATH)

print(f"Executing command '{command_line}' in working directory '{os.getcwd()}'")
@@ -1,19 +1,14 @@
"""File operations for AutoGPT"""
from __future__ import annotations

import os
import os.path
from pathlib import Path
from typing import Generator, List

# Set a dedicated folder for file I/O
WORKING_DIRECTORY = Path(os.getcwd()) / "auto_gpt_workspace"

# Create the directory if it doesn't exist
if not os.path.exists(WORKING_DIRECTORY):
os.makedirs(WORKING_DIRECTORY)
from typing import Generator
from autogpt.workspace import path_in_workspace, WORKSPACE_PATH

LOG_FILE = "file_logger.txt"
LOG_FILE_PATH = WORKING_DIRECTORY / LOG_FILE
WORKING_DIRECTORY = str(WORKING_DIRECTORY)
LOG_FILE_PATH = WORKSPACE_PATH / LOG_FILE


def check_duplicate_operation(operation: str, filename: str) -> bool:

@@ -45,26 +40,7 @@ def log_operation(operation: str, filename: str) -> None:
with open(LOG_FILE_PATH, "w", encoding="utf-8") as f:
f.write("File Operation Logger ")

append_to_file(LOG_FILE, log_entry)


def safe_join(base: str, *paths) -> str:
"""Join one or more path components intelligently.

Args:
base (str): The base path
*paths (str): The paths to join to the base path

Returns:
str: The joined path
"""
new_path = os.path.join(base, *paths)
norm_new_path = os.path.normpath(new_path)

if os.path.commonprefix([base, norm_new_path]) != base:
raise ValueError("Attempted to access outside of working directory.")

return norm_new_path
append_to_file(LOG_FILE, log_entry, shouldLog = False)


def split_file(

@@ -104,7 +80,7 @@ def read_file(filename: str) -> str:
str: The contents of the file
"""
try:
filepath = safe_join(WORKING_DIRECTORY, filename)
filepath = path_in_workspace(filename)
with open(filepath, "r", encoding="utf-8") as f:
content = f.read()
return content

@@ -159,7 +135,7 @@ def write_to_file(filename: str, text: str) -> str:
if check_duplicate_operation("write", filename):
return "Error: File has already been updated."
try:
filepath = safe_join(WORKING_DIRECTORY, filename)
filepath = path_in_workspace(filename)
directory = os.path.dirname(filepath)
if not os.path.exists(directory):
os.makedirs(directory)

@@ -171,7 +147,7 @@ def write_to_file(filename: str, text: str) -> str:
return f"Error: {str(e)}"


def append_to_file(filename: str, text: str) -> str:
def append_to_file(filename: str, text: str, shouldLog: bool = True) -> str:
"""Append text to a file

Args:

@@ -182,10 +158,13 @@ def append_to_file(filename: str, text: str) -> str:
str: A message indicating success or failure
"""
try:
filepath = safe_join(WORKING_DIRECTORY, filename)
filepath = path_in_workspace(filename)
with open(filepath, "a") as f:
f.write(text)
log_operation("append", filename)

if shouldLog:
log_operation("append", filename)

return "Text appended successfully."
except Exception as e:
return f"Error: {str(e)}"

@@ -203,7 +182,7 @@ def delete_file(filename: str) -> str:
if check_duplicate_operation("delete", filename):
return "Error: File has already been deleted."
try:
filepath = safe_join(WORKING_DIRECTORY, filename)
filepath = path_in_workspace(filename)
os.remove(filepath)
log_operation("delete", filename)
return "File deleted successfully."

@@ -211,27 +190,27 @@ def delete_file(filename: str) -> str:
return f"Error: {str(e)}"


def search_files(directory: str) -> List[str]:
def search_files(directory: str) -> list[str]:
"""Search for files in a directory

Args:
directory (str): The directory to search in

Returns:
List[str]: A list of files found in the directory
list[str]: A list of files found in the directory
"""
found_files = []

if directory in {"", "/"}:
search_directory = WORKING_DIRECTORY
search_directory = WORKSPACE_PATH
else:
search_directory = safe_join(WORKING_DIRECTORY, directory)
search_directory = path_in_workspace(directory)

for root, _, files in os.walk(search_directory):
for file in files:
if file.startswith("."):
continue
relative_path = os.path.relpath(os.path.join(root, file), WORKING_DIRECTORY)
relative_path = os.path.relpath(os.path.join(root, file), WORKSPACE_PATH)
found_files.append(relative_path)

return found_files
@@ -1,6 +1,7 @@
"""Google search command for Autogpt."""
from __future__ import annotations

import json
from typing import List, Union

from duckduckgo_search import ddg

@@ -33,7 +34,7 @@ def google_search(query: str, num_results: int = 8) -> str:
return json.dumps(search_results, ensure_ascii=False, indent=4)


def google_official_search(query: str, num_results: int = 8) -> Union[str, List[str]]:
def google_official_search(query: str, num_results: int = 8) -> str | list[str]:
"""Return the results of a google search using the official Google API

Args:
@@ -7,13 +7,11 @@ from base64 import b64decode
import openai
import requests
from PIL import Image
from pathlib import Path
from autogpt.config import Config
from autogpt.workspace import path_in_workspace

CFG = Config()

WORKING_DIRECTORY = Path(__file__).parent.parent / "auto_gpt_workspace"


def generate_image(prompt: str) -> str:
"""Generate an image from a prompt.

@@ -65,7 +63,7 @@ def generate_image_with_hf(prompt: str, filename: str) -> str:
image = Image.open(io.BytesIO(response.content))
print(f"Image Generated for prompt:{prompt}")

image.save(os.path.join(WORKING_DIRECTORY, filename))
image.save(path_in_workspace(filename))

return f"Saved to disk:{filename}"

@@ -93,7 +91,7 @@ def generate_image_with_dalle(prompt: str, filename: str) -> str:

image_data = b64decode(response["data"][0]["b64_json"])

with open(f"{WORKING_DIRECTORY}/{filename}", mode="wb") as png:
with open(path_in_workspace(filename), mode="wb") as png:
png.write(image_data)

return f"Saved to disk:{filename}"
@@ -1,10 +1,11 @@
from __future__ import annotations

import json
from typing import List

from autogpt.llm_utils import call_ai_function


def improve_code(suggestions: List[str], code: str) -> str:
def improve_code(suggestions: list[str], code: str) -> str:
"""
A function that takes in code and suggestions and returns a response from create
chat completion api call.
@@ -1,4 +1,6 @@
"""Web scraping commands using Playwright"""
from __future__ import annotations

try:
from playwright.sync_api import sync_playwright
except ImportError:

@@ -7,7 +9,6 @@ except ImportError:
)
from bs4 import BeautifulSoup
from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
from typing import List, Union


def scrape_text(url: str) -> str:

@@ -45,7 +46,7 @@ def scrape_text(url: str) -> str:
return text


def scrape_links(url: str) -> Union[str, List[str]]:
def scrape_links(url: str) -> str | list[str]:
"""Scrape links from a webpage

Args:
@@ -1,5 +1,6 @@
"""Browse a webpage and summarize it using the LLM model"""
from typing import List, Tuple, Union
from __future__ import annotations

from urllib.parse import urljoin, urlparse

import requests

@@ -66,7 +67,7 @@ def check_local_file_access(url: str) -> bool:

def get_response(
url: str, timeout: int = 10
) -> Union[Tuple[None, str], Tuple[Response, None]]:
) -> tuple[None, str] | tuple[Response, None]:
"""Get the response from a URL

Args:

@@ -74,7 +75,7 @@ def get_response(
timeout (int): The timeout for the HTTP request

Returns:
Tuple[None, str] | Tuple[Response, None]: The response and error message
tuple[None, str] | tuple[Response, None]: The response and error message

Raises:
ValueError: If the URL is invalid

@@ -136,14 +137,14 @@ def scrape_text(url: str) -> str:
return text


def scrape_links(url: str) -> Union[str, List[str]]:
def scrape_links(url: str) -> str | list[str]:
"""Scrape links from a webpage

Args:
url (str): The URL to scrape links from

Returns:
Union[str, List[str]]: The scraped links
str | list[str]: The scraped links
"""
response, error_message = get_response(url)
if error_message:
@@ -1,4 +1,6 @@
"""Selenium web scraping module."""
from __future__ import annotations

from selenium import webdriver
from autogpt.processing.html import extract_hyperlinks, format_hyperlinks
import autogpt.processing.text as summary

@@ -15,13 +17,12 @@ from selenium.webdriver.safari.options import Options as SafariOptions
import logging
from pathlib import Path
from autogpt.config import Config
from typing import List, Tuple, Union

FILE_DIR = Path(__file__).parent.parent
CFG = Config()


def browse_website(url: str, question: str) -> Tuple[str, WebDriver]:
def browse_website(url: str, question: str) -> tuple[str, WebDriver]:
"""Browse a website and return the answer and links to the user

Args:

@@ -43,7 +44,7 @@ def browse_website(url: str, question: str) -> Tuple[str, WebDriver]:
return f"Answer gathered from website: {summary_text} \n \n Links: {links}", driver


def scrape_text_with_selenium(url: str) -> Tuple[WebDriver, str]:
def scrape_text_with_selenium(url: str) -> tuple[WebDriver, str]:
"""Scrape text from a website using selenium

Args:

@@ -97,7 +98,7 @@ def scrape_text_with_selenium(url: str) -> Tuple[WebDriver, str]:
return driver, text


def scrape_links_with_selenium(driver: WebDriver, url: str) -> List[str]:
def scrape_links_with_selenium(driver: WebDriver, url: str) -> list[str]:
"""Scrape links from a website using selenium

Args:
@@ -1,16 +1,17 @@
"""A module that contains a function to generate test cases for the submitted code."""
from __future__ import annotations

import json
from typing import List
from autogpt.llm_utils import call_ai_function


def write_tests(code: str, focus: List[str]) -> str:
def write_tests(code: str, focus: list[str]) -> str:
"""
A function that takes in code and focus topics and returns a response from create
chat completion api call.

Parameters:
focus (List): A list of suggestions around what needs to be improved.
focus (list): A list of suggestions around what needs to be improved.
code (str): Code for test cases to be generated against.
Returns:
A result string from create chat completion. Test cases for the submitted code
@@ -2,8 +2,10 @@
"""
A module that contains the AIConfig class object that contains the configuration
"""
from __future__ import annotations

import os
from typing import List, Optional, Type
from typing import Type
import yaml


@@ -18,7 +20,7 @@ class AIConfig:
"""

def __init__(
self, ai_name: str = "", ai_role: str = "", ai_goals: Optional[List] = None
self, ai_name: str = "", ai_role: str = "", ai_goals: list | None = None
) -> None:
"""
Initialize a class instance
@@ -66,6 +66,16 @@ class Config(metaclass=Singleton):
self.pinecone_api_key = os.getenv("PINECONE_API_KEY")
self.pinecone_region = os.getenv("PINECONE_ENV")

self.weaviate_host = os.getenv("WEAVIATE_HOST")
self.weaviate_port = os.getenv("WEAVIATE_PORT")
self.weaviate_protocol = os.getenv("WEAVIATE_PROTOCOL", "http")
self.weaviate_username = os.getenv("WEAVIATE_USERNAME", None)
self.weaviate_password = os.getenv("WEAVIATE_PASSWORD", None)
self.weaviate_scopes = os.getenv("WEAVIATE_SCOPES", None)
self.weaviate_embedded_path = os.getenv("WEAVIATE_EMBEDDED_PATH")
self.weaviate_api_key = os.getenv("WEAVIATE_API_KEY", None)
self.use_weaviate_embedded = os.getenv("USE_WEAVIATE_EMBEDDED", "False") == "True"

# milvus configuration, e.g., localhost:19530.
self.milvus_addr = os.getenv("MILVUS_ADDR", "localhost:19530")
self.milvus_collection = os.getenv("MILVUS_COLLECTION", "autogpt")
@@ -1,7 +1,8 @@
"""Fix JSON brackets."""
from __future__ import annotations

import contextlib
import json
from typing import Optional
import regex
from colorama import Fore

@@ -46,7 +47,7 @@ def attempt_to_fix_json_by_finding_outermost_brackets(json_string: str):
return json_string


def balance_braces(json_string: str) -> Optional[str]:
def balance_braces(json_string: str) -> str | None:
"""
Balance the braces in a JSON string.
@@ -1,8 +1,9 @@
"""Fix and parse JSON strings."""
from __future__ import annotations

import contextlib
import json
from typing import Any, Dict, Union
from typing import Any

from autogpt.config import Config
from autogpt.json_fixes.auto_fix import fix_json

@@ -71,7 +72,7 @@ def correct_json(json_to_load: str) -> str:

def fix_and_parse_json(
json_to_load: str, try_to_fix_with_gpt: bool = True
) -> Union[str, Dict[Any, Any]]:
) -> str | dict[Any, Any]:
"""Fix and parse JSON string

Args:

@@ -80,7 +81,7 @@ def fix_and_parse_json(
Defaults to True.

Returns:
Union[str, Dict[Any, Any]]: The parsed JSON.
str or dict[Any, Any]: The parsed JSON.
"""

with contextlib.suppress(json.JSONDecodeError):

@@ -109,7 +110,7 @@ def fix_and_parse_json(

def try_ai_fix(
try_to_fix_with_gpt: bool, exception: Exception, json_to_load: str
) -> Union[str, Dict[Any, Any]]:
) -> str | dict[Any, Any]:
"""Try to fix the JSON with the AI

Args:

@@ -121,7 +122,7 @@ def try_ai_fix(
exception: If try_to_fix_with_gpt is False.

Returns:
Union[str, Dict[Any, Any]]: The JSON string or dictionary.
str or dict[Any, Any]: The JSON string or dictionary.
"""
if not try_to_fix_with_gpt:
raise exception
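For context, a rough usage sketch of the function whose signature is being changed here. The module path `autogpt.json_fixes.parsing` is an assumption based on the surrounding imports, and with `try_to_fix_with_gpt=False` no model call should be needed for well-formed input:

```python
from autogpt.json_fixes.parsing import fix_and_parse_json  # assumed module path

# Well-formed JSON should parse straight to a dict without invoking the AI fixer.
reply = fix_and_parse_json(
    '{"command": {"name": "read_file", "args": {"file": "notes.txt"}}}',
    try_to_fix_with_gpt=False,
)
print(reply["command"]["name"])  # read_file
```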
@@ -1,6 +1,7 @@
from __future__ import annotations

from ast import List
import time
from typing import Dict, Optional

import openai
from openai.error import APIError, RateLimitError

@@ -14,7 +15,7 @@ openai.api_key = CFG.openai_api_key


def call_ai_function(
function: str, args: List, description: str, model: Optional[str] = None
function: str, args: list, description: str, model: str | None = None
) -> str:
"""Call an AI function

@@ -51,15 +52,15 @@ def call_ai_function(
# Overly simple abstraction until we create something better
# simple retry mechanism when getting a rate error or a bad gateway
def create_chat_completion(
messages: List,  # type: ignore
model: Optional[str] = None,
messages: list,  # type: ignore
model: str | None = None,
temperature: float = CFG.temperature,
max_tokens: Optional[int] = None,
max_tokens: int | None = None,
) -> str:
"""Create a chat completion using the OpenAI API

Args:
messages (List[Dict[str, str]]): The messages to send to the chat completion
messages (list[dict[str, str]]): The messages to send to the chat completion
model (str, optional): The model to use. Defaults to None.
temperature (float, optional): The temperature to use. Defaults to 0.9.
max_tokens (int, optional): The max tokens to use. Defaults to None.
@@ -10,7 +10,7 @@ try:

supported_memory.append("redis")
except ImportError:
print("Redis not installed. Skipping import.")
# print("Redis not installed. Skipping import.")
RedisMemory = None

try:

@@ -18,13 +18,19 @@ try:

supported_memory.append("pinecone")
except ImportError:
print("Pinecone not installed. Skipping import.")
# print("Pinecone not installed. Skipping import.")
PineconeMemory = None

try:
from autogpt.memory.weaviate import WeaviateMemory
except ImportError:
# print("Weaviate not installed. Skipping import.")
WeaviateMemory = None

try:
from autogpt.memory.milvus import MilvusMemory
except ImportError:
print("pymilvus not installed. Skipping import.")
# print("pymilvus not installed. Skipping import.")
MilvusMemory = None


@@ -48,6 +54,12 @@ def get_memory(cfg, init=False):
)
else:
memory = RedisMemory(cfg)
elif cfg.memory_backend == "weaviate":
if not WeaviateMemory:
print("Error: Weaviate is not installed. Please install weaviate-client to"
" use Weaviate as a memory backend.")
else:
memory = WeaviateMemory(cfg)
elif cfg.memory_backend == "milvus":
if not MilvusMemory:
print(

@@ -77,4 +89,5 @@ __all__ = [
"PineconeMemory",
"NoMemory",
"MilvusMemory",
"WeaviateMemory"
]
@@ -1,6 +1,8 @@
from __future__ import annotations

import dataclasses
import os
from typing import Any, List, Optional, Tuple
from typing import Any

import numpy as np
import orjson

@@ -97,7 +99,7 @@ class LocalCache(MemoryProviderSingleton):
self.data = CacheContent()
return "Obliviated"

def get(self, data: str) -> Optional[List[Any]]:
def get(self, data: str) -> list[Any] | None:
"""
Gets the data from the memory that is most relevant to the given data.

@@ -108,7 +110,7 @@ class LocalCache(MemoryProviderSingleton):
"""
return self.get_relevant(data, 1)

def get_relevant(self, text: str, k: int) -> List[Any]:
def get_relevant(self, text: str, k: int) -> list[Any]:
""" "
matrix-vector mult to find score-for-each-row-of-matrix
get indices for top-k winning scores

@@ -127,7 +129,7 @@ class LocalCache(MemoryProviderSingleton):

return [self.data.texts[i] for i in top_k_indices]

def get_stats(self) -> Tuple[int, Tuple[int, ...]]:
def get_stats(self) -> tuple[int, tuple[int, ...]]:
"""
Returns: The stats of the local cache.
"""
@@ -1,5 +1,7 @@
"""A class that does not store any data. This is the default memory provider."""
from typing import Optional, List, Any
from __future__ import annotations

from typing import Any

from autogpt.memory.base import MemoryProviderSingleton


@@ -31,7 +33,7 @@ class NoMemory(MemoryProviderSingleton):
"""
return ""

def get(self, data: str) -> Optional[List[Any]]:
def get(self, data: str) -> list[Any] | None:
"""
Gets the data from the memory that is most relevant to the given data.
NoMemory always returns None.

@@ -51,7 +53,7 @@ class NoMemory(MemoryProviderSingleton):
"""
return ""

def get_relevant(self, data: str, num_relevant: int = 5) -> Optional[List[Any]]:
def get_relevant(self, data: str, num_relevant: int = 5) -> list[Any] | None:
"""
Returns all the data in the memory that is relevant to the given data.
NoMemory always returns None.
@@ -1,5 +1,7 @@
"""Redis memory provider."""
from typing import Any, List, Optional
from __future__ import annotations

from typing import Any

import numpy as np
import redis

@@ -99,7 +101,7 @@ class RedisMemory(MemoryProviderSingleton):
pipe.execute()
return _text

def get(self, data: str) -> Optional[List[Any]]:
def get(self, data: str) -> list[Any] | None:
"""
Gets the data from the memory that is most relevant to the given data.

@@ -119,7 +121,7 @@ class RedisMemory(MemoryProviderSingleton):
self.redis.flushall()
return "Obliviated"

def get_relevant(self, data: str, num_relevant: int = 5) -> Optional[List[Any]]:
def get_relevant(self, data: str, num_relevant: int = 5) -> list[Any] | None:
"""
Returns all the data in the memory that is relevant to the given data.
Args:
@@ -0,0 +1,110 @@
from autogpt.config import Config
from autogpt.memory.base import MemoryProviderSingleton, get_ada_embedding
import uuid
import weaviate
from weaviate import Client
from weaviate.embedded import EmbeddedOptions
from weaviate.util import generate_uuid5


def default_schema(weaviate_index):
    return {
        "class": weaviate_index,
        "properties": [
            {
                "name": "raw_text",
                "dataType": ["text"],
                "description": "original text for the embedding"
            }
        ],
    }


class WeaviateMemory(MemoryProviderSingleton):
    def __init__(self, cfg):
        auth_credentials = self._build_auth_credentials(cfg)

        url = f'{cfg.weaviate_protocol}://{cfg.weaviate_host}:{cfg.weaviate_port}'

        if cfg.use_weaviate_embedded:
            self.client = Client(embedded_options=EmbeddedOptions(
                hostname=cfg.weaviate_host,
                port=int(cfg.weaviate_port),
                persistence_data_path=cfg.weaviate_embedded_path
            ))

            print(f"Weaviate Embedded running on: {url} with persistence path: {cfg.weaviate_embedded_path}")
        else:
            self.client = Client(url, auth_client_secret=auth_credentials)

        self.index = cfg.memory_index
        self._create_schema()

    def _create_schema(self):
        schema = default_schema(self.index)
        if not self.client.schema.contains(schema):
            self.client.schema.create_class(schema)

    def _build_auth_credentials(self, cfg):
        if cfg.weaviate_username and cfg.weaviate_password:
            return weaviate.AuthClientPassword(cfg.weaviate_username, cfg.weaviate_password)
        if cfg.weaviate_api_key:
            return weaviate.AuthApiKey(api_key=cfg.weaviate_api_key)
        else:
            return None

    def add(self, data):
        vector = get_ada_embedding(data)

        doc_uuid = generate_uuid5(data, self.index)
        data_object = {
            'raw_text': data
        }

        with self.client.batch as batch:
            batch.add_data_object(
                uuid=doc_uuid,
                data_object=data_object,
                class_name=self.index,
                vector=vector
            )

        return f"Inserting data into memory at uuid: {doc_uuid}:\n data: {data}"

    def get(self, data):
        return self.get_relevant(data, 1)

    def clear(self):
        self.client.schema.delete_all()

        # weaviate does not yet have a neat way to just remove the items in an index
        # without removing the entire schema, therefore we need to re-create it
        # after a call to delete_all
        self._create_schema()

        return 'Obliterated'

    def get_relevant(self, data, num_relevant=5):
        query_embedding = get_ada_embedding(data)
        try:
            results = self.client.query.get(self.index, ['raw_text']) \
                .with_near_vector({'vector': query_embedding, 'certainty': 0.7}) \
                .with_limit(num_relevant) \
                .do()

            if len(results['data']['Get'][self.index]) > 0:
                return [str(item['raw_text']) for item in results['data']['Get'][self.index]]
            else:
                return []

        except Exception as err:
            print(f'Unexpected error {err=}, {type(err)=}')
            return []

    def get_stats(self):
        result = self.client.query.aggregate(self.index) \
            .with_meta_count() \
            .do()
        class_data = result['data']['Aggregate'][self.index]

        return class_data[0]['meta'] if class_data else {}
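A quick usage sketch of the new backend. It assumes a reachable Weaviate instance, the `weaviate-client` package, and an OpenAI key for `get_ada_embedding`; the configuration values come from the `WEAVIATE_*` variables added to `.env.template` above, and the printed results depend on what the instance already contains:

```python
from autogpt.config import Config
from autogpt.memory.weaviate import WeaviateMemory

cfg = Config()                 # reads WEAVIATE_* and MEMORY_INDEX from the environment
cfg.memory_backend = "weaviate"

memory = WeaviateMemory(cfg)
print(memory.add("Auto-GPT stores observations as embedded text."))
print(memory.get_relevant("What does Auto-GPT store?", num_relevant=1))
print(memory.get_stats())      # e.g. {'count': 1}
```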
@@ -1,10 +1,11 @@
"""HTML processing functions"""
from __future__ import annotations

from requests.compat import urljoin
from typing import List, Tuple
from bs4 import BeautifulSoup


def extract_hyperlinks(soup: BeautifulSoup, base_url: str) -> List[Tuple[str, str]]:
def extract_hyperlinks(soup: BeautifulSoup, base_url: str) -> list[tuple[str, str]]:
"""Extract hyperlinks from a BeautifulSoup object

Args:

@@ -20,7 +21,7 @@ def extract_hyperlinks(soup: BeautifulSoup, base_url: str) -> List[Tuple[str, st
]


def format_hyperlinks(hyperlinks: List[Tuple[str, str]]) -> List[str]:
def format_hyperlinks(hyperlinks: list[tuple[str, str]]) -> list[str]:
"""Format hyperlinks to be displayed to the user

Args:
@@ -82,10 +82,19 @@ def get_prompt() -> str:
),
("Execute Python File", "execute_python_file", {"file": "<file>"}),
("Generate Image", "generate_image", {"prompt": "<prompt>"}),
("Convert Audio to text", "read_audio_from_file", {"file": "<file>"}),
("Send Tweet", "send_tweet", {"text": "<text>"}),
]

# Only add the audio to text command if the model is specified
if cfg.huggingface_audio_to_text_model:
commands.append(
(
"Convert Audio to text",
"read_audio_from_file",
{"file": "<file>"}
),
)

# Only add shell command to the prompt if the AI is allowed to execute it
if cfg.execute_local_commands:
commands.append(

@@ -168,7 +177,7 @@ Continue (y/n): """

if not config.ai_name:
config = prompt_user()
config.save()
config.save(CFG.ai_settings_file)

# Get rid of this global:
global ai_name
@@ -1,6 +1,8 @@
""" A module for generating custom prompt strings."""
from __future__ import annotations

import json
from typing import Any, Dict, List
from typing import Any


class PromptGenerator:

@@ -61,7 +63,7 @@ class PromptGenerator:

self.commands.append(command)

def _generate_command_string(self, command: Dict[str, Any]) -> str:
def _generate_command_string(self, command: dict[str, Any]) -> str:
"""
Generate a formatted string representation of a command.

@@ -94,7 +96,7 @@ class PromptGenerator:
"""
self.performance_evaluation.append(evaluation)

def _generate_numbered_list(self, items: List[Any], item_type="list") -> str:
def _generate_numbered_list(self, items: list[Any], item_type="list") -> str:
"""
Generate a numbered list from given items based on the item_type.

@@ -132,5 +134,5 @@ class PromptGenerator:
f"{self._generate_numbered_list(self.performance_evaluation)}\n\n"
"You should only respond in JSON format as described below \nResponse"
f" Format: \n{formatted_response_format} \nEnsure the response can be"
"parsed by Python json.loads"
" parsed by Python json.loads"
)
@@ -1,5 +1,5 @@
"""Functions for counting the number of tokens in a message or string."""
from typing import Dict, List
from __future__ import annotations

import tiktoken


@@ -7,7 +7,7 @@ from autogpt.logs import logger


def count_message_tokens(
messages: List[Dict[str, str]], model: str = "gpt-3.5-turbo-0301"
messages: list[dict[str, str]], model: str = "gpt-3.5-turbo-0301"
) -> int:
"""
Returns the number of tokens used by a list of messages.
@@ -0,0 +1,41 @@
from __future__ import annotations

import os
from pathlib import Path

# Set a dedicated folder for file I/O
WORKSPACE_PATH = Path(os.getcwd()) / "auto_gpt_workspace"

# Create the directory if it doesn't exist
if not os.path.exists(WORKSPACE_PATH):
    os.makedirs(WORKSPACE_PATH)


def path_in_workspace(relative_path: str | Path) -> Path:
    """Get full path for item in workspace

    Parameters:
        relative_path (str | Path): Path to translate into the workspace

    Returns:
        Path: Absolute path for the given path in the workspace
    """
    return safe_path_join(WORKSPACE_PATH, relative_path)


def safe_path_join(base: Path, *paths: str | Path) -> Path:
    """Join one or more path components, asserting the resulting path is within the workspace.

    Args:
        base (Path): The base path
        *paths (str): The paths to join to the base path

    Returns:
        Path: The joined path
    """
    joined_path = base.joinpath(*paths).resolve()

    if not joined_path.is_relative_to(base):
        raise ValueError(f"Attempted to access path '{joined_path}' outside of working directory '{base}'.")

    return joined_path
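A minimal usage sketch of the new workspace helpers, assuming the module is importable as `autogpt.workspace` and the script runs from the repository root (the printed paths depend on the current working directory):

```python
from autogpt.workspace import WORKSPACE_PATH, path_in_workspace, safe_path_join

# A relative name is resolved inside auto_gpt_workspace/.
print(path_in_workspace("notes/todo.txt"))  # e.g. .../auto_gpt_workspace/notes/todo.txt

# Traversal outside the workspace raises instead of escaping the sandbox.
try:
    safe_path_join(WORKSPACE_PATH, "../secrets.env")
except ValueError as err:
    print(err)  # Attempted to access path '...' outside of working directory '...'
```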
@@ -27,4 +27,4 @@ isort
gitpython==3.1.31
pytest
pytest-mock
tweepy
tweepy
@@ -0,0 +1,117 @@
import unittest
from unittest import mock
import sys
import os

from weaviate import Client
from weaviate.util import get_valid_uuid
from uuid import uuid4

from autogpt.config import Config
from autogpt.memory.weaviate import WeaviateMemory
from autogpt.memory.base import get_ada_embedding


@mock.patch.dict(os.environ, {
    "WEAVIATE_HOST": "127.0.0.1",
    "WEAVIATE_PROTOCOL": "http",
    "WEAVIATE_PORT": "8080",
    "WEAVIATE_USERNAME": "",
    "WEAVIATE_PASSWORD": "",
    "MEMORY_INDEX": "AutogptTests"
})
class TestWeaviateMemory(unittest.TestCase):
    cfg = None
    client = None

    @classmethod
    def setUpClass(cls):
        # only create the connection to weaviate once
        cls.cfg = Config()

        if cls.cfg.use_weaviate_embedded:
            from weaviate.embedded import EmbeddedOptions

            cls.client = Client(embedded_options=EmbeddedOptions(
                hostname=cls.cfg.weaviate_host,
                port=int(cls.cfg.weaviate_port),
                persistence_data_path=cls.cfg.weaviate_embedded_path
            ))
        else:
            cls.client = Client(f"{cls.cfg.weaviate_protocol}://{cls.cfg.weaviate_host}:{cls.cfg.weaviate_port}")

    """
    In order to run these tests you will need a local instance of
    Weaviate running. Refer to https://weaviate.io/developers/weaviate/installation/docker-compose
    for creating local instances using docker.
    Alternatively in your .env file set the following environmental variables to run Weaviate embedded (see: https://weaviate.io/developers/weaviate/installation/embedded):

        USE_WEAVIATE_EMBEDDED=True
        WEAVIATE_EMBEDDED_PATH="/home/me/.local/share/weaviate"
    """
    def setUp(self):
        try:
            self.client.schema.delete_class(self.cfg.memory_index)
        except:
            pass

        self.memory = WeaviateMemory(self.cfg)

    def test_add(self):
        doc = 'You are a Titan name Thanos and you are looking for the Infinity Stones'
        self.memory.add(doc)
        result = self.client.query.get(self.cfg.memory_index, ['raw_text']).do()
        actual = result['data']['Get'][self.cfg.memory_index]

        self.assertEqual(len(actual), 1)
        self.assertEqual(actual[0]['raw_text'], doc)

    def test_get(self):
        doc = 'You are an Avenger and swore to defend the Galaxy from a menace called Thanos'

        with self.client.batch as batch:
            batch.add_data_object(
                uuid=get_valid_uuid(uuid4()),
                data_object={'raw_text': doc},
                class_name=self.cfg.memory_index,
                vector=get_ada_embedding(doc)
            )

            batch.flush()

        actual = self.memory.get(doc)

        self.assertEqual(len(actual), 1)
        self.assertEqual(actual[0], doc)

    def test_get_stats(self):
        docs = [
            'You are now about to count the number of docs in this index',
            'And then you about to find out if you can count correctly'
        ]

        [self.memory.add(doc) for doc in docs]

        stats = self.memory.get_stats()

        self.assertTrue(stats)
        self.assertTrue('count' in stats)
        self.assertEqual(stats['count'], 2)

    def test_clear(self):
        docs = [
            'Shame this is the last test for this class',
            'Testing is fun when someone else is doing it'
        ]

        [self.memory.add(doc) for doc in docs]

        self.assertEqual(self.memory.get_stats()['count'], 2)

        self.memory.clear()

        self.assertEqual(self.memory.get_stats()['count'], 0)


if __name__ == '__main__':
    unittest.main()