Merge remote-tracking branch 'upstream/master' into fix-user-feedback-json-error
commit c6605b06c5
@@ -2,6 +2,8 @@ PINECONE_API_KEY=your-pinecone-api-key
 PINECONE_ENV=your-pinecone-region
 OPENAI_API_KEY=your-openai-api-key
 ELEVENLABS_API_KEY=your-elevenlabs-api-key
+ELEVENLABS_VOICE_1_ID=your-voice-id
+ELEVENLABS_VOICE_2_ID=your-voice-id
 SMART_LLM_MODEL=gpt-4
 FAST_LLM_MODEL=gpt-3.5-turbo
 GOOGLE_API_KEY=

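Note on how these settings are consumed: the two new ELEVENLABS_VOICE_*_ID entries are read from the environment by the Config class later in this diff. A minimal sketch of the loading path, assuming python-dotenv (which the project appears to rely on for the .env file):

    import os

    from dotenv import load_dotenv  # assumption: python-dotenv is installed

    load_dotenv()  # copies KEY=value pairs from .env into os.environ

    # Unset keys come back as None, so callers can fall back to defaults
    voice_1_id = os.getenv("ELEVENLABS_VOICE_1_ID")
    print(voice_1_id or "no custom voice configured")
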
@@ -23,10 +23,10 @@ By following these guidelines, your PRs are more likely to be merged quickly aft
 
 ### PR Quality Checklist
 - [ ] My pull request is atomic and focuses on a single change.
-- [ ] I have thouroughly tested my changes with multiple different prompts.
+- [ ] I have thoroughly tested my changes with multiple different prompts.
 - [ ] I have considered potential risks and mitigations for my changes.
 - [ ] I have documented my changes clearly and comprehensively.
-- [ ] I have not snuck in any "extra" small tweaks changes <!-- Submit these as seperate Pull Reqests, they are the easiest to merge! -->
+- [ ] I have not snuck in any "extra" small tweaks changes <!-- Submit these as separate Pull Reqests, they are the easiest to merge! -->
 
 <!-- If you haven't added tests, please explain why. If you have, check the appropriate box. If you've ensured your PR is atomic and well-documented, check the corresponding boxes. -->
 

@@ -3,7 +3,7 @@
 ![Twitter Follow](https://img.shields.io/twitter/follow/siggravitas?style=social)
 [![](https://dcbadge.vercel.app/api/server/PQ7VX6TY4t?style=flat)](https://discord.gg/PQ7VX6TY4t)
 
-Auto-GPT is an experimental open-source application showcasing the capabilities of the GPT-4 language model. This program, driven by GPT-4, autonomously develops and manages businesses to increase net worth. As one of the first examples of GPT-4 running fully autonomously, Auto-GPT pushes the boundaries of what is possible with AI.
+Auto-GPT is an experimental open-source application showcasing the capabilities of the GPT-4 language model. This program, driven by GPT-4, chains together LLM "thoughts", to autonomously achieve whatever goal you set. As one of the first examples of GPT-4 running fully autonomously, Auto-GPT pushes the boundaries of what is possible with AI.
 
 ### Demo (30/03/2023):
 https://user-images.githubusercontent.com/22963551/228855501-2f5777cf-755b-4407-a643-c7299e5b6419.mp4

@@ -267,3 +267,8 @@ Stay up-to-date with the latest news, updates, and insights about Auto-GPT by fo
 
 We look forward to connecting with you and hearing your thoughts, ideas, and experiences with Auto-GPT. Join us on Twitter and let's explore the future of AI together!
 
+<p align="center">
+<a href="https://star-history.com/#Torantulino/auto-gpt&Date">
+<img src="https://api.star-history.com/svg?repos=Torantulino/auto-gpt&type=Date" alt="Star History Chart">
+</a>
+</p>

@@ -13,7 +13,7 @@ def create_agent(task, prompt, model):
 
     messages = [{"role": "user", "content": prompt}, ]
 
-    # Start GTP3 instance
+    # Start GPT instance
     agent_reply = create_chat_completion(
         model=model,
         messages=messages,

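The `messages` list above follows the standard OpenAI chat format (a list of role/content dicts). For context, a rough equivalent of the call that create_chat_completion wraps, assuming the pre-1.0 `openai` client the project used at the time:

    import openai

    messages = [{"role": "user", "content": "Summarize the current task status."}]

    # create_chat_completion wraps a request shaped like this (plus config and error handling)
    response = openai.ChatCompletion.create(model="gpt-3.5-turbo", messages=messages)
    print(response["choices"][0]["message"]["content"])
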
@@ -41,7 +41,7 @@ def message_agent(key, message):
     # Add user message to message history before sending to agent
     messages.append({"role": "user", "content": message})
 
-    # Start GTP3 instance
+    # Start GPT instance
     agent_reply = create_chat_completion(
         model=model,
         messages=messages,

@@ -42,7 +42,7 @@ class AIConfig:
             config_file (int): The path to the config yaml file. DEFAULT: "../ai_settings.yaml"
 
         Returns:
-            cls (object): A instance of given cls object
+            cls (object): An instance of given cls object
         """
 
         try:

@@ -80,7 +80,7 @@ class AIConfig:
             None
 
         Returns:
-            full_prompt (str): A string containing the intitial prompt for the user including the ai_name, ai_role and ai_goals.
+            full_prompt (str): A string containing the initial prompt for the user including the ai_name, ai_role and ai_goals.
         """
 
         prompt_start = """Your decisions must always be made independently without seeking user assistance. Play to your strengths as an LLM and pursue simple strategies with no legal complications."""

@@ -141,6 +141,6 @@ def chat_with_ai(
 
             return assistant_reply
         except openai.error.RateLimitError:
-            # TODO: WHen we switch to langchain, this is built in
+            # TODO: When we switch to langchain, this is built in
             print("Error: ", "API Rate Limit Reached. Waiting 10 seconds...")
             time.sleep(10)

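The branch above waits ten seconds and lets the surrounding loop try again. As a rough illustration of the same idea outside that loop, a hypothetical retry wrapper (not part of this diff) could look like:

    import time

    import openai

    def call_with_rate_limit_retry(request, retries=3, wait_seconds=10):
        """Hypothetical helper: retry a callable when the OpenAI API reports a rate limit."""
        for _ in range(retries):
            try:
                return request()
            except openai.error.RateLimitError:
                print("Error: ", "API Rate Limit Reached. Waiting 10 seconds...")
                time.sleep(wait_seconds)
        return request()  # final attempt; any remaining error propagates to the caller
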
@@ -110,7 +110,7 @@ def execute_command(command_name, arguments):
         elif command_name == "task_complete":
             shutdown()
         else:
-            return f"Unknown command '{command_name}'. Please refer to the 'COMMANDS' list for availabe commands and only respond in the specified JSON format."
+            return f"Unknown command '{command_name}'. Please refer to the 'COMMANDS' list for available commands and only respond in the specified JSON format."
     # All errors, return "Error: + error message"
     except Exception as e:
         return "Error: " + str(e)

@@ -54,6 +54,8 @@ class Config(metaclass=Singleton):
             openai.api_version = self.openai_api_version
 
         self.elevenlabs_api_key = os.getenv("ELEVENLABS_API_KEY")
+        self.elevenlabs_voice_1_id = os.getenv("ELEVENLABS_VOICE_1_ID")
+        self.elevenlabs_voice_2_id = os.getenv("ELEVENLABS_VOICE_2_ID")
 
         self.use_mac_os_tts = False
         self.use_mac_os_tts = os.getenv("USE_MAC_OS_TTS")

@@ -75,7 +77,7 @@ class Config(metaclass=Singleton):
         self.redis_password = os.getenv("REDIS_PASSWORD", "")
         self.wipe_redis_on_start = os.getenv("WIPE_REDIS_ON_START", "True") == 'True'
         self.memory_index = os.getenv("MEMORY_INDEX", 'auto-gpt')
-        # Note that indexes must be created on db 0 in redis, this is not configureable.
+        # Note that indexes must be created on db 0 in redis, this is not configurable.
 
         self.memory_backend = os.getenv("MEMORY_BACKEND", 'local')
         # Initialize the OpenAI API client

@@ -113,6 +115,14 @@ class Config(metaclass=Singleton):
         """Set the ElevenLabs API key value."""
         self.elevenlabs_api_key = value
 
+    def set_elevenlabs_voice_1_id(self, value: str):
+        """Set the ElevenLabs Voice 1 ID value."""
+        self.elevenlabs_voice_1_id = value
+
+    def set_elevenlabs_voice_2_id(self, value: str):
+        """Set the ElevenLabs Voice 2 ID value."""
+        self.elevenlabs_voice_2_id = value
+
     def set_google_api_key(self, value: str):
         """Set the Google API key value."""
         self.google_api_key = value

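The new setters pair with the os.getenv reads added to __init__ earlier in this diff, so a voice ID can come from the environment or be overridden at runtime. A trimmed-down sketch (a hypothetical stand-in, not the project's Singleton-based Config):

    import os

    class VoiceConfig:
        """Hypothetical stand-in for the ElevenLabs voice settings on Config."""

        def __init__(self):
            self.elevenlabs_voice_1_id = os.getenv("ELEVENLABS_VOICE_1_ID")
            self.elevenlabs_voice_2_id = os.getenv("ELEVENLABS_VOICE_2_ID")

        def set_elevenlabs_voice_1_id(self, value: str):
            self.elevenlabs_voice_1_id = value

    cfg = VoiceConfig()
    cfg.set_elevenlabs_voice_1_id("ErXwobaYiN019PkySvjV")  # runtime override of the env value
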
@@ -53,7 +53,7 @@ def fix_and_parse_json(
         last_brace_index = json_str.rindex("}")
         json_str = json_str[:last_brace_index+1]
         return json.loads(json_str)
-    except json.JSONDecodeError as e:  # noqa: F841
+    except (json.JSONDecodeError, ValueError) as e:  # noqa: F841
         if try_to_fix_with_gpt:
             print("Warning: Failed to parse AI output, attempting to fix."
                   "\n If you see this warning frequently, it's likely that"

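Adding ValueError to this except clause is the core of the user-feedback fix: `str.rindex` raises ValueError, not JSONDecodeError, when the reply contains no closing brace at all (for example, plain-text user feedback), so the previous clause let that exception escape. A small standalone illustration of the failure mode:

    import json

    reply = "I couldn't produce JSON for that request."  # no braces anywhere

    try:
        last_brace_index = reply.rindex("}")  # raises ValueError: substring not found
        reply = reply[:last_brace_index + 1]
        print(json.loads(reply))
    except (json.JSONDecodeError, ValueError) as e:
        # With only json.JSONDecodeError listed, the ValueError from rindex()
        # would propagate and crash the caller instead of reaching the fix path.
        print(f"Falling back to the GPT-based fix: {e}")
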
@@ -67,22 +67,22 @@ def fix_and_parse_json(
             else:
                 # This allows the AI to react to the error message,
                 # which usually results in it correcting its ways.
-                print("Failed to fix ai output, telling the AI.")
+                print("Failed to fix AI output, telling the AI.")
                 return json_str
         else:
             raise e
 
 
 def fix_json(json_str: str, schema: str) -> str:
-    """Fix the given JSON string to make it parseable and fully complient with the provided schema."""
+    """Fix the given JSON string to make it parseable and fully compliant with the provided schema."""
 
-    # Try to fix the JSON using gpt:
+    # Try to fix the JSON using GPT:
     function_string = "def fix_json(json_str: str, schema:str=None) -> str:"
     args = [f"'''{json_str}'''", f"'''{schema}'''"]
     description_string = "Fixes the provided JSON string to make it parseable"\
-        " and fully complient with the provided schema.\n If an object or"\
+        " and fully compliant with the provided schema.\n If an object or"\
         " field specified in the schema isn't contained within the correct"\
-        " JSON, it is ommited.\n This function is brilliant at guessing"\
+        " JSON, it is omitted.\n This function is brilliant at guessing"\
         " when the format is incorrect."
 
     # If it doesn't already start with a "`", add one:

@@ -91,7 +91,7 @@ def fix_json(json_str: str, schema: str) -> str:
     result_string = call_ai_function(
         function_string, args, description_string, model=cfg.fast_llm_model
     )
-    if cfg.debug:
+    if cfg.debug_mode:
         print("------------ JSON FIX ATTEMPT ---------------")
         print(f"Original JSON: {json_str}")
         print("-----------")

@@ -309,6 +309,7 @@ def parse_arguments():
     parser.add_argument('--speak', action='store_true', help='Enable Speak Mode')
     parser.add_argument('--debug', action='store_true', help='Enable Debug Mode')
     parser.add_argument('--gpt3only', action='store_true', help='Enable GPT3.5 Only Mode')
+    parser.add_argument('--gpt4only', action='store_true', help='Enable GPT4 Only Mode')
     args = parser.parse_args()
 
     if args.continuous:

@@ -326,6 +327,10 @@ def parse_arguments():
     if args.gpt3only:
         print_to_console("GPT3.5 Only Mode: ", Fore.GREEN, "ENABLED")
         cfg.set_smart_llm_model(cfg.fast_llm_model)
 
+    if args.gpt4only:
+        print_to_console("GPT4 Only Mode: ", Fore.GREEN, "ENABLED")
+        cfg.set_fast_llm_model(cfg.smart_llm_model)
+
 

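--gpt4only mirrors the existing --gpt3only flag: one collapses both model slots onto the fast model, the other onto the smart model. A self-contained sketch of that logic with plain variables (hypothetical, outside the project's Config plumbing):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--gpt3only', action='store_true', help='Enable GPT3.5 Only Mode')
    parser.add_argument('--gpt4only', action='store_true', help='Enable GPT4 Only Mode')
    args = parser.parse_args(['--gpt4only'])  # simulate: python main.py --gpt4only

    smart_llm_model, fast_llm_model = "gpt-4", "gpt-3.5-turbo"
    if args.gpt3only:
        smart_llm_model = fast_llm_model  # every call uses the fast model
    if args.gpt4only:
        fast_llm_model = smart_llm_model  # every call uses the smart model
    print(smart_llm_model, fast_llm_model)  # -> gpt-4 gpt-4
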
@@ -7,9 +7,21 @@ import gtts
 import threading
 from threading import Lock, Semaphore
 
+# Default voice IDs
+default_voices = ["ErXwobaYiN019PkySvjV", "EXAVITQu4vr4xnSDxMaL"]
+
-# TODO: Nicer names for these ids
-voices = ["ErXwobaYiN019PkySvjV", "EXAVITQu4vr4xnSDxMaL"]
+# Retrieve custom voice IDs from the Config class
+custom_voice_1 = cfg.elevenlabs_voice_1_id
+custom_voice_2 = cfg.elevenlabs_voice_2_id
+
+# Placeholder values that should be treated as empty
+placeholders = {"your-voice-id"}
+
+# Use custom voice IDs if provided and not placeholders, otherwise use default voice IDs
+voices = [
+    custom_voice_1 if custom_voice_1 and custom_voice_1 not in placeholders else default_voices[0],
+    custom_voice_2 if custom_voice_2 and custom_voice_2 not in placeholders else default_voices[1]
+]
 
 tts_headers = {
     "Content-Type": "application/json",

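The two conditional expressions above fall back to a default voice whenever the configured ID is empty or still the .env placeholder. Pulled out into a hypothetical helper (not in this diff), the selection behaves like this:

    DEFAULT_VOICES = ["ErXwobaYiN019PkySvjV", "EXAVITQu4vr4xnSDxMaL"]
    PLACEHOLDERS = {"your-voice-id"}

    def resolve_voice(custom_id, default_id):
        """Hypothetical helper: keep the custom ID only if it is set and not a placeholder."""
        return custom_id if custom_id and custom_id not in PLACEHOLDERS else default_id

    print(resolve_voice(None, DEFAULT_VOICES[0]))               # unset -> default voice
    print(resolve_voice("your-voice-id", DEFAULT_VOICES[0]))    # placeholder -> default voice
    print(resolve_voice("myCustomVoiceId", DEFAULT_VOICES[0]))  # real ID -> custom voice
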
@@ -20,7 +20,7 @@ class Spinner:
             sys.stdout.write(next(self.spinner) + " " + self.message + "\r")
             sys.stdout.flush()
             time.sleep(self.delay)
-            sys.stdout.write('\b' * (len(self.message) + 2))
+            sys.stdout.write('\r' + ' ' * (len(self.message) + 2) + '\r')
 
     def __enter__(self):
         """Start the spinner"""

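The replaced write switches from backspaces to a carriage return plus spaces: '\b' only moves the cursor, so stale spinner characters can remain on screen, while '\r' plus padding overwrites the whole line. A small standalone demonstration (hypothetical, not the project's Spinner class):

    import sys
    import time

    message = "Thinking..."
    for frame in "|/-\\" * 5:
        sys.stdout.write(frame + " " + message + "\r")
        sys.stdout.flush()
        time.sleep(0.1)

    # Overwrite the whole spinner line with spaces, then return the cursor to column 0
    sys.stdout.write('\r' + ' ' * (len(message) + 2) + '\r')
    sys.stdout.flush()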