2023-04-16 18:02:48 +00:00
|
|
|
from __future__ import annotations
|
|
|
|
|
2023-04-15 12:56:23 +00:00
|
|
|
from ast import List
|
2023-04-13 16:23:16 +00:00
|
|
|
import time
|
2023-04-14 19:42:28 +00:00
|
|
|
|
2023-04-03 02:51:07 +00:00
|
|
|
import openai
|
2023-04-15 00:04:48 +00:00
|
|
|
from openai.error import APIError, RateLimitError
|
2023-04-13 16:23:16 +00:00
|
|
|
from colorama import Fore
|
2023-04-14 19:42:28 +00:00
|
|
|
|
2023-04-14 16:28:58 +00:00
|
|
|
from autogpt.config import Config
|
2023-04-13 16:23:16 +00:00
|
|
|
|
2023-04-15 12:56:23 +00:00
|
|
|
# Module-level configuration instance shared by every helper in this file.
CFG = Config()

# Point the OpenAI SDK at the API key from the loaded configuration.
openai.api_key = CFG.openai_api_key
|
|
|
|
|
|
|
|
|
|
|
|
def call_ai_function(
    function: str, args: list, description: str, model: str | None = None
) -> str:
    """Call an AI function

    This is a magic function that can do anything with no-code. See
    https://github.com/Torantulino/AI-Functions for more info.

    Args:
        function (str): The function to call
        args (list): The arguments to pass to the function
        description (str): The description of the function
        model (str, optional): The model to use. Defaults to None, which
            selects the configured smart LLM model.

    Returns:
        str: The response from the function
    """
    chosen_model = CFG.smart_llm_model if model is None else model

    # Render each argument as text; a literal None becomes the string "None",
    # then join everything into one comma-separated user message.
    rendered_args = ", ".join(
        "None" if arg is None else str(arg) for arg in args
    )

    system_prompt = (
        f"You are now the following python function: ```# {description}"
        f"\n{function}```\n\nOnly respond with your `return` value."
    )
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": rendered_args},
    ]

    # Temperature 0 keeps the emulated function call deterministic.
    return create_chat_completion(
        model=chosen_model, messages=messages, temperature=0
    )
|
2023-04-03 02:51:07 +00:00
|
|
|
|
2023-04-13 16:23:16 +00:00
|
|
|
|
2023-04-03 02:51:07 +00:00
|
|
|
# Overly simple abstraction until we create something better
# Simple retry mechanism for rate-limit errors and 502 bad gateways.
def create_chat_completion(
    messages: list,  # type: ignore
    model: str | None = None,
    temperature: float = CFG.temperature,
    max_tokens: int | None = None,
) -> str:
    """Create a chat completion using the OpenAI API

    Retries up to 10 times with exponential backoff when the API reports a
    rate limit or a 502 bad gateway; any other API error is re-raised
    immediately.

    Args:
        messages (list[dict[str, str]]): The messages to send to the chat completion
        model (str, optional): The model to use. Defaults to None.
        temperature (float, optional): The temperature to use. Defaults to
            the configured CFG.temperature.
        max_tokens (int, optional): The max tokens to use. Defaults to None.

    Returns:
        str: The message content of the first choice in the response.

    Raises:
        APIError: For non-502 API errors, or a 502 on the final attempt.
        RuntimeError: If no response was obtained after all retries.
    """
    response = None
    num_retries = 10
    if CFG.debug_mode:
        print(
            Fore.GREEN
            + f"Creating chat completion with model {model}, temperature {temperature},"
            f" max_tokens {max_tokens}" + Fore.RESET
        )
    for attempt in range(num_retries):
        # Exponential backoff: 4s, 8s, 16s, ... between attempts.
        backoff = 2 ** (attempt + 2)
        try:
            if CFG.use_azure:
                # Azure endpoints address models via a deployment id.
                response = openai.ChatCompletion.create(
                    deployment_id=CFG.get_azure_deployment_id_for_model(model),
                    model=model,
                    messages=messages,
                    temperature=temperature,
                    max_tokens=max_tokens,
                )
            else:
                response = openai.ChatCompletion.create(
                    model=model,
                    messages=messages,
                    temperature=temperature,
                    max_tokens=max_tokens,
                )
            break
        except RateLimitError:
            if CFG.debug_mode:
                print(
                    Fore.RED + "Error: ",
                    "Reached rate limit, passing..." + Fore.RESET,
                )
        except APIError as e:
            # Only 502 (bad gateway) is retried; anything else propagates.
            if e.http_status == 502:
                pass
            else:
                raise
            if attempt == num_retries - 1:
                raise
        if CFG.debug_mode:
            print(
                Fore.RED + "Error: ",
                f"API Bad gateway. Waiting {backoff} seconds..." + Fore.RESET,
            )
        time.sleep(backoff)
    if response is None:
        raise RuntimeError(f"Failed to get response after {num_retries} retries")

    return response.choices[0].message["content"]
|
2023-04-15 16:13:29 +00:00
|
|
|
|
|
|
|
|
2023-04-15 20:03:03 +00:00
|
|
|
def create_embedding_with_ada(text) -> list:
    """Create an embedding of `text` with text-embedding-ada-002 via the OpenAI SDK.

    Retries up to 10 times with exponential backoff when the API reports a
    rate limit or a 502 bad gateway; any other API error is re-raised
    immediately.

    Args:
        text: The text to embed.

    Returns:
        list: The embedding vector for the text.

    Raises:
        APIError: For non-502 API errors, or a 502 on the final attempt.
        RuntimeError: If no embedding was obtained after all retries.
    """
    num_retries = 10
    for attempt in range(num_retries):
        # Exponential backoff: 4s, 8s, 16s, ... between attempts.
        backoff = 2 ** (attempt + 2)
        try:
            if CFG.use_azure:
                # Azure endpoints address models via a deployment id.
                return openai.Embedding.create(
                    input=[text],
                    engine=CFG.get_azure_deployment_id_for_model(
                        "text-embedding-ada-002"
                    ),
                )["data"][0]["embedding"]
            else:
                return openai.Embedding.create(
                    input=[text], model="text-embedding-ada-002"
                )["data"][0]["embedding"]
        except RateLimitError:
            pass
        except APIError as e:
            # Only 502 (bad gateway) is retried; anything else propagates.
            if e.http_status == 502:
                pass
            else:
                raise
            if attempt == num_retries - 1:
                raise
        if CFG.debug_mode:
            print(
                Fore.RED + "Error: ",
                f"API Bad gateway. Waiting {backoff} seconds..." + Fore.RESET,
            )
        time.sleep(backoff)
    # Fix: previously fell through here and implicitly returned None when every
    # attempt hit a rate limit, violating the `-> list` contract. Fail loudly
    # instead, mirroring create_chat_completion's post-loop guard.
    raise RuntimeError(f"Failed to get embedding after {num_retries} retries")
|