diff --git a/autogpt/configurator.py b/autogpt/configurator.py
index 0fddd1fa8..98bd0b26d 100644
--- a/autogpt/configurator.py
+++ b/autogpt/configurator.py
@@ -1,9 +1,12 @@
 """Configurator module."""
+from __future__ import annotations
+
 import click
 from colorama import Back, Fore, Style
 
 from autogpt import utils
 from autogpt.config import Config
+from autogpt.llm.llm_utils import check_model
 from autogpt.logs import logger
 from autogpt.memory import get_supported_memory_backends
 
@@ -45,6 +48,8 @@ def create_config(
     CFG.set_debug_mode(False)
     CFG.set_continuous_mode(False)
     CFG.set_speak_mode(False)
+    CFG.set_fast_llm_model(check_model(CFG.fast_llm_model, "fast_llm_model"))
+    CFG.set_smart_llm_model(check_model(CFG.smart_llm_model, "smart_llm_model"))
 
     if debug:
         logger.typewriter_log("Debug Mode: ", Fore.GREEN, "ENABLED")
diff --git a/autogpt/llm/api_manager.py b/autogpt/llm/api_manager.py
index a7777a2b0..82cf1c573 100644
--- a/autogpt/llm/api_manager.py
+++ b/autogpt/llm/api_manager.py
@@ -1,6 +1,9 @@
 from __future__ import annotations
 
+from typing import List, Optional
+
 import openai
+from openai import Model
 
 from autogpt.config import Config
 from autogpt.llm.modelsinfo import COSTS
@@ -14,12 +17,14 @@ class ApiManager(metaclass=Singleton):
         self.total_prompt_tokens = 0
         self.total_completion_tokens = 0
         self.total_cost = 0
         self.total_budget = 0
+        self.models: Optional[list[Model]] = None
 
     def reset(self):
         self.total_prompt_tokens = 0
         self.total_completion_tokens = 0
         self.total_cost = 0
         self.total_budget = 0.0
+        self.models = None
 
     def create_chat_completion(
         self,
@@ -127,3 +132,17 @@ class ApiManager(metaclass=Singleton):
             float: The total budget for API calls.
         """
         return self.total_budget
+
+    def get_models(self) -> List[Model]:
+        """
+        Get list of available GPT models.
+
+        Returns:
+            list: List of available GPT models.
+
+        """
+        if self.models is None:
+            all_models = openai.Model.list()["data"]
+            self.models = [model for model in all_models if "gpt" in model["id"]]
+
+        return self.models
diff --git a/autogpt/llm/llm_utils.py b/autogpt/llm/llm_utils.py
index a77bccbc0..fcd1b2d83 100644
--- a/autogpt/llm/llm_utils.py
+++ b/autogpt/llm/llm_utils.py
@@ -3,7 +3,7 @@ from __future__ import annotations
 import functools
 import time
 from itertools import islice
-from typing import List, Optional
+from typing import List, Literal, Optional
 
 import numpy as np
 import openai
@@ -293,3 +293,22 @@ def create_embedding(
     )  # normalize the length to one
     chunk_embeddings = chunk_embeddings.tolist()
     return chunk_embeddings
+
+
+def check_model(
+    model_name: str, model_type: Literal["smart_llm_model", "fast_llm_model"]
+) -> str:
+    """Check if model is available for use. If not, return gpt-3.5-turbo."""
+    api_manager = ApiManager()
+    models = api_manager.get_models()
+
+    if any(model_name in m["id"] for m in models):
+        return model_name
+
+    logger.typewriter_log(
+        "WARNING: ",
+        Fore.YELLOW,
+        f"You do not have access to {model_name}. Setting {model_type} to "
+        f"gpt-3.5-turbo.",
+    )
+    return "gpt-3.5-turbo"
diff --git a/autogpt/main.py b/autogpt/main.py
index 792496095..ac27e282d 100644
--- a/autogpt/main.py
+++ b/autogpt/main.py
@@ -47,6 +47,7 @@ def run_auto_gpt(
     cfg = Config()
     # TODO: fill in llm values here
     check_openai_api_key()
+
     create_config(
         continuous,
         continuous_limit,
diff --git a/tests/test_api_manager.py b/tests/test_api_manager.py
index 3d0672c11..bfb842130 100644
--- a/tests/test_api_manager.py
+++ b/tests/test_api_manager.py
@@ -118,3 +118,13 @@ class TestApiManager:
         assert api_manager.get_total_prompt_tokens() == 50
         assert api_manager.get_total_completion_tokens() == 100
         assert api_manager.get_total_cost() == (50 * 0.002 + 100 * 0.002) / 1000
+
+    @staticmethod
+    def test_get_models():
+        """Test if getting models works correctly."""
+        with patch("openai.Model.list") as mock_list_models:
+            mock_list_models.return_value = {"data": [{"id": "gpt-3.5-turbo"}]}
+            result = api_manager.get_models()
+
+            assert result[0]["id"] == "gpt-3.5-turbo"
+            assert api_manager.models[0]["id"] == "gpt-3.5-turbo"
diff --git a/tests/test_config.py b/tests/test_config.py
index e1fc67454..0f38b28cd 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -2,10 +2,11 @@
 Test cases for the Config class, which handles the configuration settings for the
 AI and ensures it behaves as a singleton.
 """
+from unittest.mock import patch
 
-import pytest
+from openai import InvalidRequestError
 
-from autogpt.config import Config
+from autogpt.configurator import create_config
 
 
 def test_initial_values(config):
@@ -117,3 +118,40 @@ def test_set_debug_mode(config):
 
     # Reset debug mode
     config.set_debug_mode(debug_mode)
+
+
+@patch("openai.Model.list")
+def test_smart_and_fast_llm_models_set_to_gpt4(mock_list_models, config):
+    """
+    Test if models update to gpt-3.5-turbo if both are set to gpt-4.
+    """
+    fast_llm_model = config.fast_llm_model
+    smart_llm_model = config.smart_llm_model
+
+    config.fast_llm_model = "gpt-4"
+    config.smart_llm_model = "gpt-4"
+
+    mock_list_models.return_value = {"data": [{"id": "gpt-3.5-turbo"}]}
+
+    create_config(
+        continuous=False,
+        continuous_limit=False,
+        ai_settings_file="",
+        prompt_settings_file="",
+        skip_reprompt=False,
+        speak=False,
+        debug=False,
+        gpt3only=False,
+        gpt4only=False,
+        memory_type="",
+        browser_name="",
+        allow_downloads=False,
+        skip_news=False,
+    )
+
+    assert config.fast_llm_model == "gpt-3.5-turbo"
+    assert config.smart_llm_model == "gpt-3.5-turbo"
+
+    # Reset config
+    config.set_fast_llm_model(fast_llm_model)
+    config.set_smart_llm_model(smart_llm_model)
diff --git a/tests/unit/test_llm_utils.py b/tests/unit/test_llm_utils.py
index be36dc090..e11b368e1 100644
--- a/tests/unit/test_llm_utils.py
+++ b/tests/unit/test_llm_utils.py
@@ -1,7 +1,11 @@
+from unittest.mock import patch
+
 import pytest
+from openai import InvalidRequestError
 from openai.error import APIError, RateLimitError
 
 from autogpt.llm import llm_utils
+from autogpt.llm.llm_utils import check_model
 
 
 @pytest.fixture(params=[RateLimitError, APIError])
@@ -131,3 +135,26 @@ def test_chunked_tokens():
     ]
     output = list(llm_utils.chunked_tokens(text, "cl100k_base", 8191))
     assert output == expected_output
+
+
+def test_check_model(api_manager):
+    """
+    Test if check_model() returns original model when valid.
+    Test if check_model() returns gpt-3.5-turbo when model is invalid.
+    """
+    with patch("openai.Model.list") as mock_list_models:
+        # Test when correct model is returned
+        mock_list_models.return_value = {"data": [{"id": "gpt-4"}]}
+        result = check_model("gpt-4", "smart_llm_model")
+        assert result == "gpt-4"
+
+        # Reset api manager models
+        api_manager.models = None
+
+        # Test when incorrect model is returned
+        mock_list_models.return_value = {"data": [{"id": "gpt-3.5-turbo"}]}
+        result = check_model("gpt-4", "fast_llm_model")
+        assert result == "gpt-3.5-turbo"
+
+        # Reset api manager models
+        api_manager.models = None
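
Note for reviewers (not part of the patch): the snippet below is a minimal, self-contained sketch of the fallback behaviour the new get_models()/check_model() pair implements, driven by a hard-coded model list instead of a real openai.Model.list() call. The pick_model name and the example model ids are illustrative assumptions only.

# Illustrative sketch, not part of the patch. It mirrors ApiManager.get_models()
# (keep only ids containing "gpt") and check_model() (fall back to gpt-3.5-turbo
# when the requested model is not in the returned list).
from typing import List


def pick_model(requested: str, available_ids: List[str]) -> str:
    gpt_ids = [model_id for model_id in available_ids if "gpt" in model_id]
    if any(requested in model_id for model_id in gpt_ids):
        return requested
    return "gpt-3.5-turbo"


assert pick_model("gpt-4", ["gpt-3.5-turbo"]) == "gpt-3.5-turbo"
assert pick_model("gpt-4", ["gpt-3.5-turbo", "gpt-4"]) == "gpt-4"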
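
A second illustrative sketch (also not part of the patch): get_models() memoises the model list on the ApiManager instance, which is why the tests above clear api_manager.models between scenarios. The FakeModelCache class and stub_list_models function below are assumed names used only to demonstrate that caching pattern.

# Illustrative sketch of the memoisation pattern used by ApiManager.get_models();
# nothing here is Auto-GPT code.
from typing import Callable, List, Optional


class FakeModelCache:
    def __init__(self, list_models: Callable[[], dict]):
        self._list_models = list_models  # stands in for openai.Model.list
        self.models: Optional[List[dict]] = None  # mirrors ApiManager.models

    def get_models(self) -> List[dict]:
        if self.models is None:  # only the first call hits the (stubbed) API
            all_models = self._list_models()["data"]
            self.models = [m for m in all_models if "gpt" in m["id"]]
        return self.models


calls = []


def stub_list_models() -> dict:
    calls.append(1)
    return {"data": [{"id": "gpt-3.5-turbo"}, {"id": "text-davinci-003"}]}


cache = FakeModelCache(stub_list_models)
cache.get_models()
cache.get_models()
assert len(calls) == 1  # the second call is served from the cache
assert [m["id"] for m in cache.models] == ["gpt-3.5-turbo"]

cache.models = None  # what the tests do between scenarios
cache.get_models()
assert len(calls) == 2  # a cleared cache queries the API again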