diff --git a/.env.template b/.env.template
index eeff2907c..209a29b96 100644
--- a/.env.template
+++ b/.env.template
@@ -5,8 +5,6 @@ EXECUTE_LOCAL_COMMANDS=False
 # BROWSE_CHUNK_MAX_LENGTH - When browsing website, define the length of chunk stored in memory
 BROWSE_CHUNK_MAX_LENGTH=8192
-# BROWSE_SUMMARY_MAX_TOKEN - Define the maximum length of the summary generated by GPT agent when browsing website
-BROWSE_SUMMARY_MAX_TOKEN=300
 # USER_AGENT - Define the user-agent used by the requests library to browse website (string)
 # USER_AGENT="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"
 # AI_SETTINGS_FILE - Specifies which AI Settings file to use (defaults to ai_settings.yaml)
diff --git a/autogpt/config/config.py b/autogpt/config/config.py
index fe6f4f325..a8b48b492 100644
--- a/autogpt/config/config.py
+++ b/autogpt/config/config.py
@@ -33,7 +33,6 @@ class Config(metaclass=Singleton):
         self.fast_token_limit = int(os.getenv("FAST_TOKEN_LIMIT", 4000))
         self.smart_token_limit = int(os.getenv("SMART_TOKEN_LIMIT", 8000))
         self.browse_chunk_max_length = int(os.getenv("BROWSE_CHUNK_MAX_LENGTH", 8192))
-        self.browse_summary_max_token = int(os.getenv("BROWSE_SUMMARY_MAX_TOKEN", 300))
 
         self.openai_api_key = os.getenv("OPENAI_API_KEY")
         self.temperature = float(os.getenv("TEMPERATURE", "1"))
@@ -188,10 +187,6 @@ class Config(metaclass=Singleton):
         """Set the browse_website command chunk max length value."""
         self.browse_chunk_max_length = value
 
-    def set_browse_summary_max_token(self, value: int) -> None:
-        """Set the browse_website command summary max token value."""
-        self.browse_summary_max_token = value
-
     def set_openai_api_key(self, value: str) -> None:
         """Set the OpenAI API key value."""
         self.openai_api_key = value
diff --git a/autogpt/processing/text.py b/autogpt/processing/text.py
index d30036d87..657b0b0eb 100644
--- a/autogpt/processing/text.py
+++ b/autogpt/processing/text.py
@@ -78,7 +78,6 @@ def summarize_text(
         summary = create_chat_completion(
             model=CFG.fast_llm_model,
             messages=messages,
-            max_tokens=CFG.browse_summary_max_token,
         )
         summaries.append(summary)
         print(f"Added chunk {i + 1} summary to memory")
@@ -95,7 +94,6 @@
     return create_chat_completion(
         model=CFG.fast_llm_model,
         messages=messages,
-        max_tokens=CFG.browse_summary_max_token,
     )
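
Note on the resulting behavior: with the max_tokens argument dropped from both
summarization calls, the reply length is no longer capped by the deleted
BROWSE_SUMMARY_MAX_TOKEN setting and instead falls back to the model's own
default limit. A minimal sketch of the post-patch call pattern, assuming
create_chat_completion is importable from autogpt.llm_utils (the import path
and message payload below are illustrative, not part of this diff):

    from autogpt.config import Config
    from autogpt.llm_utils import create_chat_completion  # assumed import path

    CFG = Config()

    # Hypothetical summarization prompt; the real messages are built in
    # autogpt/processing/text.py from the scraped page chunks.
    messages = [{"role": "user", "content": "Summarize: ..."}]

    # No max_tokens kwarg: the completion length is bounded only by the
    # model's context window, not by a configured summary cap.
    summary = create_chat_completion(
        model=CFG.fast_llm_model,
        messages=messages,
    )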