Unbound summary size

Signed-off-by: Tom Kaitchuck <Tom.Kaitchuck@gmail.com>
Branch: pull/2203/head
Author: Tom Kaitchuck, 2023-04-16 01:53:24 -07:00 (committed by Pi)
Parent: 23e7031326
Commit: 6b64158356
3 changed files with 0 additions and 9 deletions
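
This commit removes the BROWSE_SUMMARY_MAX_TOKEN setting (default 300) from the environment template, the Config class, and the summarization calls, so summaries generated while browsing are no longer truncated at a fixed token count; the completion length is left to the API default instead. BROWSE_CHUNK_MAX_LENGTH is untouched, so input text is still chunked as before.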

@@ -5,8 +5,6 @@
 EXECUTE_LOCAL_COMMANDS=False
 # BROWSE_CHUNK_MAX_LENGTH - When browsing website, define the length of chunk stored in memory
 BROWSE_CHUNK_MAX_LENGTH=8192
-# BROWSE_SUMMARY_MAX_TOKEN - Define the maximum length of the summary generated by GPT agent when browsing website
-BROWSE_SUMMARY_MAX_TOKEN=300
 # USER_AGENT - Define the user-agent used by the requests library to browse website (string)
 # USER_AGENT="Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36"
 # AI_SETTINGS_FILE - Specifies which AI Settings file to use (defaults to ai_settings.yaml)
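
(After this change, a stale .env that still defines BROWSE_SUMMARY_MAX_TOKEN is harmless: the variable is simply never read.)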

@@ -33,7 +33,6 @@ class Config(metaclass=Singleton):
         self.fast_token_limit = int(os.getenv("FAST_TOKEN_LIMIT", 4000))
         self.smart_token_limit = int(os.getenv("SMART_TOKEN_LIMIT", 8000))
         self.browse_chunk_max_length = int(os.getenv("BROWSE_CHUNK_MAX_LENGTH", 8192))
-        self.browse_summary_max_token = int(os.getenv("BROWSE_SUMMARY_MAX_TOKEN", 300))
         self.openai_api_key = os.getenv("OPENAI_API_KEY")
         self.temperature = float(os.getenv("TEMPERATURE", "1"))
@@ -188,10 +187,6 @@ class Config(metaclass=Singleton):
         """Set the browse_website command chunk max length value."""
         self.browse_chunk_max_length = value
 
-    def set_browse_summary_max_token(self, value: int) -> None:
-        """Set the browse_website command summary max token value."""
-        self.browse_summary_max_token = value
-
     def set_openai_api_key(self, value: str) -> None:
        """Set the OpenAI API key value."""
        self.openai_api_key = value
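
Aside: the Singleton metaclass named in the hunk headers is the usual "one shared instance" pattern. A minimal sketch for reference (not necessarily Auto-GPT's exact implementation):

class Singleton(type):
    """Metaclass that returns one shared instance per class."""

    _instances = {}

    def __call__(cls, *args, **kwargs):
        # Build the instance on first instantiation, then reuse it,
        # so every Config() call yields the same object.
        if cls not in cls._instances:
            cls._instances[cls] = super().__call__(*args, **kwargs)
        return cls._instances[cls]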

@@ -78,7 +78,6 @@ def summarize_text(
         summary = create_chat_completion(
             model=CFG.fast_llm_model,
             messages=messages,
-            max_tokens=CFG.browse_summary_max_token,
         )
         summaries.append(summary)
         print(f"Added chunk {i + 1} summary to memory")
@@ -95,7 +94,6 @@ def summarize_text(
     return create_chat_completion(
         model=CFG.fast_llm_model,
         messages=messages,
-        max_tokens=CFG.browse_summary_max_token,
     )
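
With max_tokens omitted, the chat completions API falls back to its own default, which in practice lets the summary grow up to the model's remaining context window instead of being cut off at 300 tokens. A minimal sketch of what the call reduces to after this change, assuming the openai 0.x Python SDK in use at the time (the wrapper name and defaults shown here are illustrative):

import openai

def create_chat_completion(messages, model="gpt-3.5-turbo", temperature=0.0):
    """Illustrative wrapper: with no max_tokens, the API chooses the cap."""
    response = openai.ChatCompletion.create(
        model=model,
        messages=messages,
        temperature=temperature,
        # max_tokens intentionally omitted: the reply is bounded only by
        # the model's context limit, not a fixed token ceiling.
    )
    return response["choices"][0]["message"]["content"]

A caller that still wants a hard cap can pass max_tokens explicitly at the individual call site instead of routing it through a global config value.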