Adjust test_prompt_generator and add test report generation

pull/1609/head
younessZMZ 2023-04-15 15:10:42 +00:00
parent 5dfdb2e2a9
commit 0c1ff5d6a4
2 changed files with 50 additions and 23 deletions

View File

@@ -1,8 +1,20 @@
import unittest

import coverage

if __name__ == "__main__":
    # Start coverage collection BEFORE discovering tests so that module-level
    # code executed during test import is also measured.
    cov = coverage.Coverage()
    cov.start()

    # Load all tests from the './tests' package.
    suite = unittest.defaultTestLoader.discover("./tests")

    # Run the discovered tests.
    unittest.TextTestRunner().run(suite)

    # Stop coverage collection and persist the data file (.coverage).
    cov.stop()
    cov.save()

    # Report the coverage, listing the line numbers that were never executed.
    cov.report(show_missing=True)

View File

@@ -1,25 +1,35 @@
# Import the required libraries for unit testing from unittest import TestCase
import os
import sys
import unittest
from autogpt.promptgenerator import PromptGenerator from autogpt.promptgenerator import PromptGenerator
# Create a test class for the PromptGenerator, subclassed from unittest.TestCase class TestPromptGenerator(TestCase):
class promptgenerator_tests(unittest.TestCase): """
# Set up the initial state for each test method by creating an instance of PromptGenerator Test cases for the PromptGenerator class, which is responsible for generating
def setUp(self): prompts for the AI with constraints, commands, resources, and performance evaluations.
self.generator = PromptGenerator() """
@classmethod
def setUpClass(cls):
"""
Set up the initial state for each test method by creating an instance of PromptGenerator.
"""
cls.generator = PromptGenerator()
# Test whether the add_constraint() method adds a constraint to the generator's constraints list # Test whether the add_constraint() method adds a constraint to the generator's constraints list
def test_add_constraint(self): def test_add_constraint(self):
"""
Test if the add_constraint() method adds a constraint to the generator's constraints list.
"""
constraint = "Constraint1" constraint = "Constraint1"
self.generator.add_constraint(constraint) self.generator.add_constraint(constraint)
self.assertIn(constraint, self.generator.constraints) self.assertIn(constraint, self.generator.constraints)
# Test whether the add_command() method adds a command to the generator's commands list # Test whether the add_command() method adds a command to the generator's commands list
def test_add_command(self): def test_add_command(self):
"""
Test if the add_command() method adds a command to the generator's commands list.
"""
command_label = "Command Label" command_label = "Command Label"
command_name = "command_name" command_name = "command_name"
args = {"arg1": "value1", "arg2": "value2"} args = {"arg1": "value1", "arg2": "value2"}
@@ -31,20 +41,29 @@ class promptgenerator_tests(unittest.TestCase):
} }
self.assertIn(command, self.generator.commands) self.assertIn(command, self.generator.commands)
# Test whether the add_resource() method adds a resource to the generator's resources list
def test_add_resource(self): def test_add_resource(self):
"""
Test if the add_resource() method adds a resource to the generator's resources list.
"""
resource = "Resource1" resource = "Resource1"
self.generator.add_resource(resource) self.generator.add_resource(resource)
self.assertIn(resource, self.generator.resources) self.assertIn(resource, self.generator.resources)
# Test whether the add_performance_evaluation() method adds an evaluation to the generator's performance_evaluation list
def test_add_performance_evaluation(self): def test_add_performance_evaluation(self):
"""
Test if the add_performance_evaluation() method adds an evaluation to the generator's
performance_evaluation list.
"""
evaluation = "Evaluation1" evaluation = "Evaluation1"
self.generator.add_performance_evaluation(evaluation) self.generator.add_performance_evaluation(evaluation)
self.assertIn(evaluation, self.generator.performance_evaluation) self.assertIn(evaluation, self.generator.performance_evaluation)
# Test whether the generate_prompt_string() method generates a prompt string with all the added constraints, commands, resources and evaluations
def test_generate_prompt_string(self): def test_generate_prompt_string(self):
"""
Test if the generate_prompt_string() method generates a prompt string with all the added
constraints, commands, resources, and evaluations.
"""
# Define the test data
constraints = ["Constraint1", "Constraint2"] constraints = ["Constraint1", "Constraint2"]
commands = [ commands = [
{ {
@@ -61,7 +80,7 @@ class promptgenerator_tests(unittest.TestCase):
resources = ["Resource1", "Resource2"] resources = ["Resource1", "Resource2"]
evaluations = ["Evaluation1", "Evaluation2"] evaluations = ["Evaluation1", "Evaluation2"]
# Add all the constraints, commands, resources, and evaluations to the generator # Add test data to the generator
for constraint in constraints: for constraint in constraints:
self.generator.add_constraint(constraint) self.generator.add_constraint(constraint)
for command in commands: for command in commands:
@@ -76,24 +95,20 @@ class promptgenerator_tests(unittest.TestCase):
# Generate the prompt string and verify its correctness # Generate the prompt string and verify its correctness
prompt_string = self.generator.generate_prompt_string() prompt_string = self.generator.generate_prompt_string()
self.assertIsNotNone(prompt_string) self.assertIsNotNone(prompt_string)
# Check if all constraints, commands, resources, and evaluations are present in the prompt string
for constraint in constraints: for constraint in constraints:
self.assertIn(constraint, prompt_string) self.assertIn(constraint, prompt_string)
for command in commands: for command in commands:
self.assertIn(command["name"], prompt_string) self.assertIn(command["name"], prompt_string)
# Check for each key-value pair in the command args dictionary
for key, value in command["args"].items(): for key, value in command["args"].items():
self.assertIn(f'"{key}": "{value}"', prompt_string) self.assertIn(f'"{key}": "{value}"', prompt_string)
for resource in resources: for resource in resources:
self.assertIn(resource, prompt_string) self.assertIn(resource, prompt_string)
for evaluation in evaluations: for evaluation in evaluations:
self.assertIn(evaluation, prompt_string) self.assertIn(evaluation, prompt_string)
self.assertIn("constraints", prompt_string.lower()) self.assertIn("constraints", prompt_string.lower())
self.assertIn("commands", prompt_string.lower()) self.assertIn("commands", prompt_string.lower())
self.assertIn("resources", prompt_string.lower()) self.assertIn("resources", prompt_string.lower())
self.assertIn("performance evaluation", prompt_string.lower()) self.assertIn("performance evaluation", prompt_string.lower())
# Run the tests when this script is executed
if __name__ == "__main__":
unittest.main()