From 0c1ff5d6a48f47923445ec3b947a8ea7f1ddfa29 Mon Sep 17 00:00:00 2001
From: younessZMZ
Date: Sat, 15 Apr 2023 15:10:42 +0000
Subject: [PATCH] Adjust test_prompt_generator and add coverage reporting

---
 tests.py                                      | 16 +++++-
 ...ator_tests.py => test_prompt_generator.py} | 59 ++++++++++++-------
 2 files changed, 51 insertions(+), 24 deletions(-)
 rename tests/{promptgenerator_tests.py => test_prompt_generator.py} (67%)

diff --git a/tests.py b/tests.py
index 46f802f68..67ba1c8eb 100644
--- a/tests.py
+++ b/tests.py
@@ -1,8 +1,20 @@
 import unittest
+import coverage
 
 if __name__ == "__main__":
-    # Load all tests from the 'autogpt/tests' package
-    suite = unittest.defaultTestLoader.discover("autogpt/tests")
+    # Start coverage collection
+    cov = coverage.Coverage()
+    cov.start()
+
+    # Load all tests from the 'tests' directory
+    suite = unittest.defaultTestLoader.discover("./tests")
 
     # Run the tests
     unittest.TextTestRunner().run(suite)
+
+    # Stop coverage collection
+    cov.stop()
+    cov.save()
+
+    # Report the coverage
+    cov.report(show_missing=True)
diff --git a/tests/promptgenerator_tests.py b/tests/test_prompt_generator.py
similarity index 67%
rename from tests/promptgenerator_tests.py
rename to tests/test_prompt_generator.py
index b700af497..6a0bfd6c7 100644
--- a/tests/promptgenerator_tests.py
+++ b/tests/test_prompt_generator.py
@@ -1,25 +1,35 @@
-# Import the required libraries for unit testing
-import os
-import sys
-import unittest
+from unittest import TestCase
 
 from autogpt.promptgenerator import PromptGenerator
 
 
-# Create a test class for the PromptGenerator, subclassed from unittest.TestCase
-class promptgenerator_tests(unittest.TestCase):
-    # Set up the initial state for each test method by creating an instance of PromptGenerator
-    def setUp(self):
-        self.generator = PromptGenerator()
+class TestPromptGenerator(TestCase):
+    """
+    Test cases for the PromptGenerator class, which is responsible for generating
+    prompts for the AI with constraints, commands, resources, and performance evaluations.
+    """
+
+    @classmethod
+    def setUpClass(cls):
+        """
+        Set up the initial state for each test method by creating an instance of PromptGenerator.
+        """
+        cls.generator = PromptGenerator()
 
     # Test whether the add_constraint() method adds a constraint to the generator's constraints list
     def test_add_constraint(self):
+        """
+        Test if the add_constraint() method adds a constraint to the generator's constraints list.
+        """
         constraint = "Constraint1"
         self.generator.add_constraint(constraint)
         self.assertIn(constraint, self.generator.constraints)
 
     # Test whether the add_command() method adds a command to the generator's commands list
     def test_add_command(self):
+        """
+        Test if the add_command() method adds a command to the generator's commands list.
+        """
         command_label = "Command Label"
         command_name = "command_name"
         args = {"arg1": "value1", "arg2": "value2"}
@@ -31,20 +41,29 @@ class promptgenerator_tests(unittest.TestCase):
         }
         self.assertIn(command, self.generator.commands)
 
-    # Test whether the add_resource() method adds a resource to the generator's resources list
     def test_add_resource(self):
+        """
+        Test if the add_resource() method adds a resource to the generator's resources list.
+ """ resource = "Resource1" self.generator.add_resource(resource) self.assertIn(resource, self.generator.resources) - # Test whether the add_performance_evaluation() method adds an evaluation to the generator's performance_evaluation list def test_add_performance_evaluation(self): + """ + Test if the add_performance_evaluation() method adds an evaluation to the generator's + performance_evaluation list. + """ evaluation = "Evaluation1" self.generator.add_performance_evaluation(evaluation) self.assertIn(evaluation, self.generator.performance_evaluation) - # Test whether the generate_prompt_string() method generates a prompt string with all the added constraints, commands, resources and evaluations def test_generate_prompt_string(self): + """ + Test if the generate_prompt_string() method generates a prompt string with all the added + constraints, commands, resources, and evaluations. + """ + # Define the test data constraints = ["Constraint1", "Constraint2"] commands = [ { @@ -61,7 +80,7 @@ class promptgenerator_tests(unittest.TestCase): resources = ["Resource1", "Resource2"] evaluations = ["Evaluation1", "Evaluation2"] - # Add all the constraints, commands, resources, and evaluations to the generator + # Add test data to the generator for constraint in constraints: self.generator.add_constraint(constraint) for command in commands: @@ -76,24 +95,20 @@ class promptgenerator_tests(unittest.TestCase): # Generate the prompt string and verify its correctness prompt_string = self.generator.generate_prompt_string() self.assertIsNotNone(prompt_string) + + # Check if all constraints, commands, resources, and evaluations are present in the prompt string for constraint in constraints: self.assertIn(constraint, prompt_string) for command in commands: self.assertIn(command["name"], prompt_string) - - # Check for each key-value pair in the command args dictionary - for key, value in command["args"].items(): - self.assertIn(f'"{key}": "{value}"', prompt_string) + for key, value in command["args"].items(): + self.assertIn(f'"{key}": "{value}"', prompt_string) for resource in resources: self.assertIn(resource, prompt_string) for evaluation in evaluations: self.assertIn(evaluation, prompt_string) + self.assertIn("constraints", prompt_string.lower()) self.assertIn("commands", prompt_string.lower()) self.assertIn("resources", prompt_string.lower()) self.assertIn("performance evaluation", prompt_string.lower()) - - -# Run the tests when this script is executed -if __name__ == "__main__": - unittest.main()
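
Note (illustrative, not part of the patch): the updated tests.py runner assumes the coverage
package is installed (pip install coverage). As a minimal sketch of the PromptGenerator
behaviour the renamed tests exercise, using only the methods that appear in the diff above:

    from autogpt.promptgenerator import PromptGenerator

    generator = PromptGenerator()
    generator.add_constraint("Constraint1")
    generator.add_command("Command Label", "command_name", {"arg1": "value1"})
    generator.add_resource("Resource1")
    generator.add_performance_evaluation("Evaluation1")

    prompt = generator.generate_prompt_string()
    # The new tests assert that every added item appears in the generated prompt,
    # with command args rendered in the form '"arg1": "value1"'.
    assert "Constraint1" in prompt
    assert "command_name" in prompt
    assert '"arg1": "value1"' in prompt
    assert "Resource1" in prompt
    assert "Evaluation1" in prompt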