diff --git a/arena/AwareAgent.json b/arena/AwareAgent.json
new file mode 100644
index 000000000..fe7f44875
--- /dev/null
+++ b/arena/AwareAgent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/LuisLechugaRuiz/AwareAgent",
+ "timestamp": "2023-10-17T14:10:03.198917",
+ "commit_hash_to_benchmark": "1eadc64dc0a693c7c9de77ddaef857f3a36f7950",
+ "branch_to_benchmark": "master"
+}
\ No newline at end of file
diff --git a/arena/Bagi_agent.json b/arena/Bagi_agent.json
new file mode 100644
index 000000000..4251bb424
--- /dev/null
+++ b/arena/Bagi_agent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/xpineda/AutoGPT_xabyvng.git",
+ "timestamp": "2023-10-20T09:21:48.837635",
+ "commit_hash_to_benchmark": "2187f66149ffa4bb99f9ca6a11b592fe4d683791",
+ "branch_to_benchmark": "master"
+}
\ No newline at end of file
diff --git a/arena/LAWYER_EMAD.json b/arena/LAWYER_EMAD.json
new file mode 100644
index 000000000..5d84d0872
--- /dev/null
+++ b/arena/LAWYER_EMAD.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/emads7/AutoGPT.git",
+ "timestamp": "2023-10-19T15:06:37.481038",
+ "commit_hash_to_benchmark": "4b1e8f6e8b4186ec6563301c146fbf3425f92715",
+ "branch_to_benchmark": "master"
+}
\ No newline at end of file
diff --git a/arena/MyExample.json b/arena/MyExample.json
new file mode 100644
index 000000000..508515aed
--- /dev/null
+++ b/arena/MyExample.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/gabenitez/AutoGPT",
+ "timestamp": "2023-10-19T22:00:47.453159",
+ "commit_hash_to_benchmark": "b4588f6425912316e1512391e4392ca30d61e144",
+ "branch_to_benchmark": "master"
+}
\ No newline at end of file
diff --git a/arena/Pumu2_agent.json b/arena/Pumu2_agent.json
new file mode 100644
index 000000000..52510f0b0
--- /dev/null
+++ b/arena/Pumu2_agent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/xpineda/AutoGPT_xabyvng.git",
+ "timestamp": "2023-10-20T09:26:07.885410",
+ "commit_hash_to_benchmark": "2187f66149ffa4bb99f9ca6a11b592fe4d683791",
+ "branch_to_benchmark": "master"
+}
\ No newline at end of file
diff --git a/arena/ResearchAgent.json b/arena/ResearchAgent.json
new file mode 100644
index 000000000..c04a6b579
--- /dev/null
+++ b/arena/ResearchAgent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/Umar-Azam/AutoGPT-ResearchAgent",
+ "timestamp": "2023-10-20T06:08:12.933685",
+ "commit_hash_to_benchmark": "9219bfba0e028a557109b8e39c0fd91c1df243f8",
+ "branch_to_benchmark": "master"
+}
\ No newline at end of file
diff --git a/arena/UGYUJI.json b/arena/UGYUJI.json
new file mode 100644
index 000000000..2d0abc304
--- /dev/null
+++ b/arena/UGYUJI.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/ugyuji/AutoGPT",
+ "timestamp": "2023-10-20T04:42:28.397067",
+ "commit_hash_to_benchmark": "052802ff8d9354f23620eb8b6a5fd68cda7e5c0e",
+ "branch_to_benchmark": "master"
+}
\ No newline at end of file
diff --git a/arena/UniAgent.json b/arena/UniAgent.json
new file mode 100644
index 000000000..19d710fa2
--- /dev/null
+++ b/arena/UniAgent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/JovanKanevche/AutoGPT",
+ "timestamp": "2023-10-19T17:04:49.626683",
+ "commit_hash_to_benchmark": "4b1e8f6e8b4186ec6563301c146fbf3425f92715",
+ "branch_to_benchmark": "master"
+}
\ No newline at end of file
diff --git a/arena/WYC.json b/arena/WYC.json
new file mode 100644
index 000000000..0620b0aab
--- /dev/null
+++ b/arena/WYC.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/131250208/AutoGPT_YC",
+ "timestamp": "2023-10-20T07:42:11.493899",
+ "commit_hash_to_benchmark": "9219bfba0e028a557109b8e39c0fd91c1df243f8",
+ "branch_to_benchmark": "master"
+}
\ No newline at end of file
diff --git a/arena/YoudaoAutoGPT.json b/arena/YoudaoAutoGPT.json
new file mode 100644
index 000000000..8e81970eb
--- /dev/null
+++ b/arena/YoudaoAutoGPT.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/jiezhangGt/AutoGPT",
+ "timestamp": "2023-10-20T03:02:17.342168",
+ "commit_hash_to_benchmark": "4b1e8f6e8b4186ec6563301c146fbf3425f92715",
+ "branch_to_benchmark": "master"
+}
\ No newline at end of file
diff --git a/arena/ZJgpt.json b/arena/ZJgpt.json
new file mode 100644
index 000000000..0ac3d2567
--- /dev/null
+++ b/arena/ZJgpt.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/jiezhangGt/AutoGPT",
+ "timestamp": "2023-10-20T04:04:28.198603",
+ "commit_hash_to_benchmark": "4b1e8f6e8b4186ec6563301c146fbf3425f92715",
+ "branch_to_benchmark": "master"
+}
\ No newline at end of file
diff --git a/arena/agsCehAgent.json b/arena/agsCehAgent.json
new file mode 100644
index 000000000..e628e79a3
--- /dev/null
+++ b/arena/agsCehAgent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/albags/AutoGPT.git",
+ "timestamp": "2023-10-19T11:30:12.759675",
+ "commit_hash_to_benchmark": "4b1e8f6e8b4186ec6563301c146fbf3425f92715",
+ "branch_to_benchmark": "master"
+}
\ No newline at end of file
diff --git a/arena/contentstrategy.json b/arena/contentstrategy.json
new file mode 100644
index 000000000..891432676
--- /dev/null
+++ b/arena/contentstrategy.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/banderson12/AutoGPT",
+ "timestamp": "2023-10-19T20:13:23.530323",
+ "commit_hash_to_benchmark": "b4588f6425912316e1512391e4392ca30d61e144",
+ "branch_to_benchmark": "master"
+}
\ No newline at end of file
diff --git a/arena/devagent.json b/arena/devagent.json
new file mode 100644
index 000000000..f65809e14
--- /dev/null
+++ b/arena/devagent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/w6m6/kkgpt",
+ "timestamp": "2023-10-20T08:29:25.708364",
+ "commit_hash_to_benchmark": "052802ff8d9354f23620eb8b6a5fd68cda7e5c0e",
+ "branch_to_benchmark": "master"
+}
\ No newline at end of file
diff --git a/arena/hello.json b/arena/hello.json
new file mode 100644
index 000000000..44d8836c8
--- /dev/null
+++ b/arena/hello.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/ldnvnbl/AutoGPT",
+ "timestamp": "2023-10-20T09:37:16.860422",
+ "commit_hash_to_benchmark": "2187f66149ffa4bb99f9ca6a11b592fe4d683791",
+ "branch_to_benchmark": "master"
+}
\ No newline at end of file
diff --git a/arena/testAgent.json b/arena/testAgent.json
new file mode 100644
index 000000000..02c5b1b84
--- /dev/null
+++ b/arena/testAgent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/Nilllas/AutoGPT",
+ "timestamp": "2023-10-20T11:27:15.343842",
+ "commit_hash_to_benchmark": "2187f66149ffa4bb99f9ca6a11b592fe4d683791",
+ "branch_to_benchmark": "master"
+}
\ No newline at end of file
diff --git a/arena/woohoo_agent.json b/arena/woohoo_agent.json
new file mode 100644
index 000000000..a805c3498
--- /dev/null
+++ b/arena/woohoo_agent.json
@@ -0,0 +1,6 @@
+{
+ "github_repo_url": "https://github.com/FIresInWind/AutoGPT",
+ "timestamp": "2023-10-19T15:14:59.786203",
+ "commit_hash_to_benchmark": "4b1e8f6e8b4186ec6563301c146fbf3425f92715",
+ "branch_to_benchmark": "master"
+}
\ No newline at end of file
diff --git a/autogpts/forge/README.md b/autogpts/forge/README.md
index 7e696b852..2e6840c46 100644
--- a/autogpts/forge/README.md
+++ b/autogpts/forge/README.md
@@ -22,10 +22,3 @@ The getting started [tutorial series](https://aiedge.medium.com/autogpt-forge-e3de53cc58ec)
 4. [AutoGPT Forge: Crafting Intelligent Agent Logic](https://medium.com/@aiedge/autogpt-forge-crafting-intelligent-agent-logic-bc5197b14cb4)
 
 
-Coming soon:
-
-
-3. Interacting with and Benchmarking your Agent
-4. Abilities
-5. The Planning Loop
-6. Memories
diff --git a/autogpts/forge/tutorials/wip_004_benchmarking.md b/autogpts/forge/tutorials/wip_004_benchmarking.md
deleted file mode 100644
index 68cbd3dfd..000000000
--- a/autogpts/forge/tutorials/wip_004_benchmarking.md
+++ /dev/null
@@ -1,37 +0,0 @@
-# Harnessing the Power of Test-Driven Development with AGBenchmark
-
-## Introduction
-- Understanding Test-Driven Development (TDD)
-- Importance of Benchmarking in Agent Development
-
-## Section 1: Introduction to AGBenchmark
-- Overview of AGBenchmark
-- Setting up AGBenchmark in the Forge Environment
-
-## Section 2: Benchmarking with AGBenchmark
-- Understanding Benchmark Categories and Tests
-- Using AGBenchmark Commands to List and Start Tests
-
-## Section 3: Writing Tests for Your Agent
-- Creating Benchmark Tests
-- Structuring Test Cases and Scenarios
-
-## Section 4: Running and Analyzing Benchmark Tests
-- Executing Benchmark Tests using CLI
-- Analyzing Benchmark Results and Feedback
-
-## Section 5: Continuous Benchmarking
-- Integrating Benchmarking into Development Workflow
-- Automating Benchmark Testing
-
-## Conclusion
-- Recap of the Tutorial
-- Enhancing Your Agent through Continuous Benchmarking
-
-## Additional Resources
-- Links to AGBenchmark Documentation
-- Community Forums and Discussions on Benchmarking
-
-## Appendix
-- Troubleshooting Common Benchmarking Issues
-- Glossary of Benchmarking Terms
diff --git a/autogpts/forge/tutorials/wip_005_adding_abilities.md b/autogpts/forge/tutorials/wip_005_adding_abilities.md
deleted file mode 100644
index 1ab4cf7eb..000000000
--- a/autogpts/forge/tutorials/wip_005_adding_abilities.md
+++ /dev/null
@@ -1,59 +0,0 @@
-# Ability Acquisition: Enhancing Your Agent's Capabilities
-
-## Introduction
-- Understanding the Importance of Ability Acquisition
-- The Concept of Abilities in AutoGPT
-
-## Section 1: Identifying Necessary Abilities
-- Analyzing the Requirements for Your Agent
-- Categorizing Abilities: Core vs. Supplementary
-
-## Section 2: Developing Abilities for Your Agent
-- Integrating Existing Abilities from the Forge
-- Developing Custom Abilities: A Step-by-step Guide
-
-## Section 3: Implementing and Executing Abilities
-- Utilizing the Agent Protocol for Ability Implementation
-- Executing Abilities: Task and Step Execution
-- Example: Developing and Executing an Ability using Task and Step Schemas
-
-## Section 4: Encoding Abilities in Prompts for LLM Selection
-- Understanding the Concept of Prompt Engineering
-- Strategies for Effective Ability Encoding in Prompts
-- Practical Examples: Encoding Various Abilities in Prompts
-
-## Section 5: Testing and Debugging Abilities
-- Employing Test-Driven Development for Ability Testing
-- Debugging Common Issues in Ability Implementation
-
-## Conclusion
-- Recap of the Tutorial
-- Preparing Your Agent for Ability Integration and Enhancement
-
-## Additional Resources
-
-From **The Rise and Potential of Large Language Model Based Agents: A Survey** *Zhiheng Xi (Fudan University) et al. arXiv.* [[paper](https://arxiv.org/abs/2305.14497)] [[code](https://github.com/woooodyy/llm-agent-paper-list)]
-### Research Papers
-- [2023/07] **ToolLLM: Facilitating Large Language Models to Master 16000+ Real-world APIs.** *Yujia Qin et al. arXiv.* [[paper](https://arxiv.org/abs/2307.16789)] [[code](https://github.com/openbmb/toolbench)] [[dataset](https://paperswithcode.com/dataset/toolbench)]
-- [2023/05] **Large Language Models as Tool Makers.** *Tianle Cai et al. arXiv.* [[paper](https://arxiv.org/abs/2305.17126)] [[code](https://github.com/ctlllll/llm-toolmaker)]
-- [2023/05] **CREATOR: Disentangling Abstract and Concrete Reasonings of Large Language Models through Tool Creation.** *Cheng Qian et al. arXiv.* [[paper](https://arxiv.org/abs/2305.14318)]
-- [2023/04] **Tool Learning with Foundation Models.** *Yujia Qin et al. arXiv.* [[paper](https://arxiv.org/abs/2304.08354)] [[code](https://github.com/openbmb/bmtools)]
-- [2023/04] **ChemCrow: Augmenting large-language models with chemistry tools.** *Andres M Bran (Laboratory of Artificial Chemical Intelligence, ISIC, EPFL) et al. arXiv.* [[paper](https://arxiv.org/abs/2304.05376)] [[code](https://github.com/ur-whitelab/chemcrow-public)]
-- [2023/04] **GeneGPT: Augmenting Large Language Models with Domain Tools for Improved Access to Biomedical Information.** *Qiao Jin, Yifan Yang, Qingyu Chen, Zhiyong Lu. arXiv.* [[paper](https://arxiv.org/abs/2304.09667)] [[code](https://github.com/ncbi/GeneGPT)]
-- [2023/04] **OpenAGI: When LLM Meets Domain Experts.** *Yingqiang Ge et al. arXiv.* [[paper](https://arxiv.org/abs/2304.04370)] [[code](https://github.com/agiresearch/openagi)]
-- [2023/03] **HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face.** *Yongliang Shen et al. arXiv.* [[paper](https://arxiv.org/abs/2303.17580)] [[code](https://github.com/microsoft/JARVIS)]
-- [2023/03] **Visual ChatGPT: Talking, Drawing and Editing with Visual Foundation Models.** *Chenfei Wu et al. arXiv.* [[paper](https://arxiv.org/abs/2303.04671)] [[code](https://github.com/microsoft/visual-chatgpt)]
-- [2023/02] **Augmented Language Models: a Survey.** *Grégoire Mialon et al. arXiv.* [[paper](https://arxiv.org/abs/2302.07842)]
-- [2023/02] **Toolformer: Language Models Can Teach Themselves to Use Tools.** *Timo Schick et al. arXiv.* [[paper](https://arxiv.org/abs/2302.04761)]
-- [2022/05] **TALM: Tool Augmented Language Models.** *Aaron Parisi et al. arXiv.* [[paper](https://arxiv.org/abs/2205.12255)]
-- [2022/05] **MRKL Systems: A modular, neuro-symbolic architecture that combines large language models, external knowledge sources and discrete reasoning.** *Ehud Karpas et al. arXiv.* [[paper](https://arxiv.org/abs/2205.00445)]
-- [2022/04] **Do As I Can, Not As I Say: Grounding Language in Robotic Affordances.** *Michael Ahn et al. arXiv.* [[paper](https://arxiv.org/abs/2204.01691)]
-- [2021/12] **WebGPT: Browser-assisted question-answering with human feedback.** *Reiichiro Nakano et al. arXiv.* [[paper](https://arxiv.org/abs/2112.09332)]
-- [2021/07] **Evaluating Large Language Models Trained on Code.** *Mark Chen et al. arXiv.* [[paper](https://arxiv.org/abs/2107.03374)] [[code](https://github.com/openai/human-eval)]
-
-
-
-## Appendix
-- Examples of Ability Implementations
-- Glossary of Ability-Related Terms
-
diff --git a/autogpts/forge/tutorials/wip_006_planning_loop.md b/autogpts/forge/tutorials/wip_006_planning_loop.md
deleted file mode 100644
index cf2540363..000000000
--- a/autogpts/forge/tutorials/wip_006_planning_loop.md
+++ /dev/null
@@ -1,80 +0,0 @@
-# Mastering the Agent Planning Loop: Strategies for Effective Development
-
-## Introduction
-- Understanding the Agent Planning Loop
-- Significance of Effective Planning in Agent Development
-
-## Section 1: Concepts of Agent Planning Loop
-- The Structure of an Agent Planning Loop
-- Key Components and Functions
-
-## Section 2: Developing an Effective Planning Strategy
-- Setting Goals and Objectives
-- Identifying Tasks and Steps within the Planning Loop
-
-## Section 3: Implementing the Planning Loop
-- Coding the Planning Loop in the Forge Environment
-- Utilizing the Agent Protocol APIs
-
-## Section 4: Testing and Optimization
-- Test-Driven Development of the Planning Loop
-- Optimizing the Planning Loop for Better Performance
-
-## Section 5: Best Practices
-- Tips for Effective Planning Loop Implementation
-- Common Pitfalls to Avoid
-
-## Conclusion
-- Recap of the Tutorial
-- Leveraging the Planning Loop for Advanced Agent Development
-
-## Additional Resources
-
-From **The Rise and Potential of Large Language Model Based Agents: A Survey** *Zhiheng Xi (Fudan University) et al. arXiv.* [[paper](https://arxiv.org/abs/2305.14497)] [[code](https://github.com/woooodyy/llm-agent-paper-list)]
-
-### Reasoning
-
-- [2023/05] **Self-Polish: Enhance Reasoning in Large Language Models via Problem Refinement.** *Zhiheng Xi (Fudan University) et al. arXiv.* [[paper](https://arxiv.org/abs/2305.14497)] [[code](https://github.com/woooodyy/self-polish)]
-
-- [2023-03] **Large Language Models are Zero-Shot Reasoners.** *Takeshi Kojima (The University of Tokyo) et al. arXiv.* [[paper](https://arxiv.org/abs/2205.11916)][[code](https://github.com/kojima-takeshi188/zero_shot_cot)]
-
-- [2023/03] **Self-Refine: Iterative Refinement with Self-Feedback.** *Aman Madaan (Carnegie Mellon University) et al. arXiv.* [[paper](https://arxiv.org/abs/2303.17651)] [[code](https://github.com/madaan/self-refine)]
-
-- [2022/05] **Selection-Inference: Exploiting Large Language Models for Interpretable Logical Reasoning.** *Antonia Creswell (DeepMind) et al. arXiv.* [[paper](https://arxiv.org/abs/2205.09712)]
-
-- [2022/03] **Self-Consistency Improves Chain of Thought Reasoning in Language Models.** *Xuezhi Wang(Google Research) et al. arXiv.* [[paper](https://arxiv.org/abs/2203.11171)] [[code](https://github.com/huggingface/transformers/tree/main/src/transformers/models/bart)]
-
-- [2022/01] **Chain-of-Thought Prompting Elicits Reasoning in Large Language Models.** *Jason Wei (Google Research,) et al. arXiv.* [[paper](https://arxiv.org/abs/2201.11903)]
-
-
-### Planning
-
-#### Plan formulation
-
-- [2023/05] **Tree of Thoughts: Deliberate Problem Solving with Large Language Models.** *Shunyu Yao (Princeton University) et al. arXiv.* [[paper](https://arxiv.org/abs/2305.10601)] [[code](https://github.com/princeton-nlp/tree-of-thought-llm)]
-- [2023/05] **Plan, Eliminate, and Track -- Language Models are Good Teachers for Embodied Agents.** *Yue Wu(Carnegie Mellon University) et al. arXiv.* [[paper](https://arxiv.org/abs/2305.02412)]
-- [2023/05] **Reasoning with Language Model is Planning with World Model.** *Shibo Hao (UC San Diego) et al. arXiv.* [[paper](https://arxiv.org/abs/2305.14992)] [[code](https://github.com/Ber666/RAP)]
-- [2023/05] **SwiftSage: A Generative Agent with Fast and Slow Thinking for Complex Interactive Tasks.** *Bill Yuchen Lin (Allen Institute for Artificial Intelligence) et al. arXiv.* [[paper](https://arxiv.org/abs/2305.17390)] [[code](https://github.com/yuchenlin/swiftsage)]
-- [2023/04] **LLM+P: Empowering Large Language Models with Optimal Planning Proficiency.** *Bo Liu (University of Texas at Austin) et al. arXiv.* [[paper](https://arxiv.org/abs/2304.11477)] [[code](https://github.com/Cranial-XIX/llm-pddl)]
-- [2023/03] **HuggingGPT: Solving AI Tasks with ChatGPT and its Friends in Hugging Face.** *Yongliang Shen (Microsoft Research Asia) et al. arXiv.* [[paper](https://arxiv.org/abs/2303.17580)] [[code](https://github.com/microsoft/JARVIS)]
-- [2023/02] **Describe, Explain, Plan and Select: Interactive Planning with Large Language Models Enables Open-World Multi-Task Agents.** *ZiHao Wang (Peking University) et al. arXiv.* [[paper](https://arxiv.org/abs/2302.01560)] [[code](https://github.com/CraftJarvis/MC-Planner)]
-- [2022/05] **Least-to-Most Prompting Enables Complex Reasoning in Large Language Models.** *Denny Zhou (Google Research) et al. arXiv.* [[paper](https://arxiv.org/abs/2205.10625)]
-- [2022/05] **MRKL Systems: A modular, neuro-symbolic architecture that combines large language models, external knowledge sources and discrete reasoning.** *Ehud Karpas (AI21 Labs) et al. arXiv.* [[paper](https://arxiv.org/abs/2205.00445)]
-- [2022/04] **Do As I Can, Not As I Say: Grounding Language in Robotic Affordances.** *Michael Ahn (Robotics at Google) et al. arXiv.* [[paper](https://arxiv.org/abs/2204.01691)]
-- [2023/05] **Agents: An Open-source Framework for Autonomous Language Agents.** Wangchunshu Zhou (AIWaves) et al. arXiv.* [[paper](https://arxiv.org/pdf/2309.07870.pdf)] [[code](https://github.com/aiwaves-cn/agents)]
-
-
-#### Plan reflection
-
-- [2023/08] **SelfCheck: Using LLMs to Zero-Shot Check Their Own Step-by-Step Reasoning.** *Ning Miao (University of Oxford) et al. arXiv.* [[paper](https://arxiv.org/abs/2308.00436)] [[code](https://github.com/NingMiao/SelfCheck)]
-- [2023/05] **ChatCoT: Tool-Augmented Chain-of-Thought Reasoning on Chat-based Large Language Models.** *Zhipeng Chen (Renmin University of China) et al. arXiv.* [[paper](https://arxiv.org/abs/2305.14323)] [[code](https://github.com/RUCAIBOX/ChatCoT)]
-- [2023/05] **Voyager: An Open-Ended Embodied Agent with Large Language Models.** *Guanzhi Wang (NVIDA) et al. arXiv.* [[paper](https://arxiv.org/abs/2305.16291)] [[code](https://voyager.minedojo.org/)]
-- [2023/03] **Chat with the Environment: Interactive Multimodal Perception Using Large Language Models.** *Xufeng Zhao (University Hamburg) et al. arXiv.* [[paper](https://arxiv.org/abs/2303.08268)] [[code](https://matcha-model.github.io/)]
-- [2022/12] **LLM-Planner: Few-Shot Grounded Planning for Embodied Agents with Large Language Models.** *Chan Hee Song (The Ohio State University) et al. arXiv.* [[paper](https://arxiv.org/abs/2212.04088)] [[code](https://dki-lab.github.io/LLM-Planner/)]
-- [2022/10] **ReAct: Synergizing Reasoning and Acting in Language Models.** *Shunyu Yao ( Princeton University) et al. arXiv.* [[paper](https://arxiv.org/abs/2210.03629)] [[code](https://react-lm.github.io/)]
-- [2022/07] **Inner Monologue: Embodied Reasoning through Planning with Language Models.** *Wenlong Huang (Robotics at Google) et al. arXiv.* [[paper](https://arxiv.org/abs/2207.05608)] [[code](https://innermonologue.github.io/)]
-- [2021/10] **AI Chains: Transparent and Controllable Human-AI Interaction by Chaining Large Language Model Prompts.** *Tongshuang Wu (University of Washington) et al. arXiv.* [[paper](https://arxiv.org/abs/2110.01691)]
-
-## Appendix
-- Example Planning Loop Implementations
-- Glossary of Planning Loop Terms
diff --git a/benchmark/agbenchmark/challenges/verticals/scrape/6_find_cpo_microsoft/artifacts_out/output.txt b/benchmark/agbenchmark/challenges/verticals/scrape/6_find_autogpt_creator/artifacts_out/output.txt
similarity index 100%
rename from benchmark/agbenchmark/challenges/verticals/scrape/6_find_cpo_microsoft/artifacts_out/output.txt
rename to benchmark/agbenchmark/challenges/verticals/scrape/6_find_autogpt_creator/artifacts_out/output.txt
diff --git a/benchmark/agbenchmark/challenges/verticals/scrape/6_find_cpo_microsoft/data.json b/benchmark/agbenchmark/challenges/verticals/scrape/6_find_autogpt_creator/data.json
similarity index 94%
rename from benchmark/agbenchmark/challenges/verticals/scrape/6_find_cpo_microsoft/data.json
rename to benchmark/agbenchmark/challenges/verticals/scrape/6_find_autogpt_creator/data.json
index 112df0cb8..c3c069f2e 100644
--- a/benchmark/agbenchmark/challenges/verticals/scrape/6_find_cpo_microsoft/data.json
+++ b/benchmark/agbenchmark/challenges/verticals/scrape/6_find_autogpt_creator/data.json
@@ -17,8 +17,7 @@
       "output.txt"
     ],
     "should_contain": [
-      "Scotland",
-      "scotland"
+      "cotland"
    ],
     "should_not_contain": []
   },