diff --git a/autogpt_platform/backend/.env.example b/autogpt_platform/backend/.env.example
index eb4801c28..f0804681b 100644
--- a/autogpt_platform/backend/.env.example
+++ b/autogpt_platform/backend/.env.example
@@ -141,6 +141,9 @@ EXA_API_KEY=
 # E2B
 E2B_API_KEY=
 
+# Mem0
+MEM0_API_KEY=
+
 # Nvidia
 NVIDIA_API_KEY=
 
diff --git a/autogpt_platform/backend/backend/blocks/basic.py b/autogpt_platform/backend/backend/blocks/basic.py
index caacaf898..63aea3488 100644
--- a/autogpt_platform/backend/backend/blocks/basic.py
+++ b/autogpt_platform/backend/backend/blocks/basic.py
@@ -508,6 +508,48 @@ class AddToListBlock(Block):
         yield "updated_list", updated_list
 
 
+class FindInListBlock(Block):
+    class Input(BlockSchema):
+        list: List[Any] = SchemaField(description="The list to search in.")
+        value: Any = SchemaField(description="The value to search for.")
+
+    class Output(BlockSchema):
+        index: int = SchemaField(description="The index of the value in the list.")
+        found: bool = SchemaField(
+            description="Whether the value was found in the list."
+        )
+        not_found_value: Any = SchemaField(
+            description="The value that was not found in the list."
+        )
+
+    def __init__(self):
+        super().__init__(
+            id="5e2c6d0a-1e37-489f-b1d0-8e1812b23333",
+            description="Finds the index of the value in the list.",
+            categories={BlockCategory.BASIC},
+            input_schema=FindInListBlock.Input,
+            output_schema=FindInListBlock.Output,
+            test_input=[
+                {"list": [1, 2, 3, 4, 5], "value": 3},
+                {"list": [1, 2, 3, 4, 5], "value": 6},
+            ],
+            test_output=[
+                ("index", 2),
+                ("found", True),
+                ("found", False),
+                ("not_found_value", 6),
+            ],
+        )
+
+    def run(self, input_data: Input, **kwargs) -> BlockOutput:
+        try:
+            yield "index", input_data.list.index(input_data.value)
+            yield "found", True
+        except ValueError:
+            yield "found", False
+            yield "not_found_value", input_data.value
+
+
 class NoteBlock(Block):
     class Input(BlockSchema):
         text: str = SchemaField(description="The text to display in the sticky note.")
diff --git a/autogpt_platform/backend/backend/blocks/mem0.py b/autogpt_platform/backend/backend/blocks/mem0.py
new file mode 100644
index 000000000..90245e0e0
--- /dev/null
+++ b/autogpt_platform/backend/backend/blocks/mem0.py
@@ -0,0 +1,338 @@
+from typing import Any, Literal, Optional, Union
+
+from mem0 import MemoryClient
+from pydantic import BaseModel, SecretStr
+
+from backend.data.block import Block, BlockOutput, BlockSchema
+from backend.data.model import (
+    APIKeyCredentials,
+    CredentialsField,
+    CredentialsMetaInput,
+    SchemaField,
+)
+from backend.integrations.providers import ProviderName
+
+TEST_CREDENTIALS = APIKeyCredentials(
+    id="ed55ac19-356e-4243-a6cb-bc599e9b716f",
+    provider="mem0",
+    api_key=SecretStr("mock-mem0-api-key"),
+    title="Mock Mem0 API key",
+    expires_at=None,
+)
+
+TEST_CREDENTIALS_INPUT = {
+    "provider": TEST_CREDENTIALS.provider,
+    "id": TEST_CREDENTIALS.id,
+    "type": TEST_CREDENTIALS.type,
+    "title": TEST_CREDENTIALS.title,
+}
+
+
+class Mem0Base:
+    """Base class with shared utilities for Mem0 blocks"""
+
+    @staticmethod
+    def _get_client(credentials: APIKeyCredentials) -> MemoryClient:
+        """Get initialized Mem0 client"""
+        return MemoryClient(api_key=credentials.api_key.get_secret_value())
+
+
+Filter = dict[str, list[dict[str, str | dict[str, list[str]]]]]
+
+
+class Conversation(BaseModel):
+    discriminator: Literal["conversation"]
+    messages: list[dict[str, str]]
+
+
+class Content(BaseModel):
+    discriminator: Literal["content"]
+    content: str
+
+
+class AddMemoryBlock(Block, Mem0Base):
+    """Block for adding memories to Mem0
+
+    Always limited by user_id and optional graph_id and graph_exec_id"""
+
+    class Input(BlockSchema):
+        credentials: CredentialsMetaInput[
+            Literal[ProviderName.MEM0], Literal["api_key"]
+        ] = CredentialsField(description="Mem0 API key credentials")
+        content: Union[Content, Conversation] = SchemaField(
+            discriminator="discriminator",
+            description="Content to add - either a string or list of message objects as output from an AI block",
+            default=Content(discriminator="content", content="I'm a vegetarian"),
+        )
+        metadata: dict[str, Any] = SchemaField(
+            description="Optional metadata for the memory", default={}
+        )
+
+        limit_memory_to_run: bool = SchemaField(
+            description="Limit the memory to the run", default=False
+        )
+        limit_memory_to_agent: bool = SchemaField(
+            description="Limit the memory to the agent", default=False
+        )
+
+    class Output(BlockSchema):
+        action: str = SchemaField(description="Action of the operation")
+        memory: str = SchemaField(description="Memory created")
+        error: str = SchemaField(description="Error message if operation fails")
+
+    def __init__(self):
+        super().__init__(
+            id="dce97578-86be-45a4-ae50-f6de33fc935a",
+            description="Add new memories to Mem0 with user segmentation",
+            input_schema=AddMemoryBlock.Input,
+            output_schema=AddMemoryBlock.Output,
+            test_input=[
+                {
+                    "content": {
+                        "discriminator": "conversation",
+                        "messages": [{"role": "user", "content": "I'm a vegetarian"}],
+                    },
+                    "metadata": {"food": "vegetarian"},
+                    "credentials": TEST_CREDENTIALS_INPUT,
+                },
+                {
+                    "content": {
+                        "discriminator": "content",
+                        "content": "I am a vegetarian",
+                    },
+                    "metadata": {"food": "vegetarian"},
+                    "credentials": TEST_CREDENTIALS_INPUT,
+                },
+            ],
+            test_output=[("action", "NO_CHANGE"), ("action", "NO_CHANGE")],
+            test_credentials=TEST_CREDENTIALS,
+            test_mock={"_get_client": lambda credentials: MockMemoryClient()},
+        )
+
+    def run(
+        self,
+        input_data: Input,
+        *,
+        credentials: APIKeyCredentials,
+        user_id: str,
+        graph_id: str,
+        graph_exec_id: str,
+        **kwargs
+    ) -> BlockOutput:
+        try:
+            client = self._get_client(credentials)
+
+            if isinstance(input_data.content, Conversation):
+                messages = input_data.content.messages
+            else:
+                # Unwrap the Content model to its raw string — the API expects plain message dicts
+                messages = [{"role": "user", "content": input_data.content.content}]
+
+            params = {
+                "user_id": user_id,
+                "output_format": "v1.1",
+                "metadata": input_data.metadata,
+            }
+
+            if input_data.limit_memory_to_run:
+                params["run_id"] = graph_exec_id
+            if input_data.limit_memory_to_agent:
+                params["agent_id"] = graph_id
+
+            result = client.add(
+                messages,
+                **params,
+            )
+
+            if len(result.get("results", [])) > 0:
+                for mem in result.get("results", []):
+                    yield "action", mem["event"]
+                    yield "memory", mem["memory"]
+            else:
+                yield "action", "NO_CHANGE"
+
+        except Exception as e:
+            yield "error", str(e)
+
+
+class SearchMemoryBlock(Block, Mem0Base):
+    """Block for searching memories in Mem0"""
+
+    class Input(BlockSchema):
+        credentials: CredentialsMetaInput[
+            Literal[ProviderName.MEM0], Literal["api_key"]
+        ] = CredentialsField(description="Mem0 API key credentials")
+        query: str = SchemaField(
+            description="Search query",
+            advanced=False,
+        )
+        trigger: bool = SchemaField(
+            description="An unused field that is used to (re-)trigger the block when you have no other inputs",
+            default=False,
+            advanced=False,
+        )
+        categories_filter: list[str] = SchemaField(
+            description="Categories to filter by",
+            default=[],
+            advanced=True,
+        )
+        limit_memory_to_run: bool = SchemaField(
+            description="Limit the memory to the run", default=False
+        )
+        limit_memory_to_agent: bool = SchemaField(
+            description="Limit the memory to the agent", default=True
+        )
+
+    class Output(BlockSchema):
+        memories: Any = SchemaField(description="List of matching memories")
+        error: str = SchemaField(description="Error message if operation fails")
+
+    def __init__(self):
+        super().__init__(
+            id="bd7c84e3-e073-4b75-810c-600886ec8a5b",
+            description="Search memories in Mem0 by user",
+            input_schema=SearchMemoryBlock.Input,
+            output_schema=SearchMemoryBlock.Output,
+            test_input={
+                "query": "vegetarian preferences",
+                "credentials": TEST_CREDENTIALS_INPUT,
+                "top_k": 10,
+                "rerank": True,
+            },
+            test_output=[
+                ("memories", [{"id": "test-memory", "content": "test content"}])
+            ],
+            test_credentials=TEST_CREDENTIALS,
+            test_mock={"_get_client": lambda credentials: MockMemoryClient()},
+        )
+
+    def run(
+        self,
+        input_data: Input,
+        *,
+        credentials: APIKeyCredentials,
+        user_id: str,
+        graph_id: str,
+        graph_exec_id: str,
+        **kwargs
+    ) -> BlockOutput:
+        try:
+            client = self._get_client(credentials)
+
+            filters: Filter = {
+                # This works with only one filter, so we can allow others to add on later
+                "AND": [
+                    {"user_id": user_id},
+                ]
+            }
+            if input_data.categories_filter:
+                filters["AND"].append(
+                    {"categories": {"contains": input_data.categories_filter}}
+                )
+            if input_data.limit_memory_to_run:
+                filters["AND"].append({"run_id": graph_exec_id})
+            if input_data.limit_memory_to_agent:
+                filters["AND"].append({"agent_id": graph_id})
+
+            result: list[dict[str, Any]] = client.search(
+                input_data.query, version="v2", filters=filters
+            )
+            yield "memories", result
+
+        except Exception as e:
+            yield "error", str(e)
+
+
+class GetAllMemoriesBlock(Block, Mem0Base):
+    """Block for retrieving all memories from Mem0"""
+
+    class Input(BlockSchema):
+        credentials: CredentialsMetaInput[
+            Literal[ProviderName.MEM0], Literal["api_key"]
+        ] = CredentialsField(description="Mem0 API key credentials")
+        trigger: bool = SchemaField(
+            description="An unused field that is used to trigger the block when you have no other inputs",
+            default=False,
+            advanced=False,
+        )
+        categories: Optional[list[str]] = SchemaField(
+            description="Filter by categories", default=None
+        )
+        limit_memory_to_run: bool = SchemaField(
+            description="Limit the memory to the run", default=False
+        )
+        limit_memory_to_agent: bool = SchemaField(
+            description="Limit the memory to the agent", default=False
+        )
+
+    class Output(BlockSchema):
+        memories: Any = SchemaField(description="List of memories")
+        error: str = SchemaField(description="Error message if operation fails")
+
+    def __init__(self):
+        super().__init__(
+            id="45aee5bf-4767-45d1-a28b-e01c5aae9fc1",
+            description="Retrieve all memories from Mem0 with pagination",
+            input_schema=GetAllMemoriesBlock.Input,
+            output_schema=GetAllMemoriesBlock.Output,
+            test_input={
+                "user_id": "test_user",
+                "credentials": TEST_CREDENTIALS_INPUT,
+            },
+            test_output=[
+                ("memories", [{"id": "test-memory", "content": "test content"}]),
+            ],
+            test_credentials=TEST_CREDENTIALS,
+            test_mock={"_get_client": lambda credentials: MockMemoryClient()},
+        )
+
+    def run(
+        self,
+        input_data: Input,
+        *,
+        credentials: APIKeyCredentials,
+        user_id: str,
+        graph_id: str,
+        graph_exec_id: str,
+        **kwargs
+    ) -> BlockOutput:
+        try:
+            client = self._get_client(credentials)
+
+            filters: Filter = {
+                "AND": [
+                    {"user_id": user_id},
+                ]
+            }
+            if input_data.limit_memory_to_run:
+                filters["AND"].append({"run_id": graph_exec_id})
+            if input_data.limit_memory_to_agent:
+                filters["AND"].append({"agent_id": graph_id})
+            if input_data.categories:
+                filters["AND"].append(
+                    {"categories": {"contains": input_data.categories}}
+                )
+
+            memories: list[dict[str, Any]] = client.get_all(
+                filters=filters,
+                version="v2",
+            )
+
+            yield "memories", memories
+
+        except Exception as e:
+            yield "error", str(e)
+
+
+# Mock client for testing
+class MockMemoryClient:
+    """Mock Mem0 client for testing"""
+
+    def add(self, *args, **kwargs):
+        return {"memory_id": "test-memory-id", "status": "success"}
+
+    def search(self, *args, **kwargs) -> list[dict[str, str]]:
+        return [{"id": "test-memory", "content": "test content"}]
+
+    def get_all(self, *args, **kwargs) -> list[dict[str, str]]:
+        return [{"id": "test-memory", "content": "test content"}]
diff --git a/autogpt_platform/backend/backend/blocks/text.py b/autogpt_platform/backend/backend/blocks/text.py
index 35d25660f..be0917abe 100644
--- a/autogpt_platform/backend/backend/blocks/text.py
+++ b/autogpt_platform/backend/backend/blocks/text.py
@@ -212,3 +212,43 @@ class CombineTextsBlock(Block):
     def run(self, input_data: Input, **kwargs) -> BlockOutput:
         combined_text = input_data.delimiter.join(input_data.input)
         yield "output", combined_text
+
+
+class TextSplitBlock(Block):
+    class Input(BlockSchema):
+        text: str = SchemaField(description="The text to split.")
+        delimiter: str = SchemaField(description="The delimiter to split the text by.")
+        strip: bool = SchemaField(
+            description="Whether to strip the text.", default=True
+        )
+
+    class Output(BlockSchema):
+        texts: list[str] = SchemaField(
+            description="The text split into a list of strings."
+        )
+
+    def __init__(self):
+        super().__init__(
+            id="d5ea33c8-a575-477a-b42f-2fe3be5055ec",
+            description="This block is used to split a text into a list of strings.",
+            categories={BlockCategory.TEXT},
+            input_schema=TextSplitBlock.Input,
+            output_schema=TextSplitBlock.Output,
+            test_input=[
+                {"text": "Hello, World!", "delimiter": ","},
+                {"text": "Hello, World!", "delimiter": ",", "strip": False},
+            ],
+            test_output=[
+                ("texts", ["Hello", "World!"]),
+                ("texts", ["Hello", " World!"]),
+            ],
+        )
+
+    def run(self, input_data: Input, **kwargs) -> BlockOutput:
+        if len(input_data.text) == 0:
+            yield "texts", []
+        else:
+            texts = input_data.text.split(input_data.delimiter)
+            if input_data.strip:
+                texts = [text.strip() for text in texts]
+            yield "texts", texts
diff --git a/autogpt_platform/backend/backend/data/block.py b/autogpt_platform/backend/backend/data/block.py
index cc2ab4567..5592af8dc 100644
--- a/autogpt_platform/backend/backend/data/block.py
+++ b/autogpt_platform/backend/backend/data/block.py
@@ -398,6 +398,7 @@ class Block(ABC, Generic[BlockSchemaInputType, BlockSchemaOutputType]):
         }
 
     def execute(self, input_data: BlockInput, **kwargs) -> BlockOutput:
+        # Merge the input data with the extra execution arguments, preferring the args for security
         if error := self.input_schema.validate_data(input_data):
             raise ValueError(
                 f"Unable to execute block with invalid input data: {error}"
diff --git a/autogpt_platform/backend/backend/executor/manager.py b/autogpt_platform/backend/backend/executor/manager.py
index 0890458be..ae0166e03 100644
--- a/autogpt_platform/backend/backend/executor/manager.py
+++ b/autogpt_platform/backend/backend/executor/manager.py
@@ -170,12 +170,15 @@ def execute_node(
     log_metadata.info("Executed node with input", input=input_data_str)
     update_execution(ExecutionStatus.RUNNING)
 
+    # Inject extra execution arguments for the blocks via kwargs
     extra_exec_kwargs: dict = {
         "graph_id": graph_id,
         "node_id": node_id,
         "graph_exec_id": graph_exec_id,
         "node_exec_id": node_exec_id,
+        "user_id": user_id,
     }
+
     # Last-minute fetch credentials + acquire a system-wide read-write lock to prevent
     # changes during execution. ⚠️ This means a set of credentials can only be used by
     # one (running) block at a time; simultaneous execution of blocks using same
diff --git a/autogpt_platform/backend/backend/integrations/credentials_store.py b/autogpt_platform/backend/backend/integrations/credentials_store.py
index ee1c05bf1..79edc769b 100644
--- a/autogpt_platform/backend/backend/integrations/credentials_store.py
+++ b/autogpt_platform/backend/backend/integrations/credentials_store.py
@@ -130,6 +130,13 @@ nvidia_credentials = APIKeyCredentials(
     title="Use Credits for Nvidia",
     expires_at=None,
 )
+mem0_credentials = APIKeyCredentials(
+    id="ed55ac19-356e-4243-a6cb-bc599e9b716f",
+    provider="mem0",
+    api_key=SecretStr(settings.secrets.mem0_api_key),
+    title="Use Credits for Mem0",
+    expires_at=None,
+)
 
 
 DEFAULT_CREDENTIALS = [
@@ -148,6 +155,7 @@ DEFAULT_CREDENTIALS = [
     exa_credentials,
     e2b_credentials,
     nvidia_credentials,
+    mem0_credentials,
 ]
 
 
@@ -211,6 +219,8 @@ class IntegrationCredentialsStore:
             all_credentials.append(e2b_credentials)
         if settings.secrets.nvidia_api_key:
             all_credentials.append(nvidia_credentials)
+        if settings.secrets.mem0_api_key:
+            all_credentials.append(mem0_credentials)
         return all_credentials
 
     def get_creds_by_id(self, user_id: str, credentials_id: str) -> Credentials | None:
diff --git a/autogpt_platform/backend/backend/integrations/providers.py b/autogpt_platform/backend/backend/integrations/providers.py
index c8cebe0a5..a9f810fbc 100644
--- a/autogpt_platform/backend/backend/integrations/providers.py
+++ b/autogpt_platform/backend/backend/integrations/providers.py
@@ -19,6 +19,7 @@ class ProviderName(str, Enum):
     JINA = "jina"
     LINEAR = "linear"
     MEDIUM = "medium"
+    MEM0 = "mem0"
     NOTION = "notion"
     NVIDIA = "nvidia"
     OLLAMA = "ollama"
diff --git a/autogpt_platform/backend/backend/util/settings.py b/autogpt_platform/backend/backend/util/settings.py
index 24f3ca6d3..153700dbc 100644
--- a/autogpt_platform/backend/backend/util/settings.py
+++ b/autogpt_platform/backend/backend/util/settings.py
@@ -315,6 +315,7 @@ class Secrets(UpdateTrackingModel["Secrets"], BaseSettings):
     exa_api_key: str = Field(default="", description="Exa API key")
     e2b_api_key: str = Field(default="", description="E2B API key")
     nvidia_api_key: str = Field(default="", description="Nvidia API key")
+    mem0_api_key: str = Field(default="", description="Mem0 API key")
 
     linear_client_id: str = Field(default="", description="Linear client ID")
     linear_client_secret: str = Field(default="", description="Linear client secret")
diff --git a/autogpt_platform/backend/backend/util/test.py b/autogpt_platform/backend/backend/util/test.py
index 79dc0c4cd..91718f5da 100644
--- a/autogpt_platform/backend/backend/util/test.py
+++ b/autogpt_platform/backend/backend/util/test.py
@@ -1,5 +1,6 @@
 import logging
 import time
+import uuid
 from typing import Sequence, cast
 
 from backend.data import db
@@ -104,7 +105,13 @@ def execute_block_test(block: Block):
             log.info(f"{prefix} mock {mock_name} not found in block")
 
     # Populate credentials argument(s)
-    extra_exec_kwargs = {}
+    extra_exec_kwargs: dict = {
+        "graph_id": str(uuid.uuid4()),
+        "node_id": str(uuid.uuid4()),
+        "graph_exec_id": str(uuid.uuid4()),
+        "node_exec_id": str(uuid.uuid4()),
+        "user_id": str(uuid.uuid4()),
+    }
     input_model = cast(type[BlockSchema], block.input_schema)
     credentials_input_fields = input_model.get_credentials_fields()
     if len(credentials_input_fields) == 1 and isinstance(
@@ -125,7 +132,9 @@ def execute_block_test(block: Block):
 
         for output_name, output_data in block.execute(input_data, **extra_exec_kwargs):
             if output_index >= len(block.test_output):
-                raise ValueError(f"{prefix} produced output more than expected")
+                raise ValueError(
+                    f"{prefix} produced output more than expected {output_index} >= {len(block.test_output)}:\nOutput Expected:\t\t{block.test_output}\nFailed Output Produced:\t('{output_name}', {output_data})\nNote that this may not be the one that was unexpected, but it is the first that triggered the extra output warning"
+                )
             ex_output_name, ex_output_data = block.test_output[output_index]
 
             def compare(data, expected_data):
@@ -142,7 +151,9 @@ def execute_block_test(block: Block):
                 log.info(f"{prefix} {mark} comparing `{data}` vs `{expected_data}`")
                 if not is_matching:
                     raise ValueError(
-                        f"{prefix}: wrong output {data} vs {expected_data}"
+                        f"{prefix}: wrong output {data} vs {expected_data}\n"
+                        f"Output Expected:\t\t{block.test_output}\n"
+                        f"Failed Output Produced:\t('{output_name}', {output_data})"
                     )
 
             compare(output_data, ex_output_data)
diff --git a/autogpt_platform/backend/poetry.lock b/autogpt_platform/backend/poetry.lock
index 587d8b111..8a195bc1b 100644
--- a/autogpt_platform/backend/poetry.lock
+++ b/autogpt_platform/backend/poetry.lock
@@ -1,4 +1,4 @@
-# This file is automatically @generated by Poetry 2.0.0 and should not be changed by hand.
+# This file is automatically @generated by Poetry 2.0.1 and should not be changed by hand.
 
 [[package]]
 name = "aio-pika"
@@ -306,6 +306,18 @@ supabase = "^2.11.0"
 type = "directory"
 url = "../autogpt_libs"
 
+[[package]]
+name = "backoff"
+version = "2.2.1"
+description = "Function decoration for backoff and retry"
+optional = false
+python-versions = ">=3.7,<4.0"
+groups = ["main"]
+files = [
+    {file = "backoff-2.2.1-py3-none-any.whl", hash = "sha256:63579f9a0628e06278f7e47b7d7d5b6ce20dc65c5e96a6f3ca99a6adca0396e8"},
+    {file = "backoff-2.2.1.tar.gz", hash = "sha256:03f829f5bb1923180821643f8753b0502c3b682293992485b0eef2807afa5cba"},
+]
+
 [[package]]
 name = "black"
 version = "24.10.0"
@@ -1501,6 +1513,76 @@ googleapis-common-protos = ">=1.5.5"
 grpcio = ">=1.70.0"
 protobuf = ">=5.26.1,<6.0dev"
 
+[[package]]
+name = "grpcio-tools"
+version = "1.68.0"
+description = "Protobuf code generator for gRPC"
+optional = false
+python-versions = ">=3.8"
+groups = ["main"]
+files = [
+    {file = "grpcio_tools-1.68.0-cp310-cp310-linux_armv7l.whl", hash = "sha256:9509a5c3ed3d54fa7ac20748d501cb86668f764605a0a68f275339ee0f1dc1a6"},
+    {file = "grpcio_tools-1.68.0-cp310-cp310-macosx_12_0_universal2.whl", hash = "sha256:59a885091bf29700ba0e14a954d156a18714caaa2006a7f328b18e1ac4b1e721"},
+    {file = "grpcio_tools-1.68.0-cp310-cp310-manylinux_2_17_aarch64.whl", hash = "sha256:d3e678162e1d7a8720dc05fdd537fc8df082a50831791f7bb1c6f90095f8368b"},
+    {file = "grpcio_tools-1.68.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:10d03e3ad4af6284fd27cb14f5a3d52045913c1253e3e24a384ed91bc8adbfcd"},
+    {file = "grpcio_tools-1.68.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1769d7f529de1cc102f7fb900611e3c0b69bdb244fca1075b24d6e5b49024586"},
+    {file = "grpcio_tools-1.68.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:88640d95ee41921ac7352fa5fadca52a06d7e21fbe53e6a706a9a494f756be7d"},
+    {file = "grpcio_tools-1.68.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e903d07bc65232aa9e7704c829aec263e1e139442608e473d7912417a9908e29"},
+    {file = "grpcio_tools-1.68.0-cp310-cp310-win32.whl", hash = "sha256:66b70b37184d40806844f51c2757c6b852511d4ea46a3bf2c7e931a47b455bc6"},
+    {file = "grpcio_tools-1.68.0-cp310-cp310-win_amd64.whl", hash = "sha256:b47ae076ffb29a68e517bc03552bef0d9c973f8e18adadff180b123e973a26ea"},
+    {file = "grpcio_tools-1.68.0-cp311-cp311-linux_armv7l.whl", hash = "sha256:f65942fab440e99113ce14436deace7554d5aa554ea18358e3a5f3fc47efe322"},
+    {file = "grpcio_tools-1.68.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8fefc6d000e169a97336feded23ce614df3fb9926fc48c7a9ff8ea459d93b5b0"},
+    {file = "grpcio_tools-1.68.0-cp311-cp311-manylinux_2_17_aarch64.whl", hash = "sha256:6dd69c9f3ff85eee8d1f71adf7023c638ca8d465633244ac1b7f19bc3668612d"},
+    {file = "grpcio_tools-1.68.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7dc5195dc02057668cc22da1ff1aea1811f6fa0deb801b3194dec1fe0bab1cf0"},
+    {file = "grpcio_tools-1.68.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:849b12bec2320e49e988df104c92217d533e01febac172a4495caab36d9f0edc"},
+    {file = "grpcio_tools-1.68.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:766c2cd2e365e0fc0e559af56f2c2d144d95fd7cb8668a34d533e66d6435eb34"},
+    {file = "grpcio_tools-1.68.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2ec3a2e0afa4866ccc5ba33c071aebaa619245dfdd840cbb74f2b0591868d085"},
+    {file = "grpcio_tools-1.68.0-cp311-cp311-win32.whl", hash = "sha256:80b733014eb40d920d836d782e5cdea0dcc90d251a2ffb35ab378ef4f8a42c14"},
+    {file = "grpcio_tools-1.68.0-cp311-cp311-win_amd64.whl", hash = "sha256:f95103e3e4e7fee7c6123bc9e4e925e07ad24d8d09d7c1c916fb6c8d1cb9e726"},
+    {file = "grpcio_tools-1.68.0-cp312-cp312-linux_armv7l.whl", hash = "sha256:dd9a654af8536b3de8525bff72a245fef62d572eabf96ac946fe850e707cb27d"},
+    {file = "grpcio_tools-1.68.0-cp312-cp312-macosx_10_9_universal2.whl", hash = "sha256:0f77957e3a0916a0dd18d57ce6b49d95fc9a5cfed92310f226339c0fda5394f6"},
+    {file = "grpcio_tools-1.68.0-cp312-cp312-manylinux_2_17_aarch64.whl", hash = "sha256:92a09afe64fe26696595de2036e10967876d26b12c894cc9160f00152cacebe7"},
+    {file = "grpcio_tools-1.68.0-cp312-cp312-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:28ebdbad2ef16699d07400b65260240851049a75502eff69a59b127d3ab960f1"},
+    {file = "grpcio_tools-1.68.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d3150d784d8050b10dcf5eb06e04fb90747a1547fed3a062a608d940fe57066"},
+    {file = "grpcio_tools-1.68.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:261d98fd635595de42aadee848f9af46da6654d63791c888891e94f66c5d0682"},
+    {file = "grpcio_tools-1.68.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:061345c0079b9471f32230186ab01acb908ea0e577bc1699a8cf47acef8be4af"},
+    {file = "grpcio_tools-1.68.0-cp312-cp312-win32.whl", hash = "sha256:533ce6791a5ba21e35d74c6c25caf4776f5692785a170c01ea1153783ad5af31"},
+    {file = "grpcio_tools-1.68.0-cp312-cp312-win_amd64.whl", hash = "sha256:56842a0ce74b4b92eb62cd5ee00181b2d3acc58ba0c4fd20d15a5db51f891ba6"},
+    {file = "grpcio_tools-1.68.0-cp313-cp313-linux_armv7l.whl", hash = "sha256:1117a81592542f0c36575082daa6413c57ca39188b18a4c50ec7332616f4b97e"},
+    {file = "grpcio_tools-1.68.0-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:51e5a090849b30c99a2396d42140b8a3e558eff6cdfa12603f9582e2cd07724e"},
+    {file = "grpcio_tools-1.68.0-cp313-cp313-manylinux_2_17_aarch64.whl", hash = "sha256:4fe611d89a1836df8936f066d39c7eb03d4241806449ec45d4b8e1c843ae8011"},
+    {file = "grpcio_tools-1.68.0-cp313-cp313-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c10f3faa0cc4d89eb546f53b623837af23e86dc495d3b89510bcc0e0a6c0b8b2"},
+    {file = "grpcio_tools-1.68.0-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:46b537480b8fd2195d988120a28467601a2a3de2e504043b89fb90318e1eb754"},
+    {file = "grpcio_tools-1.68.0-cp313-cp313-musllinux_1_1_i686.whl", hash = "sha256:17d0c9004ea82b4213955a585401e80c30d4b37a1d4ace32ccdea8db4d3b7d43"},
+    {file = "grpcio_tools-1.68.0-cp313-cp313-musllinux_1_1_x86_64.whl", hash = "sha256:2919faae04fe47bad57fc9b578aeaab527da260e851f321a253b6b11862254a8"},
+    {file = "grpcio_tools-1.68.0-cp313-cp313-win32.whl", hash = "sha256:ee86157ef899f58ba2fe1055cce0d33bd703e99aa6d5a0895581ac3969f06bfa"},
+    {file = "grpcio_tools-1.68.0-cp313-cp313-win_amd64.whl", hash = "sha256:d0470ffc6a93c86cdda48edd428d22e2fef17d854788d60d0d5f291038873157"},
+    {file = "grpcio_tools-1.68.0-cp38-cp38-linux_armv7l.whl", hash = "sha256:795f2cd76f68a12b0b5541b98187ba367dd69b49d359cf98b781ead742961370"},
+    {file = "grpcio_tools-1.68.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:57e29e78c33fb1b1d557fbe7650d722d1f2b0a9f53ea73beb8ea47e627b6000b"},
+    {file = "grpcio_tools-1.68.0-cp38-cp38-manylinux_2_17_aarch64.whl", hash = "sha256:700f171cd3293ee8d50cd43171562ff07b14fa8e49ee471cd91c6924c7da8644"},
+    {file = "grpcio_tools-1.68.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:196cd8a3a5963a4c9e424314df9eb573b305e6f958fe6508d26580ce01e7aa56"},
+    {file = "grpcio_tools-1.68.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cad40c3164ee9cef62524dea509449ea581b17ea493178beef051bf79b5103ca"},
+    {file = "grpcio_tools-1.68.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ab93fab49fa1e699e577ff5fbb99aba660164d710d4c33cfe0aa9d06f585539f"},
+    {file = "grpcio_tools-1.68.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:511224a99726eb84db9ddb84dc8a75377c3eae797d835f99e80128ec618376d5"},
+    {file = "grpcio_tools-1.68.0-cp38-cp38-win32.whl", hash = "sha256:b4ca81770cd729a9ea536d871aacedbde2b732bb9bb83c9d993d63f58502153d"},
+    {file = "grpcio_tools-1.68.0-cp38-cp38-win_amd64.whl", hash = "sha256:6950725bf7a496f81d3ec3324334ffc9dbec743b510dd0e897f51f8627eeb6ac"},
+    {file = "grpcio_tools-1.68.0-cp39-cp39-linux_armv7l.whl", hash = "sha256:01ace351a51d7ee120963a4612b1f00e964462ec548db20d17f8902e238592c8"},
+    {file = "grpcio_tools-1.68.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:5afd2f3f7257b52228a7808a2b4a765893d4d802d7a2377d9284853e67d045c6"},
+    {file = "grpcio_tools-1.68.0-cp39-cp39-manylinux_2_17_aarch64.whl", hash = "sha256:453ee3193d59c974c678d91f08786f43c25ef753651b0825dc3d008c31baf68d"},
+    {file = "grpcio_tools-1.68.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b094b22919b786ad73c20372ef5e546330e7cd2c6dc12293b7ed586975f35d38"},
+    {file = "grpcio_tools-1.68.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:26335eea976dfc1ff5d90b19c309a9425bd53868112a0507ad20f297f2c21d3e"},
+    {file = "grpcio_tools-1.68.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:c77ecc5164bb413a613bdac9091dcc29d26834a2ac42fcd1afdfcda9e3003e68"},
+    {file = "grpcio_tools-1.68.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e31be6dc61496a59c1079b0a669f93dfcc2cdc4b1dbdc4374247cd09cee1329b"},
+    {file = "grpcio_tools-1.68.0-cp39-cp39-win32.whl", hash = "sha256:3aa40958355920ae2846c6fb5cadac4f2c8e33234a2982fef8101da0990e3968"},
+    {file = "grpcio_tools-1.68.0-cp39-cp39-win_amd64.whl", hash = "sha256:19bafb80948eda979b1b3a63c1567162d06249f43068a0e46a028a448e6f72d4"},
+    {file = "grpcio_tools-1.68.0.tar.gz", hash = "sha256:737804ec2225dd4cc27e633b4ca0e963b0795161bf678285fab6586e917fd867"},
+]
+
+[package.dependencies]
+grpcio = ">=1.68.0"
+protobuf = ">=5.26.1,<6.0dev"
+setuptools = "*"
+
 [[package]]
 name = "h11"
 version = "0.14.0"
@@ -2067,6 +2149,41 @@ files = [
     {file = "mccabe-0.7.0.tar.gz", hash = "sha256:348e0240c33b60bbdf4e523192ef919f28cb2c3d7d5c7794f74009290f236325"},
 ]
 
+[[package]]
+name = "mem0ai"
+version = "0.1.44"
+description = "Long-term memory for AI Agents"
+optional = false
+python-versions = "<4.0,>=3.9"
+groups = ["main"]
+files = [
+    {file = "mem0ai-0.1.44-py3-none-any.whl", hash = "sha256:32260a2cd935035a1b16ce04ad2e4510a5bd97618709466e2d06303e0eb8d9d4"},
+    {file = "mem0ai-0.1.44.tar.gz", hash = "sha256:93214272915d94f673d370bb8fe7a8bfc21806267e65700b471bec454dcdfa5c"},
+]
+
+[package.dependencies]
+openai = ">=1.33.0,<2.0.0"
+posthog = ">=3.5.0,<4.0.0"
+pydantic = ">=2.7.3,<3.0.0"
+pytz = ">=2024.1,<2025.0"
+qdrant-client = ">=1.9.1,<2.0.0"
+sqlalchemy = ">=2.0.31,<3.0.0"
+
+[package.extras]
+graph = ["langchain-community (>=0.3.1,<0.4.0)", "neo4j (>=5.23.1,<6.0.0)", "rank-bm25 (>=0.2.2,<0.3.0)"]
+
+[[package]]
+name = "monotonic"
+version = "1.6"
+description = "An implementation of time.monotonic() for Python 2 & < 3.3"
+optional = false
+python-versions = "*"
+groups = ["main"]
+files = [
+    {file = "monotonic-1.6-py2.py3-none-any.whl", hash = "sha256:68687e19a14f11f26d140dd5c86f3dba4bf5df58003000ed467e0e2a69bca96c"},
+    {file = "monotonic-1.6.tar.gz", hash = "sha256:3a55207bcfed53ddd5c5bae174524062935efed17792e9de2ad0205ce9ad63f7"},
+]
+
 [[package]]
 name = "moviepy"
 version = "2.1.2"
@@ -2619,6 +2736,26 @@ tomli = {version = ">=1.2.2", markers = "python_version < \"3.11\""}
 [package.extras]
 poetry-plugin = ["poetry (>=1.0,<3.0)"]
 
+[[package]]
+name = "portalocker"
+version = "2.10.1"
+description = "Wraps the portalocker recipe for easy usage"
+optional = false
+python-versions = ">=3.8"
+groups = ["main"]
+files = [
+    {file = "portalocker-2.10.1-py3-none-any.whl", hash = "sha256:53a5984ebc86a025552264b459b46a2086e269b21823cb572f8f28ee759e45bf"},
+    {file = "portalocker-2.10.1.tar.gz", hash = "sha256:ef1bf844e878ab08aee7e40184156e1151f228f103aa5c6bd0724cc330960f8f"},
+]
+
+[package.dependencies]
+pywin32 = {version = ">=226", markers = "platform_system == \"Windows\""}
+
+[package.extras]
+docs = ["sphinx (>=1.7.1)"]
+redis = ["redis"]
+tests = ["pytest (>=5.4.1)", "pytest-cov (>=2.8.1)", "pytest-mypy (>=0.8.0)", "pytest-timeout (>=2.1.0)", "redis", "sphinx (>=6.0.0)", "types-redis"]
+
 [[package]]
 name = "postgrest"
 version = "0.19.1"
@@ -2637,6 +2774,31 @@ httpx = {version = ">=0.26,<0.29", extras = ["http2"]}
 pydantic = ">=1.9,<3.0"
 strenum = {version = ">=0.4.9,<0.5.0", markers = "python_version < \"3.11\""}
 
+[[package]]
+name = "posthog"
+version = "3.8.3"
+description = "Integrate PostHog into any python application."
+optional = false
+python-versions = "*"
+groups = ["main"]
+files = [
+    {file = "posthog-3.8.3-py2.py3-none-any.whl", hash = "sha256:7215c4d7649b0c87905b42f460403311564996d776ab48d39852f46539a50f22"},
+    {file = "posthog-3.8.3.tar.gz", hash = "sha256:263df03ea312d4b47a3d5ea393fdb22ff2ed78140d5ce9af9dd0618ae245a44b"},
+]
+
+[package.dependencies]
+backoff = ">=1.10.0"
+monotonic = ">=1.5"
+python-dateutil = ">2.1"
+requests = ">=2.7,<3.0"
+six = ">=1.5"
+
+[package.extras]
+dev = ["black", "flake8", "flake8-print", "isort", "pre-commit"]
+langchain = ["langchain (>=0.2.0)"]
+sentry = ["django", "sentry-sdk"]
+test = ["coverage", "django", "flake8", "freezegun (==0.3.15)", "langchain-community (>=0.2.0)", "langchain-openai (>=0.2.0)", "mock (>=2.0.0)", "pylint", "pytest", "pytest-asyncio", "pytest-timeout"]
+
 [[package]]
 name = "praw"
 version = "7.8.1"
@@ -3382,6 +3544,47 @@ files = [
     {file = "python_multipart-0.0.20.tar.gz", hash = "sha256:8dd0cab45b8e23064ae09147625994d090fa46f5b0d1e13af944c331a7fa9d13"},
 ]
 
+[[package]]
+name = "pytz"
+version = "2024.2"
+description = "World timezone definitions, modern and historical"
+optional = false
+python-versions = "*"
+groups = ["main"]
+files = [
+    {file = "pytz-2024.2-py2.py3-none-any.whl", hash = "sha256:31c7c1817eb7fae7ca4b8c7ee50c72f93aa2dd863de768e1ef4245d426aa0725"},
+    {file = "pytz-2024.2.tar.gz", hash = "sha256:2aa355083c50a0f93fa581709deac0c9ad65cca8a9e9beac660adcbd493c798a"},
+]
+
+[[package]]
+name = "pywin32"
+version = "308"
+description = "Python for Window Extensions"
+optional = false
+python-versions = "*"
+groups = ["main"]
+markers = "platform_system == \"Windows\""
+files = [
+    {file = "pywin32-308-cp310-cp310-win32.whl", hash = "sha256:796ff4426437896550d2981b9c2ac0ffd75238ad9ea2d3bfa67a1abd546d262e"},
+    {file = "pywin32-308-cp310-cp310-win_amd64.whl", hash = "sha256:4fc888c59b3c0bef905ce7eb7e2106a07712015ea1c8234b703a088d46110e8e"},
+    {file = "pywin32-308-cp310-cp310-win_arm64.whl", hash = "sha256:a5ab5381813b40f264fa3495b98af850098f814a25a63589a8e9eb12560f450c"},
+    {file = "pywin32-308-cp311-cp311-win32.whl", hash = "sha256:5d8c8015b24a7d6855b1550d8e660d8daa09983c80e5daf89a273e5c6fb5095a"},
+    {file = "pywin32-308-cp311-cp311-win_amd64.whl", hash = "sha256:575621b90f0dc2695fec346b2d6302faebd4f0f45c05ea29404cefe35d89442b"},
+    {file = "pywin32-308-cp311-cp311-win_arm64.whl", hash = "sha256:100a5442b7332070983c4cd03f2e906a5648a5104b8a7f50175f7906efd16bb6"},
+    {file = "pywin32-308-cp312-cp312-win32.whl", hash = "sha256:587f3e19696f4bf96fde9d8a57cec74a57021ad5f204c9e627e15c33ff568897"},
+    {file = "pywin32-308-cp312-cp312-win_amd64.whl", hash = "sha256:00b3e11ef09ede56c6a43c71f2d31857cf7c54b0ab6e78ac659497abd2834f47"},
+    {file = "pywin32-308-cp312-cp312-win_arm64.whl", hash = "sha256:9b4de86c8d909aed15b7011182c8cab38c8850de36e6afb1f0db22b8959e3091"},
+    {file = "pywin32-308-cp313-cp313-win32.whl", hash = "sha256:1c44539a37a5b7b21d02ab34e6a4d314e0788f1690d65b48e9b0b89f31abbbed"},
+    {file = "pywin32-308-cp313-cp313-win_amd64.whl", hash = "sha256:fd380990e792eaf6827fcb7e187b2b4b1cede0585e3d0c9e84201ec27b9905e4"},
+    {file = "pywin32-308-cp313-cp313-win_arm64.whl", hash = "sha256:ef313c46d4c18dfb82a2431e3051ac8f112ccee1a34f29c263c583c568db63cd"},
+    {file = "pywin32-308-cp37-cp37m-win32.whl", hash = "sha256:1f696ab352a2ddd63bd07430080dd598e6369152ea13a25ebcdd2f503a38f1ff"},
+    {file = "pywin32-308-cp37-cp37m-win_amd64.whl", hash = "sha256:13dcb914ed4347019fbec6697a01a0aec61019c1046c2b905410d197856326a6"},
+    {file = "pywin32-308-cp38-cp38-win32.whl", hash = "sha256:5794e764ebcabf4ff08c555b31bd348c9025929371763b2183172ff4708152f0"},
+    {file = "pywin32-308-cp38-cp38-win_amd64.whl", hash = "sha256:3b92622e29d651c6b783e368ba7d6722b1634b8e70bd376fd7610fe1992e19de"},
+    {file = "pywin32-308-cp39-cp39-win32.whl", hash = "sha256:7873ca4dc60ab3287919881a7d4f88baee4a6e639aa6962de25a98ba6b193341"},
+    {file = "pywin32-308-cp39-cp39-win_amd64.whl", hash = "sha256:71b3322d949b4cc20776436a9c9ba0eeedcbc9c650daa536df63f0ff111bb920"},
+]
+
 [[package]]
 name = "pyyaml"
 version = "6.0.2"
@@ -3445,6 +3648,34 @@ files = [
     {file = "pyyaml-6.0.2.tar.gz", hash = "sha256:d584d9ec91ad65861cc08d42e834324ef890a082e591037abe114850ff7bbc3e"},
 ]
 
+[[package]]
+name = "qdrant-client"
+version = "1.12.2"
+description = "Client library for the Qdrant vector search engine"
+optional = false
+python-versions = ">=3.9"
+groups = ["main"]
+files = [
+    {file = "qdrant_client-1.12.2-py3-none-any.whl", hash = "sha256:a0ae500a46a679ff3521ba3f1f1cf3d72b57090a768cec65fc317066bcbac1e6"},
+    {file = "qdrant_client-1.12.2.tar.gz", hash = "sha256:2777e09b3e89bb22bb490384d8b1fa8140f3915287884f18984f7031a346aba5"},
+]
+
+[package.dependencies]
+grpcio = ">=1.41.0"
+grpcio-tools = ">=1.41.0"
+httpx = {version = ">=0.20.0", extras = ["http2"]}
+numpy = [
+    {version = ">=1.21", markers = "python_version >= \"3.10\" and python_version < \"3.12\""},
+    {version = ">=1.26", markers = "python_version >= \"3.12\" and python_version < \"3.13\""},
+]
+portalocker = ">=2.7.0,<3.0.0"
+pydantic = ">=1.10.8"
+urllib3 = ">=1.26.14,<3"
+
+[package.extras]
+fastembed = ["fastembed (==0.5.0)"]
+fastembed-gpu = ["fastembed-gpu (==0.5.0)"]
+
 [[package]]
 name = "realtime"
 version = "2.2.0"
@@ -3793,6 +4024,27 @@ files = [
     {file = "serpent-1.41.tar.gz", hash = "sha256:0407035fe3c6644387d48cff1467d5aa9feff814d07372b78677ed0ee3ed7095"},
 ]
 
+[[package]]
+name = "setuptools"
+version = "75.8.0"
+description = "Easily download, build, install, upgrade, and uninstall Python packages"
+optional = false
+python-versions = ">=3.9"
+groups = ["main"]
+files = [
+    {file = "setuptools-75.8.0-py3-none-any.whl", hash = "sha256:e3982f444617239225d675215d51f6ba05f845d4eec313da4418fdbb56fb27e3"},
+    {file = "setuptools-75.8.0.tar.gz", hash = "sha256:c5afc8f407c626b8313a86e10311dd3f661c6cd9c09d4bf8c15c0e11f9f2b0e6"},
+]
+
+[package.extras]
+check = ["pytest-checkdocs (>=2.4)", "pytest-ruff (>=0.2.1)", "ruff (>=0.8.0)"]
+core = ["importlib_metadata (>=6)", "jaraco.collections", "jaraco.functools (>=4)", "jaraco.text (>=3.7)", "more_itertools", "more_itertools (>=8.8)", "packaging", "packaging (>=24.2)", "platformdirs (>=4.2.2)", "tomli (>=2.0.1)", "wheel (>=0.43.0)"]
+cover = ["pytest-cov"]
+doc = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "pyproject-hooks (!=1.1)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (>=1,<2)", "sphinx-reredirects", "sphinxcontrib-towncrier", "towncrier (<24.7)"]
+enabler = ["pytest-enabler (>=2.2)"]
+test = ["build[virtualenv] (>=1.0.3)", "filelock (>=3.4.0)", "ini2toml[lite] (>=0.14)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.7.2)", "jaraco.test (>=5.5)", "packaging (>=24.2)", "pip (>=19.1)", "pyproject-hooks (!=1.1)", "pytest (>=6,!=8.1.*)", "pytest-home (>=0.5)", "pytest-perf", "pytest-subprocess", "pytest-timeout", "pytest-xdist (>=3)", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel (>=0.44.0)"]
+type = ["importlib_metadata (>=7.0.2)", "jaraco.develop (>=7.21)", "mypy (==1.14.*)", "pytest-mypy"]
+
 [[package]]
 name = "sgmllib3k"
 version = "1.0.0"
@@ -4782,4 +5034,4 @@ type = ["pytest-mypy"]
 [metadata]
 lock-version = "2.1"
 python-versions = ">=3.10,<3.13"
-content-hash = "e67dceb1406873bcd28a0bcd105ff587b8ff808b438cbc7fdbf9e7086b4b4bb0"
+content-hash = "38a5c750ddca1a6264fd98b7ee74d199c2bbf57d3acc189264bd9f8ec90febc2"
diff --git a/autogpt_platform/backend/pyproject.toml b/autogpt_platform/backend/pyproject.toml
index 41dc1ced4..ad7061468 100644
--- a/autogpt_platform/backend/pyproject.toml
+++ b/autogpt_platform/backend/pyproject.toml
@@ -55,6 +55,7 @@ sqlalchemy = "^2.0.36"
 psycopg2-binary = "^2.9.10"
 google-cloud-storage = "^2.18.2"
 launchdarkly-server-sdk = "^9.8.0"
+mem0ai = "^0.1.44"
 moviepy = "^2.1.2"
 
 [tool.poetry.group.dev.dependencies]
diff --git a/autogpt_platform/backend/test/executor/test_manager.py b/autogpt_platform/backend/test/executor/test_manager.py
index b5959e21b..d98c89be3 100644
--- a/autogpt_platform/backend/test/executor/test_manager.py
+++ b/autogpt_platform/backend/test/executor/test_manager.py
@@ -44,7 +44,7 @@ async def execute_graph(
 
     # Execution queue should be empty
     logger.info("Waiting for execution to complete...")
-    result = await wait_execution(test_user.id, test_graph.id, graph_exec_id)
+    result = await wait_execution(test_user.id, test_graph.id, graph_exec_id, 30)
     logger.info(f"Execution completed with {len(result)} results")
     assert len(result) == num_execs
     return graph_exec_id
diff --git a/autogpt_platform/frontend/src/app/marketplace/(user)/integrations/page.tsx b/autogpt_platform/frontend/src/app/marketplace/(user)/integrations/page.tsx
index 330b470f8..fdc0bf2b4 100644
--- a/autogpt_platform/frontend/src/app/marketplace/(user)/integrations/page.tsx
+++ b/autogpt_platform/frontend/src/app/marketplace/(user)/integrations/page.tsx
@@ -109,6 +109,11 @@ export default function PrivatePage() {
       "7f26de70-ba0d-494e-ba76-238e65e7b45f", // Jina
       "66f20754-1b81-48e4-91d0-f4f0dd82145f", // Unreal Speech
       "b5a0e27d-0c98-4df3-a4b9-10193e1f3c40", // Open Router
+      "6c0f5bd0-9008-4638-9d79-4b40b631803e", // FAL
+      "96153e04-9c6c-4486-895f-5bb683b1ecec", // Exa
+      "78d19fd7-4d59-4a16-8277-3ce310acf2b7", // E2B
+      "96b83908-2789-4dec-9968-18f0ece4ceb3", // Nvidia
+      "ed55ac19-356e-4243-a6cb-bc599e9b716f", // Mem0
     ],
     [],
   );
diff --git a/autogpt_platform/frontend/src/app/profile/page.tsx b/autogpt_platform/frontend/src/app/profile/page.tsx
index a4fb51266..445077dab 100644
--- a/autogpt_platform/frontend/src/app/profile/page.tsx
+++ b/autogpt_platform/frontend/src/app/profile/page.tsx
@@ -109,6 +109,11 @@ export default function PrivatePage() {
       "7f26de70-ba0d-494e-ba76-238e65e7b45f", // Jina
       "66f20754-1b81-48e4-91d0-f4f0dd82145f", // Unreal Speech
       "b5a0e27d-0c98-4df3-a4b9-10193e1f3c40", // Open Router
+      "6c0f5bd0-9008-4638-9d79-4b40b631803e", // FAL
+      "96153e04-9c6c-4486-895f-5bb683b1ecec", // Exa
+      "78d19fd7-4d59-4a16-8277-3ce310acf2b7", // E2B
+      "96b83908-2789-4dec-9968-18f0ece4ceb3", // Nvidia
+      "ed55ac19-356e-4243-a6cb-bc599e9b716f", // Mem0
     ],
     [],
   );
diff --git a/autogpt_platform/frontend/src/components/integrations/credentials-input.tsx b/autogpt_platform/frontend/src/components/integrations/credentials-input.tsx
index 340fc6cb4..1fd21fcc5 100644
--- a/autogpt_platform/frontend/src/components/integrations/credentials-input.tsx
+++ b/autogpt_platform/frontend/src/components/integrations/credentials-input.tsx
@@ -69,6 +69,7 @@ export const providerIcons: Record<
   ideogram: fallbackIcon,
   linear: fallbackIcon,
   medium: FaMedium,
+  mem0: fallbackIcon,
   ollama: fallbackIcon,
   openai: fallbackIcon,
   openweathermap: fallbackIcon,
diff --git a/autogpt_platform/frontend/src/components/integrations/credentials-provider.tsx b/autogpt_platform/frontend/src/components/integrations/credentials-provider.tsx
index ab8f0a5a9..8fdf0932b 100644
--- a/autogpt_platform/frontend/src/components/integrations/credentials-provider.tsx
+++ b/autogpt_platform/frontend/src/components/integrations/credentials-provider.tsx
@@ -32,6 +32,7 @@ const providerDisplayNames: Record<CredentialsProviderName, string> = {
   jina: "Jina",
   linear: "Linear",
   medium: "Medium",
+  mem0: "Mem0",
   notion: "Notion",
   nvidia: "Nvidia",
   ollama: "Ollama",
diff --git a/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts b/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts
index dc046fd58..69fda51c3 100644
--- a/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts
+++ b/autogpt_platform/frontend/src/lib/autogpt-server-api/types.ts
@@ -121,6 +121,7 @@ export const PROVIDER_NAMES = {
   JINA: "jina",
   LINEAR: "linear",
   MEDIUM: "medium",
+  MEM0: "mem0",
   NOTION: "notion",
   NVIDIA: "nvidia",
   OLLAMA: "ollama",
diff --git a/docs/content/platform/new_blocks.md b/docs/content/platform/new_blocks.md
index 4f887d213..ce9174272 100644
--- a/docs/content/platform/new_blocks.md
+++ b/docs/content/platform/new_blocks.md
@@ -102,6 +102,13 @@ Follow these steps to create and test a new block:
    - **API request**: Send a GET request to the Wikipedia API.
    - **Error handling**: Handle various exceptions that might occur during the API request and data processing. We don't need to catch all exceptions, only the ones we expect and can handle. The uncaught exceptions will be automatically yielded as `error` in the output. Any block that raises an exception (or yields an `error` output) will be marked as failed. Prefer raising exceptions over yielding `error`, as it will stop the execution immediately.
    - **Yield**: Use `yield` to output the results. Prefer to output one result object at a time. If you are calling a function that returns a list, you can yield each item in the list separately. You can also yield the whole list as well, but do both rather than yielding the list. For example: If you were writing a block that outputs emails, you'd yield each email as a separate result object, but you could also yield the whole list as an additional single result object. Yielding output named `error` will break the execution right away and mark the block execution as failed.
+   - **kwargs**: The `kwargs` parameter is used to pass additional arguments to the block. It is not used in the example above, but it is available to the block. You can also have args as inline signatures in the run method, à la `def run(self, input_data: Input, *, user_id: str, **kwargs) -> BlockOutput:`.
+       Available kwargs are:
+       - `user_id`: The ID of the user running the block.
+       - `graph_id`: The ID of the agent that is executing the block. This is the same for every version of the agent.
+       - `graph_exec_id`: The ID of the execution of the agent. This changes every time the agent has a new "run".
+       - `node_exec_id`: The ID of the execution of the node. This changes every time the node is executed.
+       - `node_id`: The ID of the node that is being executed. It changes with every version of the graph, but not every time the node is executed.
 
 ### Field Types