LangGraph Integration
Build stateful, multi-actor applications with LangGraph and Memorystack. Add persistent memory to your agent workflows and state machines.
✓ LangGraph ✓ State Machines ✓ Multi-Agent
Installation
pip install langgraph langchain-openai memorystack
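The examples below read MEMORYSTACK_API_KEY from the environment, and ChatOpenAI expects OPENAI_API_KEY as well. A minimal sanity check before running them (a sketch, not part of the Memorystack API) might look like:
import os

# Fail fast if either key is missing (assumes the env var names used in this guide)
for var in ("MEMORYSTACK_API_KEY", "OPENAI_API_KEY"):
    if not os.environ.get(var):
        raise RuntimeError(f"Set {var} before running the examples")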
Basic Agent with Memory
Create a LangGraph agent that uses Memorystack for persistent state:
from langgraph.graph import StateGraph, END
from langchain_openai import ChatOpenAI
from memorystack import MemoryStackClient
from typing import TypedDict, Annotated
import operator
import os

# Initialize Memorystack
memory_client = MemoryStackClient(
    api_key=os.environ["MEMORYSTACK_API_KEY"]
)
# Define state
class AgentState(TypedDict):
    messages: Annotated[list, operator.add]
    user_id: str
    context: str
# Load memories into context
def load_memory(state: AgentState):
    memories = memory_client.list_memories(
        user_id=state["user_id"],
        limit=5
    )
    context = "\n".join([
        f"[{m.memory_type}] {m.content}"
        for m in memories.results
    ])
    return {"context": context}
# Agent node
def agent_node(state: AgentState):
    llm = ChatOpenAI(model="gpt-4")
    # Use context from memories
    system_msg = f"Context: {state['context']}"
    messages = [{"role": "system", "content": system_msg}] + state["messages"]
    response = llm.invoke(messages)
    # Save the exchange to Memorystack
    user_msg = state["messages"][-1]["content"]
    memory_client.add_conversation(
        user_msg,
        response.content,
        user_id=state["user_id"]
    )
    return {"messages": [response]}
# Build graph
workflow = StateGraph(AgentState)
workflow.add_node("load_memory", load_memory)
workflow.add_node("agent", agent_node)
workflow.set_entry_point("load_memory")
workflow.add_edge("load_memory", "agent")
workflow.add_edge("agent", END)
app = workflow.compile()
# Run
result = app.invoke({
    "messages": [{"role": "user", "content": "What do I prefer?"}],
    "user_id": "user_123",
    "context": ""
})
print(result["messages"][-1].content)
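Because agent_node writes each exchange back with add_conversation, a later invocation with the same user_id sees it again through load_memory, even in a new process. A minimal sketch reusing the app compiled above (the follow-up question is illustrative):
# A later session: load_memory now includes the conversation saved above
followup = app.invoke({
    "messages": [{"role": "user", "content": "What did we talk about earlier?"}],
    "user_id": "user_123",
    "context": ""
})
print(followup["messages"][-1].content)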
Multi-Agent System with Shared Memory
Build a multi-agent system where agents share memory:
from langgraph.graph import StateGraph, END
from langchain_openai import ChatOpenAI
from typing import TypedDict
class MultiAgentState(TypedDict):
    messages: list
    user_id: str
    current_agent: str
    shared_memory: dict
# Research Agent
def research_agent(state: MultiAgentState):
    # Get research-related memories
    memories = memory_client.list_memories(
        user_id=state["user_id"],
        memory_type="fact",
        limit=10
    )
    llm = ChatOpenAI(model="gpt-4")
    context = "\n".join([m.content for m in memories.results])
    response = llm.invoke([
        {"role": "system", "content": f"You are a research agent. Context: {context}"},
        {"role": "user", "content": state["messages"][-1]}
    ])
    # Save research findings
    memory_client.create_memory(
        messages=[
            {"role": "user", "content": state["messages"][-1]},
            {"role": "assistant", "content": response.content}
        ],
        user_id=state["user_id"],
        metadata={"agent": "research"}
    )
    return {
        "messages": [response.content],
        "current_agent": "writer"
    }
# Writer Agent
def writer_agent(state: MultiAgentState):
    # Get all memories, including research findings
    memories = memory_client.list_memories(
        user_id=state["user_id"],
        limit=15
    )
    llm = ChatOpenAI(model="gpt-4")
    context = "\n".join([m.content for m in memories.results])
    response = llm.invoke([
        {"role": "system", "content": f"You are a writer. Use this research: {context}"},
        {"role": "user", "content": "Write based on the research"}
    ])
    # Save final output
    memory_client.add_message(
        f"Final output: {response.content}",
        user_id=state["user_id"]
    )
    return {"messages": [response.content]}
# Router
def router(state: MultiAgentState):
    if state["current_agent"] == "research":
        return "research"
    elif state["current_agent"] == "writer":
        return "writer"
    return END
# Build multi-agent graph
workflow = StateGraph(MultiAgentState)
workflow.add_node("research", research_agent)
workflow.add_node("writer", writer_agent)
workflow.set_entry_point("research")
workflow.add_conditional_edges(
    "research",
    router,
    {"writer": "writer", END: END}
)
workflow.add_edge("writer", END)
app = workflow.compile()
# Run multi-agent workflow
result = app.invoke({
    "messages": ["Research and write about AI trends"],
    "user_id": "user_123",
    "current_agent": "research",
    "shared_memory": {}
})
Benefits
🔄 Stateful Workflows
Persist agent state across workflow executions with Memorystack.
👥 Multi-Agent Memory
Agents can share and access common memory for collaboration.
💾 Persistent Context
Context persists across sessions and application restarts.
🎯 Semantic Memory
Automatically extract and store semantic information from agent interactions.
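The fact-type memories behind this can be read back at any time, for example to inspect what your agents have learned about a user. A minimal sketch using the memory_client and the memory_type value shown in the multi-agent example above:
# List the semantic facts Memorystack has stored for this user
facts = memory_client.list_memories(
    user_id="user_123",
    memory_type="fact",
    limit=20
)
for fact in facts.results:
    print(f"[{fact.memory_type}] {fact.content}")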
