
    Vercel AI SDK Integration

    Add persistent memory to your Vercel AI SDK applications. Build streaming AI interfaces with long-term memory capabilities.

    ✓ Vercel AI SDK  ✓ Streaming Support  ✓ React Hooks

    Installation

    npm install ai openai-edge @memorystack/sdk
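
    If you call MemoryStack from more than one route, it can help to create the client once in a shared module and import it wherever it is needed. A minimal sketch, assuming only the constructor used in the route below (the lib/memorystack.ts path is just a convention, not required by the SDK):

    // lib/memorystack.ts
    import { MemoryStackClient } from '@memorystack/sdk';

    // A single shared client instance, reused across API routes.
    export const memorystack = new MemoryStackClient({
      apiKey: process.env.MEMORYSTACK_API_KEY!
    });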

    API Route with Memory

    Create an API route that uses MemoryStack with the Vercel AI SDK:

    // app/api/chat/route.ts
    import { OpenAIStream, StreamingTextResponse } from 'ai';
    import { Configuration, OpenAIApi } from 'openai-edge';
    import { MemoryStackClient } from '@memorystack/sdk';
    
    const config = new Configuration({
      apiKey: process.env.OPENAI_API_KEY
    });
    const openai = new OpenAIApi(config);
    
    const memorystack = new MemoryStackClient({
      apiKey: process.env.MEMORYSTACK_API_KEY!
    });
    
    export async function POST(req: Request) {
      const { messages, userId } = await req.json();
      
      // Get memories for context
      const memories = await memorystack.listMemories({
        user_id: userId,
        limit: 5
      });
      
      // Build context from memories
      const context = memories.results
        .map(m => `[${m.memory_type}] ${m.content}`)
        .join('\n');
      
      // Add context to system message
      const messagesWithContext = [
        {
          role: 'system',
          content: `You are a helpful assistant. Context:\n${context}`
        },
        ...messages
      ];
      
      // Stream response
      const response = await openai.createChatCompletion({
        model: 'gpt-4',
        stream: true,
        messages: messagesWithContext
      });
      
      const stream = OpenAIStream(response, {
        async onCompletion(completion) {
          // Save the conversation to MemoryStack after streaming completes
          const lastMessage = messages[messages.length - 1];
          await memorystack.addConversation(
            lastMessage.content,
            completion,
            userId
          );
        }
      });
      
      return new StreamingTextResponse(stream);
    }
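
    The memory lookup adds a network round trip before the model call. If you would rather degrade gracefully than fail the whole request when the memory service is unreachable, you can wrap the lookup in a try/catch and fall back to an empty context. A sketch using only the calls shown above, as a drop-in replacement for the lookup and context-building lines in the POST handler:

      // Fall back to an empty context if the memory lookup fails,
      // so the chat request still succeeds without stored memories.
      let context = '';
      try {
        const memories = await memorystack.listMemories({
          user_id: userId,
          limit: 5
        });
        context = memories.results
          .map(m => `[${m.memory_type}] ${m.content}`)
          .join('\n');
      } catch (err) {
        console.error('MemoryStack lookup failed; continuing without context', err);
      }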

    React Component

    Use the useChat hook with MemoryStack:

    'use client';
    
    import { useChat } from 'ai/react';
    
    export default function Chat() {
      const { messages, input, handleInputChange, handleSubmit } = useChat({
        api: '/api/chat',
        body: {
          userId: 'user_123' // Pass user ID for memory isolation
        }
      });
    
      return (
        <div className="flex flex-col h-screen">
          {/* Messages */}
          <div className="flex-1 overflow-y-auto p-4">
            {messages.map(m => (
              <div key={m.id} className="mb-4">
                <div className="font-semibold">
                  {m.role === 'user' ? 'You' : 'AI'}
                </div>
                <div className="text-gray-700">{m.content}</div>
              </div>
            ))}
          </div>
    
          {/* Input */}
          <form onSubmit={handleSubmit} className="p-4 border-t">
            <input
              value={input}
              onChange={handleInputChange}
              placeholder="Say something..."
              className="w-full p-2 border rounded"
            />
          </form>
        </div>
      );
    }
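
    In production you will usually derive the user ID from your auth session rather than hard-coding it. A minimal sketch that takes the ID as a prop; how the parent resolves it (for example in a server component with session access) depends on your auth setup:

    'use client';

    import { useChat } from 'ai/react';

    // The parent component passes the authenticated user's ID as a prop,
    // keeping each user's memories isolated.
    export default function Chat({ userId }: { userId: string }) {
      const { messages, input, handleInputChange, handleSubmit } = useChat({
        api: '/api/chat',
        body: { userId }
      });

      return (
        <form onSubmit={handleSubmit}>
          {messages.map(m => (
            <p key={m.id}>{m.role === 'user' ? 'You' : 'AI'}: {m.content}</p>
          ))}
          <input value={input} onChange={handleInputChange} placeholder="Say something..." />
        </form>
      );
    }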

    Benefits

    ⚡ Streaming Support

    Memories are saved in the onCompletion callback after the stream finishes, so persistence never delays the streamed response.

    🎨 React Hooks

    Works seamlessly with useChat and useCompletion hooks.
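
    useCompletion accepts the same body option, so a per-user ID can be forwarded the same way. A sketch, assuming a hypothetical /api/completion route built like the chat route above:

    'use client';

    import { useCompletion } from 'ai/react';

    export default function Completion({ userId }: { userId: string }) {
      const { completion, input, handleInputChange, handleSubmit } = useCompletion({
        api: '/api/completion', // hypothetical route, built like /api/chat
        body: { userId }
      });

      return (
        <form onSubmit={handleSubmit}>
          <input value={input} onChange={handleInputChange} />
          <p>{completion}</p>
        </form>
      );
    }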

    🚀 Edge Runtime

    Compatible with the Vercel Edge Runtime for low-latency responses worldwide.
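
    To opt the chat route into the Edge Runtime, add the standard Next.js App Router runtime export at the top of the route file (a Next.js convention, independent of MemoryStack):

    // app/api/chat/route.ts
    export const runtime = 'edge';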

    💾 Persistent Context

    Conversations persist across sessions and page reloads.