MemoryStack Documentation

    Amazon Bedrock Integration

    Add persistent memory to your Amazon Bedrock applications. Build context-aware AI with Claude, Llama, and other Bedrock models.

    ✓ Claude  ✓ Llama  ✓ Titan

    Installation

    npm install @aws-sdk/client-bedrock-runtime @memorystack/sdk
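
    Both clients need credentials. The Bedrock client resolves AWS credentials through the SDK's default provider chain (environment variables, shared config files, or an IAM role), while the MemoryStack client reads its API key from the environment. A minimal startup check, using the env var name assumed throughout this guide:

    // Fail fast if configuration is missing. AWS credentials are resolved by
    // the SDK's default provider chain, so only the MemoryStack key is checked.
    if (!process.env.MEMORYSTACK_API_KEY) {
      throw new Error("MEMORYSTACK_API_KEY is not set");
    }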

    Claude with Memory

    import { 
      BedrockRuntimeClient, 
      InvokeModelCommand 
    } from "@aws-sdk/client-bedrock-runtime";
    import { MemoryStackClient } from "@memorystack/sdk";
    
    // Initialize clients
    const bedrock = new BedrockRuntimeClient({ 
      region: "us-east-1" 
    });
    
    const memorystack = new MemoryStackClient({
      apiKey: process.env.MEMORYSTACK_API_KEY!
    });
    
    async function chatWithClaude(userMessage: string, userId?: string) {
      // Get memories for context
      const memories = await memorystack.listMemories({
        user_id: userId,
        limit: 5
      });
    
      const context = memories.results
        .map(m => `[${m.memory_type}] ${m.content}`)
        .join("\n");
    
      // Prepare Claude request
      const payload = {
        anthropic_version: "bedrock-2023-05-31",
        max_tokens: 1000,
        messages: [
          {
            role: "user",
            content: `Context about the user:\n${context}\n\nUser: ${userMessage}`
          }
        ]
      };
    
      // Invoke Claude
      const command = new InvokeModelCommand({
        modelId: "anthropic.claude-3-sonnet-20240229-v1:0",
        contentType: "application/json",
        accept: "application/json",
        body: JSON.stringify(payload)
      });
    
      const response = await bedrock.send(command);
      const result = JSON.parse(new TextDecoder().decode(response.body));
      const aiResponse = result.content[0].text;
    
      // Save the exchange to MemoryStack
      await memorystack.addConversation(userMessage, aiResponse, userId);
    
      return aiResponse;
    }
    
    // Usage
    const response = await chatWithClaude(
      "What are my preferences?",
      "user_123"
    );
    console.log(response);
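
    The same pattern extends to other Bedrock model families, though each one uses its own request schema. Below is a sketch for Llama 3 (the model ID, prompt template, and payload fields follow Bedrock's published Meta Llama format; model availability varies by region, and the MemoryStack calls mirror the Claude example above):

    async function chatWithLlama(userMessage: string, userId?: string) {
      // Same memory lookup as the Claude example
      const memories = await memorystack.listMemories({
        user_id: userId,
        limit: 5
      });
      const context = memories.results
        .map(m => `[${m.memory_type}] ${m.content}`)
        .join("\n");

      // Llama takes a raw prompt string instead of a messages array;
      // the instruct variants expect the Llama 3 chat template tokens.
      const prompt =
        "<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n" +
        `Context about the user:\n${context}\n\nUser: ${userMessage}` +
        "<|eot_id|><|start_header_id|>assistant<|end_header_id|>\n\n";

      const command = new InvokeModelCommand({
        modelId: "meta.llama3-8b-instruct-v1:0",
        contentType: "application/json",
        accept: "application/json",
        body: JSON.stringify({
          prompt,
          max_gen_len: 1000,
          temperature: 0.5,
          top_p: 0.9
        })
      });

      const response = await bedrock.send(command);
      const result = JSON.parse(new TextDecoder().decode(response.body));
      // Llama responses put the generated text in `generation`
      const aiResponse = result.generation;

      await memorystack.addConversation(userMessage, aiResponse, userId);
      return aiResponse;
    }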

    Streaming with Bedrock

    import { InvokeModelWithResponseStreamCommand } from "@aws-sdk/client-bedrock-runtime";
    
    async function streamChatWithClaude(userMessage: string, userId?: string) {
      // Get context
      const memories = await memorystack.listMemories({
        user_id: userId,
        limit: 5
      });
      const context = memories.results
        .map(m => `[${m.memory_type}] ${m.content}`)
        .join("\n");
    
      const payload = {
        anthropic_version: "bedrock-2023-05-31",
        max_tokens: 1000,
        messages: [
          {
            role: "user",
            content: `Context: ${context}\n\nUser: ${userMessage}`
          }
        ]
      };
    
      // Stream response
      const command = new InvokeModelWithResponseStreamCommand({
        modelId: "anthropic.claude-3-sonnet-20240229-v1:0",
        contentType: "application/json",
        accept: "application/json",
        body: JSON.stringify(payload)
      });
    
      const response = await bedrock.send(command);
      let fullResponse = "";
    
      if (response.body) {
        for await (const event of response.body) {
          if (event.chunk) {
            const chunk = JSON.parse(new TextDecoder().decode(event.chunk.bytes));
            if (chunk.type === "content_block_delta") {
              const text = chunk.delta.text;
              fullResponse += text;
              process.stdout.write(text);
            }
          }
        }
      }
    
      // Save the full exchange once streaming completes
      await memorystack.addConversation(userMessage, fullResponse, userId);
    
      return fullResponse;
    }
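
    Usage is the same as the non-streaming helper; chunks are printed to stdout as they arrive, and the full text is returned once the stream ends:

    // Usage
    const streamed = await streamChatWithClaude(
      "What do you remember about me?",
      "user_123"
    );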

    Benefits

    🏢 Enterprise Ready

    Combine Amazon Bedrock's enterprise features with persistent memory.

    🔒 Secure

    Keep data secure with AWS infrastructure and MemoryStack encryption.

    🎯 Model Choice

    Works with Claude, Llama, Titan, and other Bedrock text models; each model family has its own request schema, as the Titan sketch below shows.
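
    For example, Titan Text uses its own request and response shapes. A sketch, assuming the same clients as above (the model ID and field names follow Bedrock's Titan Text format):

    async function chatWithTitan(userMessage: string, userId?: string) {
      const memories = await memorystack.listMemories({
        user_id: userId,
        limit: 5
      });
      const context = memories.results
        .map(m => `[${m.memory_type}] ${m.content}`)
        .join("\n");

      // Titan Text takes a single inputText field plus a generation config
      const command = new InvokeModelCommand({
        modelId: "amazon.titan-text-express-v1",
        contentType: "application/json",
        accept: "application/json",
        body: JSON.stringify({
          inputText: `Context about the user:\n${context}\n\nUser: ${userMessage}`,
          textGenerationConfig: {
            maxTokenCount: 1000,
            temperature: 0.7,
            topP: 0.9
          }
        })
      });

      const response = await bedrock.send(command);
      const result = JSON.parse(new TextDecoder().decode(response.body));
      // Titan returns an array of results; the generated text is in outputText
      const aiResponse = result.results[0].outputText;

      await memorystack.addConversation(userMessage, aiResponse, userId);
      return aiResponse;
    }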

    💾 Persistent Context

    Conversations persist across sessions and regions.