    Google Gemini Integration

    Add persistent memory to your Google Gemini applications. Build context-aware AI with Gemini Pro and MemoryStack.

    ✓ Gemini Pro ✓ Gemini Vision ✓ Multimodal

    Installation

    npm install @google/generative-ai @memorystack/sdk
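
    The examples below read both keys from environment variables. The variable
    names are taken from the code that follows; load them however your app
    manages configuration (a shell export, dotenv, a secrets manager):

    # .env
    GOOGLE_API_KEY=your-google-api-key
    MEMORYSTACK_API_KEY=your-memorystack-api-key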

    Basic Integration

    import { GoogleGenerativeAI } from "@google/generative-ai";
    import { MemoryStackClient } from "@memorystack/sdk";
    
    // Initialize clients
    const genAI = new GoogleGenerativeAI(process.env.GOOGLE_API_KEY!);
    const memoryStack = new MemoryStackClient({
      apiKey: process.env.MEMORYSTACK_API_KEY!
    });
    
    async function chatWithMemory(userMessage: string, userId?: string) {
      // Get memories for context
      const memories = await memoryStack.listMemories({
        user_id: userId,
        limit: 5
      });
    
      const context = memories.results
        .map(m => `[${m.memory_type}] ${m.content}`)
        .join("\n");
    
      // Initialize Gemini
      const model = genAI.getGenerativeModel({ model: "gemini-pro" });
    
      // Create prompt with context
      const prompt = `Context about the user:\n${context}\n\nUser: ${userMessage}`;
    
      // Generate response
      const result = await model.generateContent(prompt);
      const response = result.response.text();
    
      // Save the conversation to MemoryStack
      await memoryStack.addConversation(userMessage, response, userId);
    
      return response;
    }
    
    // Usage
    const response = await chatWithMemory(
      "What are my preferences?",
      "user_123"
    );
    console.log(response);

    Streaming with Memory

    async function streamChatWithMemory(userMessage: string, userId?: string) {
      // Get memories for context (same listMemories call as the basic example)
      const memories = await memoryStack.listMemories({
        user_id: userId,
        limit: 5
      });
      const context = memories.results
        .map(m => `[${m.memory_type}] ${m.content}`)
        .join("\n");
    
      const model = genAI.getGenerativeModel({ model: "gemini-pro" });
      const prompt = `Context: ${context}\n\nUser: ${userMessage}`;
    
      // Stream response
      const result = await model.generateContentStream(prompt);
      
      let fullResponse = "";
      for await (const chunk of result.stream) {
        const text = chunk.text();
        fullResponse += text;
        process.stdout.write(text);
      }
    
      // Save after streaming completes
      await memoryStack.addConversation(userMessage, fullResponse, userId);
    
      return fullResponse;
    }
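
    Usage mirrors the basic example: the streamed text is echoed to stdout as
    it arrives, and the full response is returned and saved once the stream
    ends:

    // Usage
    const streamed = await streamChatWithMemory(
      "Summarize what you know about me",
      "user_123"
    );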

    Benefits

    🎨 Multimodal Memory

    Store memories from text, images, and other modalities.
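
    For example, one pattern is to have Gemini's vision model describe an
    image and store that description as an ordinary text memory. This is a
    minimal sketch, assuming the genAI and memoryStack clients from Basic
    Integration are in scope; rememberImage is a hypothetical helper, not part
    of either SDK:

    import { readFileSync } from "fs";

    async function rememberImage(imagePath: string, userId?: string) {
      // Gemini's vision-capable model accepts mixed text and image parts
      const model = genAI.getGenerativeModel({ model: "gemini-pro-vision" });

      const imagePart = {
        inlineData: {
          // Adjust mimeType to match your file
          data: readFileSync(imagePath).toString("base64"),
          mimeType: "image/png"
        }
      };

      const result = await model.generateContent([
        "Describe this image in one sentence.",
        imagePart
      ]);
      const description = result.response.text();

      // Store the description as text so later chats can reference the image
      await memoryStack.addConversation(
        `User shared an image: ${imagePath}`,
        description,
        userId
      );

      return description;
    }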

    ⚡ Fast Context

    Quickly load relevant context for Gemini's large context window.

    💾 Persistent

    Conversations persist across sessions and API calls.
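
    Because memories live in MemoryStack rather than in process state, a
    brand-new session can pick up earlier context with no extra work:

    // In a later session, or a different process entirely
    const recall = await chatWithMemory(
      "What did we talk about last time?",
      "user_123"
    );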

    🔍 Semantic Search

    Retrieve relevant memories using semantic similarity.
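
    This lets retrieval rank memories by relevance to the current message
    instead of by recency, as the listMemories calls above do. The sketch
    below assumes a hypothetical searchMemories method and parameters; check
    the MemoryStack API reference for the actual call:

    // Hypothetical: searchMemories is assumed here, not a confirmed SDK method
    async function relevantContext(query: string, userId?: string) {
      const memories = await memoryStack.searchMemories({
        query,
        user_id: userId,
        limit: 5
      });

      return memories.results
        .map(m => `[${m.memory_type}] ${m.content}`)
        .join("\n");
    }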