JavaScript SDK
The official JavaScript SDK for Memory OS provides a fully typed client for storing, searching, and retrieving memories in your Node.js or browser applications.
Installation
Install the SDK using your preferred package manager:
Bash
npm install @memory-os/sdk
Or with yarn:
Bash
yarn add @memory-os/sdk
Or with pnpm:
Bash
pnpm add @memory-os/sdk
Quick Start
JavaScript
import { MemoryOS } from '@memory-os/sdk';
// Initialize the client
const memory = new MemoryOS({
apiKey: process.env.MEMORY_OS_API_KEY
});
// Store a memory
const newMemory = await memory.memories.create({
content: "User prefers dark mode and TypeScript",
tier: "long",
content_type: "fact",
memory_nature: "semantic"
});
// Search for memories
const results = await memory.search({
query: "What are the user's preferences?",
threshold: 0.7
});
// Get context for an LLM
const context = await memory.getContext({
query: "Help the user with their request",
max_tokens: 2000
});
console.log(context.context);
Client Configuration
Constructor Options
TypeScript
interface MemoryOSOptions {
apiKey: string; // Required: Your API key (starts with mos_)
baseUrl?: string; // Optional: API base URL (default: https://api.mymemoryos.com/v1)
timeout?: number; // Optional: Request timeout in ms (default: 30000)
maxRetries?: number; // Optional: Max retry attempts (default: 3)
headers?: Record<string, string>; // Optional: Additional headers
}
Basic Configuration
JavaScript
import { MemoryOS } from '@memory-os/sdk';
const memory = new MemoryOS({
apiKey: process.env.MEMORY_OS_API_KEY
});
Advanced Configuration
JavaScript
import { MemoryOS } from '@memory-os/sdk';
const memory = new MemoryOS({
apiKey: process.env.MEMORY_OS_API_KEY,
baseUrl: 'https://api.mymemoryos.com/v1',
timeout: 60000,
maxRetries: 5,
headers: {
'X-Custom-Header': 'value'
}
});
API Reference
memories.create()
Create a new memory.
TypeScript
interface CreateMemoryParams {
content: string; // Required: Memory content
tier?: 'short' | 'medium' | 'long'; // Optional: Memory tier (default: 'short')
content_type?: string; // Optional: text, conversation, document, event, fact
memory_nature?: 'episodic' | 'semantic'; // Optional: Memory nature
parent_memory_id?: string; // Optional: Parent memory UUID
metadata?: Record<string, any>; // Optional: Custom metadata
external_id?: string; // Optional: Your external reference
source_id?: string; // Optional: Source system identifier
importance_score?: number; // Optional: Importance (0-1)
}
interface Memory {
id: string;
content: string;
tier: string;
content_type: string;
memory_nature: string;
relevance_score: number;
importance_score: number;
confidence_score: number;
decay_rate: number;
access_count: number;
last_accessed_at: string | null;
parent_memory_id: string | null;
metadata: Record<string, any>;
pii_detected: boolean;
pii_fields: string[];
created_at: string;
updated_at: string;
expires_at: string | null;
}
const memory: Memory = await client.memories.create(params);
Example:
JavaScript
const memory = await client.memories.create({
content: "User is a senior developer at TechCorp, specializing in React and Node.js",
tier: "long",
content_type: "fact",
memory_nature: "semantic",
importance_score: 0.8,
metadata: {
source: "onboarding",
user_id: "user_123",
verified: true
}
});
console.log(`Created memory: ${memory.id}`);
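Memories can also be linked hierarchically via parent_memory_id. A minimal sketch that attaches a follow-up fact to the memory created above (the content here is illustrative):
JavaScript
// Attach a related fact to the memory created above (illustrative content)
const childMemory = await client.memories.create({
  content: "Prefers functional React components and strict TypeScript settings",
  tier: "long",
  content_type: "fact",
  memory_nature: "semantic",
  parent_memory_id: memory.id
});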
memories.list()
Retrieve a paginated list of memories.
TypeScript
interface ListMemoriesParams {
limit?: number; // Max results (default: 50, max: 100)
offset?: number; // Pagination offset (default: 0)
tier?: 'short' | 'medium' | 'long'; // Filter by tier
content_type?: string; // Filter by content type
memory_nature?: 'episodic' | 'semantic'; // Filter by nature
min_relevance?: number; // Minimum relevance score (0-1)
}
interface ListMemoriesResponse {
data: Memory[];
meta: {
total: number;
limit: number;
offset: number;
has_more: boolean;
};
}
const response: ListMemoriesResponse = await client.memories.list(params);
Example:
JavaScript
// Get all long-term memories
const longTermMemories = await client.memories.list({
tier: "long",
limit: 50
});
console.log(`Found ${longTermMemories.meta.total} long-term memories`);
// Paginate through all memories
let offset = 0;
const limit = 50;
const allMemories = [];
let hasMore = true;
while (hasMore) {
const page = await client.memories.list({ limit, offset });
allMemories.push(...page.data);
offset += limit;
hasMore = page.meta.has_more;
}
console.log(`Retrieved ${allMemories.length} total memories`);
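List filters can be combined. A short sketch that returns only episodic conversation memories above a relevance floor (the threshold value is illustrative):
JavaScript
// Combine filters: episodic conversation memories with relevance >= 0.5
const conversations = await client.memories.list({
  content_type: "conversation",
  memory_nature: "episodic",
  min_relevance: 0.5,
  limit: 20
});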
memories.get()
Retrieve a single memory by ID. Automatically updates access statistics.
TypeScript
const memory: Memory = await client.memories.get(id: string);
Example:
JavaScript
try {
const memory = await client.memories.get("550e8400-e29b-41d4-a716-446655440000");
console.log(`Memory content: ${memory.content}`);
console.log(`Access count: ${memory.access_count}`);
} catch (error) {
if (error.code === 'NOT_FOUND') {
console.log('Memory not found');
}
}
memories.update()
Update an existing memory. If content is changed, the embedding is regenerated.
TypeScript
interface UpdateMemoryParams {
content?: string; // New content (triggers re-embedding)
tier?: 'short' | 'medium' | 'long'; // New tier
importance_score?: number; // New importance (0-1)
memory_nature?: 'episodic' | 'semantic'; // New nature
metadata?: Record<string, any>; // Replaces existing metadata
}
const memory: Memory = await client.memories.update(id: string, params);
Example:
JavaScript
// Promote a memory to long-term
const updated = await client.memories.update("550e8400-e29b-41d4-a716-446655440000", {
tier: "long",
importance_score: 0.9,
metadata: {
verified: true,
promoted_at: new Date().toISOString()
}
});
console.log(`Updated memory tier: ${updated.tier}`);
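Because metadata replaces the existing object rather than merging into it, read the memory first if you need to preserve prior fields. A minimal read-merge-write sketch (the added field is illustrative):
JavaScript
// Merge new metadata with what is already stored
const existing = await client.memories.get("550e8400-e29b-41d4-a716-446655440000");
await client.memories.update(existing.id, {
  metadata: {
    ...existing.metadata,
    reviewed: true // illustrative field
  }
});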
memories.delete()
Permanently delete a memory.
TypeScript
interface DeleteResponse {
deleted: boolean;
}
const result: DeleteResponse = await client.memories.delete(id: string);
Example:
JavaScript
try {
const result = await client.memories.delete("550e8400-e29b-41d4-a716-446655440000");
if (result.deleted) {
console.log('Memory deleted successfully');
}
} catch (error) {
console.error('Failed to delete memory:', error.message);
}
search()
Perform semantic search across memories.
TypeScript
interface SearchParams {
query?: string; // Natural language query
embedding?: number[]; // Pre-computed embedding (1536 dimensions)
threshold?: number; // Minimum similarity (default: 0.7)
limit?: number; // Max results (default: 20, max: 100)
tier?: 'short' | 'medium' | 'long'; // Filter by tier
memory_nature?: 'episodic' | 'semantic'; // Filter by nature
tags?: string[]; // Filter by tags
entities?: string[]; // Filter by entity IDs
}
interface SearchResult {
id: string;
content: string;
content_type: string;
tier: string;
memory_nature: string;
relevance_score: number;
similarity: number; // Vector similarity (0-1)
combined_score: number; // Weighted combination
metadata: Record<string, any>;
created_at: string;
}
interface SearchResponse {
results: SearchResult[];
search_type: 'semantic' | 'text';
threshold: number;
}
const response: SearchResponse = await client.search(params);
Example:
JavaScript
// Basic semantic search
const results = await client.search({
query: "What programming languages does the user know?",
threshold: 0.7,
limit: 10
});
for (const result of results.results) {
console.log(`[${result.combined_score.toFixed(2)}] ${result.content}`);
}
// Filtered search
const recentResults = await client.search({
query: "current project",
tier: "short",
threshold: 0.6,
limit: 5
});
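// `getEmbeddingFromOpenAI` below is not part of the SDK. A minimal sketch, assuming
// the official `openai` package (text-embedding-3-small returns 1536-dimensional vectors):
import OpenAI from 'openai';
const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });
async function getEmbeddingFromOpenAI(text) {
  const response = await openai.embeddings.create({
    model: 'text-embedding-3-small',
    input: text
  });
  return response.data[0].embedding;
}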
// Search with pre-computed embedding
const embedding = await getEmbeddingFromOpenAI("user preferences");
const embeddingResults = await client.search({
embedding,
threshold: 0.75
});
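SearchParams also accepts tags and entities filters. A short sketch using the tags filter (the tag value is illustrative):
JavaScript
// Restrict results to memories carrying a specific tag (illustrative value)
const taggedResults = await client.search({
  query: "notification settings",
  tags: ["preferences"],
  threshold: 0.6
});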
getContext()
Retrieve formatted context for LLM prompt injection.
TypeScript
interface GetContextParams {
query: string; // Context query
max_tokens?: number; // Token budget (default: 4000)
tier?: 'short' | 'medium' | 'long'; // Filter by tier
format?: 'text' | 'json'; // Output format (default: 'text')
}
interface ContextMemory {
id: string;
content: string;
tier: string;
score: number;
}
interface ContextResponse {
context: string; // Formatted context string
memories: ContextMemory[]; // Source memories
token_count: number; // Estimated tokens
retrieval_time_ms: number; // Retrieval latency
}
const response: ContextResponse = await client.getContext(params);
Example:
JavaScript
// Get context for LLM prompt
const context = await client.getContext({
query: "User is asking about React performance optimization",
max_tokens: 2000
});
console.log(`Retrieved ${context.memories.length} memories`);
console.log(`Token count: ${context.token_count}`);
// Use in LLM prompt
const systemPrompt = `You are a helpful assistant. Here is context about the user:
${context.context}
Use this context to personalize your response.`;
// Get tiered context
const shortTermContext = await client.getContext({
query: "current conversation",
tier: "short",
max_tokens: 500
});
const longTermContext = await client.getContext({
query: "user preferences and background",
tier: "long",
max_tokens: 1500
});
const fullSystemPrompt = `# Long-term Knowledge
${longTermContext.context}
# Current Session
${shortTermContext.context}`;
TypeScript Types
The SDK includes full TypeScript type definitions. Import types as needed:
TypeScript
import {
MemoryOS,
Memory,
SearchResult,
ContextResponse,
CreateMemoryParams,
UpdateMemoryParams,
SearchParams,
GetContextParams,
MemoryOSError,
Tier,
MemoryNature,
ContentType
} from '@memory-os/sdk';
// Type definitions
type Tier = 'short' | 'medium' | 'long';
type MemoryNature = 'episodic' | 'semantic';
type ContentType = 'text' | 'conversation' | 'document' | 'event' | 'fact';
// Typed function example
async function storeUserPreference(
client: MemoryOS,
userId: string,
preference: string
): Promise<Memory> {
return client.memories.create({
content: preference,
tier: 'long',
content_type: 'fact',
memory_nature: 'semantic',
importance_score: 0.8,
metadata: { user_id: userId }
});
}
Error Handling
The SDK throws MemoryOSError for API errors.
TypeScript
interface MemoryOSError extends Error {
code: string; // Error code
message: string; // Error message
status?: number; // HTTP status code
requestId?: string; // Request ID for debugging
retryAfter?: number; // Seconds to wait (for rate limits)
}
Error Codes
| Code | HTTP Status | Description |
|---|---|---|
| VALIDATION_ERROR | 400 | Invalid request parameters |
| AUTHENTICATION_ERROR | 401 | Invalid or missing API key |
| FORBIDDEN | 403 | Insufficient permissions |
| NOT_FOUND | 404 | Resource not found |
| RATE_LIMIT_EXCEEDED | 429 | Too many requests |
| INTERNAL_ERROR | 500 | Server error |
Error Handling Example
JavaScript
import { MemoryOS, MemoryOSError } from '@memory-os/sdk';
const client = new MemoryOS({ apiKey: process.env.MEMORY_OS_API_KEY });
async function safeOperation() {
try {
const memory = await client.memories.create({
content: "User data",
tier: "long"
});
return memory;
} catch (error) {
if (error instanceof MemoryOSError) {
switch (error.code) {
case 'VALIDATION_ERROR':
console.error('Invalid input:', error.message);
break;
case 'AUTHENTICATION_ERROR':
console.error('Check your API key');
break;
case 'RATE_LIMIT_EXCEEDED':
console.log(`Rate limited. Retry after ${error.retryAfter}s`);
await sleep(error.retryAfter * 1000);
return safeOperation(); // Retry
case 'NOT_FOUND':
console.error('Resource not found');
break;
default:
console.error('API error:', error.code, error.message);
}
} else {
console.error('Unexpected error:', error);
}
throw error;
}
}
function sleep(ms) {
return new Promise(resolve => setTimeout(resolve, ms));
}
Retry with Exponential Backoff
JavaScript
async function withRetry(fn, maxRetries = 3, baseDelay = 1000) {
let lastError;
for (let attempt = 0; attempt < maxRetries; attempt++) {
try {
return await fn();
} catch (error) {
lastError = error;
// Don't retry validation or auth errors
if (error.code === 'VALIDATION_ERROR' ||
error.code === 'AUTHENTICATION_ERROR' ||
error.code === 'FORBIDDEN') {
throw error;
}
// Handle rate limiting
if (error.code === 'RATE_LIMIT_EXCEEDED' && error.retryAfter) {
await sleep(error.retryAfter * 1000);
continue;
}
// Exponential backoff for other errors
if (attempt < maxRetries - 1) {
const delay = baseDelay * Math.pow(2, attempt);
console.log(`Attempt ${attempt + 1} failed, retrying in ${delay}ms`);
await sleep(delay);
}
}
}
throw lastError;
}
// Usage
const memory = await withRetry(() =>
client.memories.create({
content: "Important data",
tier: "long"
})
);
Complete Examples
Chat Application with Memory
JavaScript
import { MemoryOS } from '@memory-os/sdk';
import OpenAI from 'openai';
const memory = new MemoryOS({ apiKey: process.env.MEMORY_OS_API_KEY });
const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });
async function chat(userId, message) {
// Get relevant context
const context = await memory.getContext({
query: message,
max_tokens: 2000
});
// Generate response
const completion = await openai.chat.completions.create({
model: 'gpt-4',
messages: [
{
role: 'system',
content: `You are a helpful assistant. Context:\n\n${context.context}`
},
{ role: 'user', content: message }
]
});
const response = completion.choices[0].message.content;
// Store conversation turn
await memory.memories.create({
content: `User: ${message}\nAssistant: ${response}`,
tier: 'short',
content_type: 'conversation',
memory_nature: 'episodic',
metadata: { user_id: userId }
});
return response;
}
Memory-Aware Recommendation System
JavaScript
import { MemoryOS } from '@memory-os/sdk';
const memory = new MemoryOS({ apiKey: process.env.MEMORY_OS_API_KEY });
async function getRecommendations(userId, category) {
// Get user preferences
const preferences = await memory.search({
query: `${userId} preferences ${category}`,
tier: 'long',
limit: 10,
threshold: 0.6
});
// Get recent interactions
const recent = await memory.search({
query: `${userId} ${category} interaction`,
tier: 'short',
limit: 5,
threshold: 0.5
});
// Build recommendation context
const userProfile = preferences.results.map(r => r.content).join('\n');
const recentActivity = recent.results.map(r => r.content).join('\n');
// Use this context with your recommendation engine
return {
preferences: userProfile,
recentActivity,
recommendationQuery: `Based on:\n${userProfile}\n\nRecent:\n${recentActivity}`
};
}
async function recordInteraction(userId, itemId, action) {
await memory.memories.create({
content: `User ${action} item ${itemId}`,
tier: 'short',
content_type: 'event',
memory_nature: 'episodic',
metadata: {
user_id: userId,
item_id: itemId,
action,
timestamp: new Date().toISOString()
}
});
}
Memory Maintenance Script
JavaScript
import { MemoryOS } from '@memory-os/sdk';
const memory = new MemoryOS({ apiKey: process.env.MEMORY_OS_API_KEY });
async function promoteImportantMemories() {
// Find frequently accessed short-term memories
const shortTerm = await memory.memories.list({
tier: 'short',
limit: 100
});
for (const mem of shortTerm.data) {
if (mem.access_count > 5) {
// Promote to medium-term
await memory.memories.update(mem.id, {
tier: 'medium',
importance_score: Math.min(1, (mem.importance_score || 0.5) + 0.1)
});
console.log(`Promoted ${mem.id} to medium-term`);
}
}
}
async function cleanupLowRelevanceMemories() {
const memories = await memory.memories.list({
tier: 'short',
min_relevance: 0,
limit: 100
});
for (const mem of memories.data) {
if (mem.relevance_score < 0.1 && mem.access_count === 0) {
await memory.memories.delete(mem.id);
console.log(`Deleted low-relevance memory ${mem.id}`);
}
}
}
// Run maintenance
await promoteImportantMemories();
await cleanupLowRelevanceMemories();
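To run these maintenance tasks on a schedule rather than as a one-off script, wrap them in any scheduler. A minimal sketch assuming the third-party node-cron package:
JavaScript
import cron from 'node-cron';
// Run maintenance at the top of every hour (assumes `node-cron` is installed)
cron.schedule('0 * * * *', async () => {
  await promoteImportantMemories();
  await cleanupLowRelevanceMemories();
});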