Quickstart
This guide will help you integrate Memory OS into your application in under 5 minutes. By the end, you'll be storing memories, searching semantically, and retrieving context for your LLM.
Prerequisites
- Node.js 18+ or Python 3.8+ (for SDK usage)
- A Memory OS API key
Step 1: Get Your API Key
Register for a free account to get your API key:
curl -X POST https://api.mymemoryos.com/api/v1/register \
-H "Content-Type: application/json" \
-d '{
"email": "you@example.com",
"name": "Your Name",
"organization": "Your Company"
}'
Response:
{
"data": {
"message": "Account created successfully",
"tenant": {
"id": "abc123...",
"name": "Your Company",
"slug": "your-company-xxx"
},
"api_key": {
"id": "key_123...",
"key": "mos_live_xxxxxxxxxxxx",
"warning": "Store this key securely - it will not be shown again"
}
}
}
Important: Save your API key immediately - it won't be shown again!
Step 2: Install the SDK
Choose your preferred language:
npm install @memory-os/sdk
Or with yarn:
yarn add @memory-os/sdk
pip install memoryos
Or with poetry:
poetry add memoryos
No installation required. Use any HTTP client to call the API directly.
Base URL: https://api.mymemoryos.com/v1
Step 3: Create Your First Memory
Memories are the core data type in Memory OS. Each memory has content, a tier (short/medium/long), and optional metadata.
import { MemoryOS } from '@memory-os/sdk';

// Read the API key from the environment — never hard-code it.
const client = new MemoryOS({
  apiKey: process.env.MEMORY_OS_API_KEY
});

// Create a long-term memory about user preferences
const memory = await client.memories.create({
  content: "User's name is Alex. They work as a software engineer at TechCorp and prefer concise responses.",
  tier: "long",
  content_type: "fact",
  memory_nature: "semantic",
  metadata: {
    source: "onboarding",
    user_id: "user_123"
  }
});

console.log("Created memory:", memory.id);
// Created memory: 550e8400-e29b-41d4-a716-446655440000
import os
from memoryos import MemoryOS

# Read the API key from the environment — never hard-code it.
client = MemoryOS(api_key=os.environ["MEMORY_OS_API_KEY"])

# Create a long-term memory about user preferences
memory = client.memories.create(
    content="User's name is Alex. They work as a software engineer at TechCorp and prefer concise responses.",
    tier="long",
    content_type="fact",
    memory_nature="semantic",
    metadata={
        "source": "onboarding",
        "user_id": "user_123"
    }
)

print(f"Created memory: {memory['id']}")
# Created memory: 550e8400-e29b-41d4-a716-446655440000
curl -X POST https://api.mymemoryos.com/v1/memories \
-H "Authorization: Bearer $MEMORY_OS_API_KEY" \
-H "Content-Type: application/json" \
-d '{
"content": "User'\''s name is Alex. They work as a software engineer at TechCorp and prefer concise responses.",
"tier": "long",
"content_type": "fact",
"memory_nature": "semantic",
"metadata": {
"source": "onboarding",
"user_id": "user_123"
}
}'
Response:
{
"success": true,
"data": {
"id": "550e8400-e29b-41d4-a716-446655440000",
"content": "User's name is Alex...",
"tier": "long",
"content_type": "fact",
"memory_nature": "semantic",
"relevance_score": 0.5,
"created_at": "2024-01-15T10:30:00Z"
}
}
Step 4: Search Memories
Search uses semantic similarity to find relevant memories. You don't need exact keyword matches.
// Search for memories about the user
const results = await client.search({
query: "What do I know about the user's job?",
limit: 5,
threshold: 0.7 // Minimum similarity score
});
for (const result of results.results) {
console.log(`Score: ${result.combined_score.toFixed(2)}`);
console.log(`Content: ${result.content}`);
console.log("---");
}
# Search for memories about the user
results = client.search(
    query="What do I know about the user's job?",
    limit=5,
    threshold=0.7  # Minimum similarity score
)

for result in results["results"]:
    print(f"Score: {result['combined_score']:.2f}")
    print(f"Content: {result['content']}")
    print("---")
curl -X POST https://api.mymemoryos.com/v1/search \
-H "Authorization: Bearer $MEMORY_OS_API_KEY" \
-H "Content-Type: application/json" \
-d '{
"query": "What do I know about the user'\''s job?",
"limit": 5,
"threshold": 0.7
}'
Response:
{
"success": true,
"data": {
"results": [
{
"id": "550e8400-e29b-41d4-a716-446655440000",
"content": "User's name is Alex. They work as a software engineer...",
"similarity": 0.89,
"combined_score": 0.85,
"tier": "long",
"created_at": "2024-01-15T10:30:00Z"
}
],
"search_type": "semantic",
"threshold": 0.7
}
}
Step 5: Get LLM Context
The context endpoint retrieves and formats memories for your LLM prompt, respecting token budgets.
// Get context for an LLM prompt
const context = await client.getContext({
  query: "The user is asking about code review best practices",
  max_tokens: 2000,
  format: "text"
});

console.log(`Retrieved ${context.memories.length} memories`);
console.log(`Token count: ${context.token_count}`);
console.log(`Retrieval time: ${context.retrieval_time_ms}ms`);
// Use in your LLM prompt
const systemPrompt = `You are a helpful assistant. Here is context about the user:
${context.context}
Use this context to personalize your response.`;
# Get context for an LLM prompt
context = client.get_context(
    query="The user is asking about code review best practices",
    max_tokens=2000,
    format="text"
)

print(f"Retrieved {len(context['memories'])} memories")
print(f"Token count: {context['token_count']}")
print(f"Retrieval time: {context['retrieval_time_ms']}ms")
# Use in your LLM prompt
system_prompt = f"""You are a helpful assistant. Here is context about the user:
{context['context']}
Use this context to personalize your response."""
curl -X POST https://api.mymemoryos.com/v1/context \
-H "Authorization: Bearer $MEMORY_OS_API_KEY" \
-H "Content-Type: application/json" \
-d '{
"query": "The user is asking about code review best practices",
"max_tokens": 2000,
"format": "text"
}'
Response:
{
"success": true,
"data": {
"context": "User's name is Alex. They work as a software engineer at TechCorp and prefer concise responses.",
"memories": [
{
"id": "550e8400-e29b-41d4-a716-446655440000",
"content": "User's name is Alex...",
"tier": "long",
"score": 0.85
}
],
"token_count": 28,
"retrieval_time_ms": 45
}
}
Complete Working Example
Here's a complete example showing how to build an AI assistant with memory:
import { MemoryOS } from '@memory-os/sdk';
import OpenAI from 'openai';

const memory = new MemoryOS({ apiKey: process.env.MEMORY_OS_API_KEY });
const openai = new OpenAI({ apiKey: process.env.OPENAI_API_KEY });

/**
 * Answer a user message with memory-augmented context.
 *
 * Stores the incoming message as a short-term episodic memory, retrieves
 * relevant context, generates a GPT-4 reply with that context in the
 * system prompt, then records the interaction as a medium-term event.
 *
 * @param {string} userId - ID attached to stored memories as metadata.
 * @param {string} userMessage - The user's message.
 * @returns {Promise<string>} The assistant's reply.
 */
async function chat(userId, userMessage) {
  // Store the user's message as short-term memory
  await memory.memories.create({
    content: `User said: "${userMessage}"`,
    tier: "short",
    content_type: "conversation",
    memory_nature: "episodic",
    metadata: { user_id: userId }
  });

  // Retrieve relevant context
  const context = await memory.getContext({
    query: userMessage,
    max_tokens: 1500
  });

  // Generate response with context
  const response = await openai.chat.completions.create({
    model: "gpt-4",
    messages: [
      {
        role: "system",
        content: `You are a helpful assistant. Here's what you know about the user:\n\n${context.context}`
      },
      { role: "user", content: userMessage }
    ]
  });

  const assistantMessage = response.choices[0].message.content;

  // Store important information from the response
  await memory.memories.create({
    content: `Assistant helped user with: ${userMessage.slice(0, 100)}`,
    tier: "medium",
    content_type: "event",
    memory_nature: "episodic",
    metadata: { user_id: userId }
  });

  return assistantMessage;
}

// Usage
const response = await chat("user_123", "Can you help me with a code review?");
console.log(response);
import os
from memoryos import MemoryOS
from openai import OpenAI

memory = MemoryOS(api_key=os.environ["MEMORY_OS_API_KEY"])
openai_client = OpenAI(api_key=os.environ["OPENAI_API_KEY"])


def chat(user_id: str, user_message: str) -> str:
    """Answer a user message using memory-augmented context.

    Stores the incoming message as a short-term episodic memory,
    retrieves relevant context, generates a GPT-4 reply with that
    context in the system prompt, then records the interaction as a
    medium-term event before returning the assistant's reply.
    """
    # Store the user's message as short-term memory
    memory.memories.create(
        content=f'User said: "{user_message}"',
        tier="short",
        content_type="conversation",
        memory_nature="episodic",
        metadata={"user_id": user_id}
    )

    # Retrieve relevant context
    context = memory.get_context(
        query=user_message,
        max_tokens=1500
    )

    # Generate response with context
    response = openai_client.chat.completions.create(
        model="gpt-4",
        messages=[
            {
                "role": "system",
                "content": f"You are a helpful assistant. Here's what you know about the user:\n\n{context['context']}"
            },
            {"role": "user", "content": user_message}
        ]
    )

    assistant_message = response.choices[0].message.content

    # Store important information from the response
    memory.memories.create(
        content=f"Assistant helped user with: {user_message[:100]}",
        tier="medium",
        content_type="event",
        memory_nature="episodic",
        metadata={"user_id": user_id}
    )

    return assistant_message


# Usage
response = chat("user_123", "Can you help me with a code review?")
print(response)
Next Steps
Now that you have the basics, explore these topics: