POST /api/memory/settings (Bearer authentication required)

Update Memory Settings

Update PocketPaw's memory backend configuration: switch between the file store and Mem0, configure LLM and embedder providers, set vector store options, and toggle auto-learning.

Overview

Updates the memory backend configuration. Changes take effect immediately — the memory manager is reloaded with the new settings.

Request Body

Body Parameters

memory_backend
string

Memory backend to use. file is the default file-based store; mem0 enables semantic memory with auto-learn. (A minimal example of switching backends appears after this parameter list.)

Allowed values:
file, mem0
mem0_llm_provider
string

LLM provider for mem0’s extraction and summarization.

Allowed values:
ollama, openai, anthropic
mem0_llm_model
string

LLM model name (e.g., llama3.2, gpt-4o-mini).

mem0_embedder_provider
string

Embedding provider for semantic search.

Allowed values:
ollama, openai
mem0_embedder_model
string

Embedding model name (e.g., nomic-embed-text, text-embedding-3-small).

mem0_vector_store
string

Vector store backend.

Allowed values:
qdrant
mem0_ollama_base_url
string

Ollama server URL (default: http://localhost:11434).

mem0_auto_learn
boolean

Enable automatic memory extraction from conversations.
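
As referenced above, here is a minimal sketch of switching back to the default file store. It assumes that fields which are not being changed (the mem0_* options) can be omitted from the request body; that partial-payload behavior is an assumption, not something documented on this page.

import requests

# Minimal payload: switch back to the default file-based store.
# Assumes unchanged mem0_* fields can be left out of the request body.
response = requests.post(
    "http://localhost:8000/api/memory/settings",
    headers={"Authorization": "Bearer <token>"},
    json={"memory_backend": "file"},
)
response.raise_for_status()
print(response.json())  # expected: {"status": "ok"}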

Response

status string
"ok" on success
cURL

curl -X POST "http://localhost:8000/api/memory/settings" \
  -H "Authorization: Bearer <token>" \
  -H "Content-Type: application/json" \
  -d '{
    "memory_backend": "mem0",
    "mem0_llm_provider": "ollama",
    "mem0_llm_model": "llama3.2",
    "mem0_embedder_provider": "ollama",
    "mem0_embedder_model": "nomic-embed-text",
    "mem0_vector_store": "qdrant",
    "mem0_auto_learn": true
  }'

JavaScript

const response = await fetch("http://localhost:8000/api/memory/settings", {
  method: "POST",
  headers: {
    "Authorization": "Bearer <token>",
    "Content-Type": "application/json"
  },
  body: JSON.stringify({
    memory_backend: "mem0",
    mem0_llm_provider: "ollama",
    mem0_llm_model: "llama3.2",
    mem0_embedder_provider: "ollama",
    mem0_embedder_model: "nomic-embed-text",
    mem0_vector_store: "qdrant",
    mem0_auto_learn: true
  })
});
const data = await response.json();
console.log(data);

Python

import requests

response = requests.post(
    "http://localhost:8000/api/memory/settings",
    headers={"Authorization": "Bearer <token>"},
    json={
        "memory_backend": "mem0",
        "mem0_llm_provider": "ollama",
        "mem0_llm_model": "llama3.2",
        "mem0_embedder_provider": "ollama",
        "mem0_embedder_model": "nomic-embed-text",
        "mem0_vector_store": "qdrant",
        "mem0_auto_learn": True
    }
)
print(response.json())

Example response

{
  "status": "ok"
}

Go

package main

import (
    "fmt"
    "io"
    "net/http"
    "strings"
)

func main() {
    // Same settings payload as the examples above.
    payload := `{
        "memory_backend": "mem0",
        "mem0_llm_provider": "ollama",
        "mem0_llm_model": "llama3.2",
        "mem0_embedder_provider": "ollama",
        "mem0_embedder_model": "nomic-embed-text",
        "mem0_vector_store": "qdrant",
        "mem0_auto_learn": true
    }`

    req, _ := http.NewRequest("POST", "http://localhost:8000/api/memory/settings", strings.NewReader(payload))
    req.Header.Set("Content-Type", "application/json")
    req.Header.Set("Authorization", "Bearer <token>")

    client := &http.Client{}
    resp, _ := client.Do(req)
    defer resp.Body.Close()

    body, _ := io.ReadAll(resp.Body)
    fmt.Println(string(body))
}
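
As a closing usage note, a short Python sketch of verifying the result: it fails fast on HTTP-level errors and then checks the documented status field. The shape of error response bodies is not documented on this page, so only the success indicator is inspected.

import requests

payload = {
    "memory_backend": "mem0",
    "mem0_llm_provider": "ollama",
    "mem0_llm_model": "llama3.2",
    "mem0_embedder_provider": "ollama",
    "mem0_embedder_model": "nomic-embed-text",
    "mem0_vector_store": "qdrant",
    "mem0_auto_learn": True,
}

response = requests.post(
    "http://localhost:8000/api/memory/settings",
    headers={"Authorization": "Bearer <token>"},
    json=payload,
)

# Fail fast on HTTP-level errors, then confirm the documented success indicator.
response.raise_for_status()
if response.json().get("status") != "ok":
    raise RuntimeError(f"Settings update not acknowledged: {response.text}")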