"ok" on success/api/memory/settings BearerUpdate Memory Settings
Update PocketPaw's memory backend configuration: switch between file store and Mem0, configure LLM and embedder providers, set vector store options, and toggle auto-learning.
Overview
Updates the memory backend configuration. Changes take effect immediately — the memory manager is reloaded with the new settings.
Request Body
Body Parameters
memory_backend: Memory backend to use. file is the default file-based store; mem0 enables semantic memory with auto-learn. Allowed values: file, mem0.
mem0_llm_provider: LLM provider for mem0's extraction and summarization. Allowed values: ollama, openai, anthropic.
mem0_llm_model: LLM model name (e.g., llama3.2, gpt-4o-mini).
mem0_embedder_provider: Embedding provider for semantic search. Allowed values: ollama, openai.
mem0_embedder_model: Embedding model name (e.g., nomic-embed-text, text-embedding-3-small).
mem0_vector_store: Vector store backend. Allowed values: qdrant.
mem0_ollama_base_url: Ollama server URL (default: http://localhost:11434).
mem0_auto_learn: Enable automatic memory extraction from conversations. A minimal request using only a subset of these fields is sketched below.
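
For example, switching back to the default file-based store only requires the memory_backend field. This is a minimal sketch using Python's requests library; it assumes the endpoint accepts a partial body and that omitted mem0_* fields are simply ignored when the file backend is active, which this page does not state explicitly.

import requests

# Minimal sketch: switch back to the default file-based store.
# Assumption: omitted mem0_* fields are ignored for the file backend
# (not stated on this page).
response = requests.post(
    "http://localhost:8000/api/memory/settings",
    headers={"Authorization": "Bearer <token>"},
    json={"memory_backend": "file"},
)
print(response.json())  # expected: {"status": "ok"}
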
Response
status (string): "ok" on success.

Example response:

{
  "status": "ok"
}

Request Examples

cURL

curl -X POST "http://localhost:8000/api/memory/settings" \
  -H "Authorization: Bearer <token>" \
  -H "Content-Type: application/json" \
  -d '{
    "memory_backend": "mem0",
    "mem0_llm_provider": "ollama",
    "mem0_llm_model": "llama3.2",
    "mem0_embedder_provider": "ollama",
    "mem0_embedder_model": "nomic-embed-text",
    "mem0_vector_store": "qdrant",
    "mem0_auto_learn": true
  }'

JavaScript

const response = await fetch("http://localhost:8000/api/memory/settings", {
  method: "POST",
  headers: {
    "Authorization": "Bearer <token>",
    "Content-Type": "application/json"
  },
  body: JSON.stringify({
    memory_backend: "mem0",
    mem0_llm_provider: "ollama",
    mem0_llm_model: "llama3.2",
    mem0_embedder_provider: "ollama",
    mem0_embedder_model: "nomic-embed-text",
    mem0_vector_store: "qdrant",
    mem0_auto_learn: true
  })
});
const data = await response.json();
console.log(data);

Python

import requests

response = requests.post(
    "http://localhost:8000/api/memory/settings",
    headers={"Authorization": "Bearer <token>"},
    json={
        "memory_backend": "mem0",
        "mem0_llm_provider": "ollama",
        "mem0_llm_model": "llama3.2",
        "mem0_embedder_provider": "ollama",
        "mem0_embedder_model": "nomic-embed-text",
        "mem0_vector_store": "qdrant",
        "mem0_auto_learn": True
    }
)
print(response.json())
-H "Content-Type: application/json" \
-H "Authorization: Bearer <token>"const response = await fetch("http://localhost:8000/api/memory/settings", {
method: "POST",
headers: {
"Content-Type": "application/json",
"Authorization": "Bearer <token>"
},
});
const data = await response.json();
console.log(data);import requests
response = requests.post(
"http://localhost:8000/api/memory/settings",
headers={'Content-Type':'application/json','Authorization':'Bearer <token>'},
)
Go

package main

import (
	"fmt"
	"io"
	"net/http"
	"strings"
)

func main() {
	// JSON body using the documented settings fields.
	payload := strings.NewReader(`{
		"memory_backend": "mem0",
		"mem0_llm_provider": "ollama",
		"mem0_llm_model": "llama3.2",
		"mem0_embedder_provider": "ollama",
		"mem0_embedder_model": "nomic-embed-text",
		"mem0_vector_store": "qdrant",
		"mem0_auto_learn": true
	}`)
	req, _ := http.NewRequest("POST", "http://localhost:8000/api/memory/settings", payload)
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer <token>")
	client := &http.Client{}
	resp, _ := client.Do(req)
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body))
}
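
As a rough sketch of handling the result in Python: the documented success shape is {"status": "ok"}, so anything else can be treated as a failure. The payload below is illustrative, and the error format for invalid settings is not documented on this page, so the check only looks at the HTTP status and the status field.

import requests

response = requests.post(
    "http://localhost:8000/api/memory/settings",
    headers={"Authorization": "Bearer <token>"},
    json={"memory_backend": "mem0", "mem0_auto_learn": True},  # illustrative partial payload
)

# Documented success response is {"status": "ok"}; anything else is treated
# as a failure here. The exact error payload is not documented on this page.
if response.ok and response.json().get("status") == "ok":
    print("Memory settings updated; the memory manager reloads immediately.")
else:
    print(f"Update failed: HTTP {response.status_code}: {response.text}")
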