Loading...
Loading...
Fully local multi-agent swarm intelligence simulation engine using Neo4j + Ollama for public opinion, market sentiment, and social dynamics prediction.
npx skill4agent add aradotso/trending-skills mirofish-offline-simulation

Skill by ara.so — Daily 2026 Skills collection.
Document Input
│
▼
Graph Build (NER + relationship extraction via Ollama LLM)
│
▼
Neo4j Knowledge Graph (entities, relations, embeddings via nomic-embed-text)
│
▼
Env Setup (generate hundreds of agent personas with personalities + memory)
│
▼
Simulation (agents post, reply, argue, shift opinions on simulated platforms)
│
▼
Report (ReportAgent interviews focus group, queries graph, generates analysis)
│
▼
Interaction (chat with any individual agent, full memory persists)

git clone https://github.com/nikmcfly/MiroFish-Offline.git
cd MiroFish-Offline
cp .env.example .env
# Start Neo4j + Ollama + MiroFish backend + frontend
docker compose up -d
# Pull required models into the Ollama container
docker exec mirofish-ollama ollama pull qwen2.5:32b
docker exec mirofish-ollama ollama pull nomic-embed-text
# Check all services are healthy
docker compose ps

# Frontend available at http://localhost:3000

docker run -d --name neo4j \
-p 7474:7474 -p 7687:7687 \
-e NEO4J_AUTH=neo4j/mirofish \
neo4j:5.15-community

ollama serve &
ollama pull qwen2.5:32b # Main LLM (~20GB, requires 24GB VRAM)
ollama pull qwen2.5:14b # Lighter option (~10GB VRAM)
ollama pull nomic-embed-text   # Embeddings (small, fast)

cp .env.example .env
# Edit .env (see Configuration section)
cd backend
pip install -r requirements.txt
python run.py
# Backend starts on http://localhost:5000

cd frontend
npm install
npm run dev
# Frontend starts on http://localhost:3000

# .env
# ── LLM (Ollama OpenAI-compatible endpoint) ──────────────────────────
LLM_API_KEY=ollama
LLM_BASE_URL=http://localhost:11434/v1
LLM_MODEL_NAME=qwen2.5:32b
# ── Neo4j ─────────────────────────────────────────────────────────────
NEO4J_URI=bolt://localhost:7687
NEO4J_USER=neo4j
NEO4J_PASSWORD=mirofish
# ── Embeddings (Ollama) ───────────────────────────────────────────────
EMBEDDING_MODEL=nomic-embed-text
EMBEDDING_BASE_URL=http://localhost:11434
# ── Optional: swap Ollama for any OpenAI-compatible provider ─────────
# LLM_API_KEY=$OPENAI_API_KEY
# LLM_BASE_URL=https://api.openai.com/v1
# LLM_MODEL_NAME=gpt-4o

import os

from backend.storage.base import GraphStorage
from backend.storage.neo4j_storage import Neo4jStorage

# Initialize storage (typically done once via Flask app.extensions).
# Connection, credential, and model settings all come from the environment
# (see the Configuration section / .env.example).
storage = Neo4jStorage(
    uri=os.environ["NEO4J_URI"],
    user=os.environ["NEO4J_USER"],
    password=os.environ["NEO4J_PASSWORD"],
    embedding_model=os.environ["EMBEDDING_MODEL"],
    embedding_base_url=os.environ["EMBEDDING_BASE_URL"],
    llm_base_url=os.environ["LLM_BASE_URL"],
    llm_api_key=os.environ["LLM_API_KEY"],
    llm_model=os.environ["LLM_MODEL_NAME"],
)

from backend.services.graph_builder import GraphBuilder
builder = GraphBuilder(storage=storage)

# Feed a document string.
with open("press_release.txt", "r", encoding="utf-8") as f:
    document_text = f.read()

# Extract entities + relationships via the LLM and store them in Neo4j.
graph_id = builder.build(
    content=document_text,
    title="Q4 Earnings Report",
    source_type="financial_report",
)
print(f"Graph built: {graph_id}")
# Returns a graph_id used for subsequent simulation runs.

from backend.services.simulation import SimulationService
sim = SimulationService(storage=storage)

# Create a simulation environment from an existing graph.
sim_id = sim.create_environment(
    graph_id=graph_id,
    agent_count=200,        # Number of agents to generate
    simulation_hours=24,    # Simulated time span
    platform="twitter",     # "twitter" | "reddit" | "weibo"
)

# Run the simulation (blocking — use an async wrapper for production).
result = sim.run(sim_id=sim_id)
print(f"Simulation complete. Posts generated: {result['post_count']}")
print(f"Sentiment trajectory: {result['sentiment_over_time']}")

from backend.services.report import ReportAgent
report_agent = ReportAgent(storage=storage)

# Generate a structured analysis report: the ReportAgent interviews a focus
# group of simulated agents and (optionally) queries the knowledge graph.
report = report_agent.generate(
    sim_id=sim_id,
    focus_group_size=10,       # Number of agents to interview
    include_graph_search=True,
)
print(report["summary"])
print(report["key_narratives"])
print(report["sentiment_shift"])
print(report["influential_agents"])

from backend.services.agent_chat import AgentChatService
chat = AgentChatService(storage=storage)

# List agents from a completed simulation.
agents = chat.list_agents(sim_id=sim_id, limit=10)
agent_id = agents[0]["id"]
print(f"Chatting with: {agents[0]['persona']['name']}")
print(f"Personality: {agents[0]['persona']['traits']}")

# Send a message — the agent responds in-character with full memory.
response = chat.send(
    agent_id=agent_id,
    message="Why did you post that criticism about the earnings report?",
)
print(response["reply"])
# → Agent responds using its personality, opinion bias, and post history

from backend.services.search import SearchService
search = SearchService(storage=storage)

# Hybrid search: 0.7 * vector similarity + 0.3 * BM25.
results = search.query(
    text="executive compensation controversy",
    graph_id=graph_id,
    top_k=5,
    vector_weight=0.7,
    bm25_weight=0.3,
)
for r in results:
    print(r["entity"], r["relationship"], r["score"])

from backend.storage.base import GraphStorage
from typing import Any, Dict, List


class MyCustomStorage(GraphStorage):
    """
    Swap Neo4j for any graph DB by implementing this interface.

    Register via Flask app.extensions['neo4j_storage'] = MyCustomStorage(...)
    """

    def store_entity(self, entity: Dict[str, Any]) -> str:
        """Persist one entity and return its entity_id."""
        raise NotImplementedError

    def store_relationship(
        self,
        source_id: str,
        target_id: str,
        relation_type: str,
        properties: Dict[str, Any],
    ) -> str:
        """Persist a typed edge between two entities and return its id."""
        raise NotImplementedError

    def vector_search(
        self, embedding: List[float], top_k: int = 5
    ) -> List[Dict[str, Any]]:
        """Return the top_k entries most similar to *embedding*."""
        raise NotImplementedError

    def keyword_search(
        self, query: str, top_k: int = 5
    ) -> List[Dict[str, Any]]:
        """Return the top_k keyword (BM25-style) matches for *query*."""
        raise NotImplementedError

    def get_agent_memory(self, agent_id: str) -> Dict[str, Any]:
        """Fetch the persisted memory for a single agent."""
        raise NotImplementedError

    def update_agent_memory(
        self, agent_id: str, memory_update: Dict[str, Any]
    ) -> None:
        """Apply *memory_update* to the agent's persisted memory."""
        raise NotImplementedError

# backend/app.py — how storage is wired via dependency injection
from flask import Flask
from backend.storage.neo4j_storage import Neo4jStorage
import os


def create_app():
    """Flask application factory: builds the app and wires storage via DI."""
    app = Flask(__name__)

    # Single storage instance, injected everywhere via app.extensions.
    storage = Neo4jStorage(
        uri=os.environ["NEO4J_URI"],
        user=os.environ["NEO4J_USER"],
        password=os.environ["NEO4J_PASSWORD"],
        embedding_model=os.environ["EMBEDDING_MODEL"],
        embedding_base_url=os.environ["EMBEDDING_BASE_URL"],
        llm_base_url=os.environ["LLM_BASE_URL"],
        llm_api_key=os.environ["LLM_API_KEY"],
        llm_model=os.environ["LLM_MODEL_NAME"],
    )
    app.extensions["neo4j_storage"] = storage

    # Imported here (not at module top) to avoid circular imports between
    # routes and the app factory.
    from backend.routes import graph_bp, simulation_bp, report_bp
    app.register_blueprint(graph_bp)
    app.register_blueprint(simulation_bp)
    app.register_blueprint(report_bp)
    return app

from flask import Blueprint, current_app, request, jsonify
simulation_bp = Blueprint("simulation", __name__)
@simulation_bp.route("/api/simulation/run", methods=["POST"])
def run_simulation():
storage = current_app.extensions["neo4j_storage"]
data = request.json
sim = SimulationService(storage=storage)
sim_id = sim.create_environment(
graph_id=data["graph_id"],
agent_count=data.get("agent_count", 200),
simulation_hours=data.get("simulation_hours", 24),
)
result = sim.run(sim_id=sim_id)
return jsonify(result)| Method | Endpoint | Description |
|---|---|---|
| | Upload document, build knowledge graph |
| | Get graph entities and relationships |
| | Create simulation environment |
| | Execute simulation |
| | Get posts, sentiment, metrics |
| | List generated agents |
| | Generate ReportAgent analysis |
| | Chat with a specific agent |
| | Hybrid search the knowledge graph |
curl -X POST http://localhost:5000/api/graph/build \
-H "Content-Type: application/json" \
-d '{
"content": "Acme Corp announces record Q4 earnings, CFO resigns...",
"title": "Q4 Press Release",
"source_type": "press_release"
}'
# → {"graph_id": "g_abc123", "entities": 47, "relationships": 89}

curl -X POST http://localhost:5000/api/simulation/run \
-H "Content-Type: application/json" \
-d '{
"graph_id": "g_abc123",
"agent_count": 150,
"simulation_hours": 12,
"platform": "twitter"
}'
# → {"sim_id": "s_xyz789", "status": "running"}

| Use Case | Model | VRAM | RAM |
|---|---|---|---|
| Quick test / dev | | 6 GB | 16 GB |
| Balanced quality | | 10 GB | 16 GB |
| Production quality | | 24 GB | 32 GB |
| CPU-only (slow) | | None | 16 GB |
# .env
# LLM_MODEL_NAME=qwen2.5:14b

import os

from backend.storage.neo4j_storage import Neo4jStorage
from backend.services.graph_builder import GraphBuilder
from backend.services.simulation import SimulationService
from backend.services.report import ReportAgent

storage = Neo4jStorage(
    uri=os.environ["NEO4J_URI"],
    user=os.environ["NEO4J_USER"],
    password=os.environ["NEO4J_PASSWORD"],
    embedding_model=os.environ["EMBEDDING_MODEL"],
    embedding_base_url=os.environ["EMBEDDING_BASE_URL"],
    llm_base_url=os.environ["LLM_BASE_URL"],
    llm_api_key=os.environ["LLM_API_KEY"],
    llm_model=os.environ["LLM_MODEL_NAME"],
)


def test_press_release(text: str) -> dict:
    """Run the full pipeline on a draft press release and summarize the risk."""
    # 1. Build knowledge graph
    builder = GraphBuilder(storage=storage)
    graph_id = builder.build(content=text, title="Draft PR", source_type="press_release")

    # 2. Simulate public reaction
    sim = SimulationService(storage=storage)
    sim_id = sim.create_environment(graph_id=graph_id, agent_count=300, simulation_hours=48)
    sim.run(sim_id=sim_id)

    # 3. Generate report
    report = ReportAgent(storage=storage).generate(sim_id=sim_id, focus_group_size=15)
    return {
        # NOTE(review): index [0] is the first sample of the series, not
        # necessarily the peak — confirm the intended semantics of
        # sentiment_over_time before relying on this value.
        "sentiment_peak": report["sentiment_over_time"][0],
        "key_narratives": report["key_narratives"],
        "risk_score": report["risk_score"],
        "recommended_edits": report["recommendations"],
    }


# Usage
with open("draft_announcement.txt", encoding="utf-8") as f:
    result = test_press_release(f.read())
print(f"Risk score: {result['risk_score']}/10")
print(f"Top narrative: {result['key_narratives'][0]}")

# Claude via Anthropic (or any proxy)
LLM_API_KEY=$ANTHROPIC_API_KEY
LLM_BASE_URL=https://api.anthropic.com/v1
LLM_MODEL_NAME=claude-3-5-sonnet-20241022
# OpenAI
LLM_API_KEY=$OPENAI_API_KEY
LLM_BASE_URL=https://api.openai.com/v1
LLM_MODEL_NAME=gpt-4o
# Local LM Studio
LLM_API_KEY=lm-studio
LLM_BASE_URL=http://localhost:1234/v1
LLM_MODEL_NAME=your-loaded-model

# Check Neo4j is running
docker ps | grep neo4j
# Check bolt port
nc -zv localhost 7687
# View Neo4j logs
docker logs neo4j --tail 50

# List available models
ollama list
# Pull missing models
ollama pull qwen2.5:32b
ollama pull nomic-embed-text
# Check Ollama is serving
curl http://localhost:11434/api/tags

# Switch to smaller model in .env
LLM_MODEL_NAME=qwen2.5:14b # or qwen2.5:7b
# Restart backend
cd backend && python run.py

# nomic-embed-text produces 768-dim vectors
# If you switch embedding models, drop and recreate the Neo4j vector index:
# In Neo4j browser (http://localhost:7474):
# DROP INDEX entity_embedding IF EXISTS;
# Then restart MiroFish — it recreates the index with correct dimensions.

# docker-compose.yml — add GPU reservation:
services:
  ollama:
    deploy:
      resources:
        reservations:
          devices:
            # Reserve one NVIDIA GPU for the Ollama container.
            - driver: nvidia
              count: 1
              capabilities: [gpu]

# Check VITE_API_BASE_URL in frontend/.env
VITE_API_BASE_URL=http://localhost:5000
# Verify backend is up
curl http://localhost:5000/api/health

MiroFish-Offline/
├── backend/
│ ├── run.py # Entry point
│ ├── app.py # Flask factory, DI wiring
│ ├── storage/
│ │ ├── base.py # GraphStorage abstract interface
│ │ └── neo4j_storage.py # Neo4j implementation
│ ├── services/
│ │ ├── graph_builder.py # NER + relationship extraction
│ │ ├── simulation.py # Agent simulation engine
│ │ ├── report.py # ReportAgent + focus group
│ │ ├── agent_chat.py # Per-agent chat interface
│ │ └── search.py # Hybrid vector + BM25 search
│ └── routes/
│ ├── graph.py
│ ├── simulation.py
│ └── report.py
├── frontend/ # Vue 3 (fully English UI)
├── docker-compose.yml
├── .env.example
└── README.md