Many updates including various AI model type support

This commit is contained in:
2025-11-26 09:47:53 +09:00
parent bd38924133
commit 0afe3f49ff
71 changed files with 2573 additions and 156 deletions

View File

@@ -1,7 +1,6 @@
# agents/collaboration_engine.py
import time
from agent_registry import get_agent
from agents.role_registry import role_registry
from memory.episodic_store import episodic_store
@@ -12,7 +11,6 @@ from tenants.rbac_guard import enforce_rbac
from agents.agent_messenger import agent_messenger # NEW
from agents.shared_goal_registry import shared_goal_registry
class CollaborationEngine:
def __init__(self):
    # Ordered log of collaboration steps appended during a run.
    self.trace = []

View File

@@ -8,6 +8,8 @@ from agents.goal_store import goal_store
from agents.notification_center import notification_center
from agents.agent_core import sandbox_agent_run_with_tools
##INFO: for 'goal_heatmap'
from monitoring.goal_heatmap import goal_heatmap # ⬅️ Add this import
class GoalSession:
def __init__(self, tenant_id: str, goal: str, roles: list):
@@ -55,6 +57,15 @@ class GoalSession:
self.execute_tasks()
goal_store.save_session(self.tenant_id, self.session_id, self.goal, self.tasks, self.trace)
# ⬇️ Log successful completions for heatmap
for entry in self.trace:
goal_heatmap.log(
tenant_id=self.tenant_id,
agent_role=entry["role"],
goal_id=self.session_id,
success=True # You can refine this if needed
)
return {
"session_id": self.session_id,
"goal": self.goal,

View File

@@ -0,0 +1,6 @@
{
"planner": "Cluster A",
"retriever": "Cluster B",
"summarizer": "Cluster B",
"default": "Unclassified"
}

12
db/database.py Normal file
View File

@@ -0,0 +1,12 @@
# db/models.py
from sqlalchemy import Column, Integer, String, JSON, DateTime, func
from db.database import Base
class Memory(Base):
    """ORM row storing one transcript together with its response and emotion payloads."""
    __tablename__ = "memories"
    id = Column(Integer, primary_key=True, index=True)
    # Raw input transcript text.
    transcript = Column(String, nullable=False)
    # Structured response payload (stored as JSON).
    response = Column(JSON, nullable=False)
    # Structured emotion analysis payload (stored as JSON).
    emotion = Column(JSON, nullable=False)
    # Timezone-aware insert timestamp, set by the database server.
    timestamp = Column(DateTime(timezone=True), server_default=func.now())

37
db/memory_repository.py Normal file
View File

@@ -0,0 +1,37 @@
# db/memory_repository.py
from db.database import SessionLocal
from db.models import Memory
from vector_store.base import get_vector_store
vector_store = get_vector_store()
def save_memory(transcript: str, response: dict, emotion: dict):
    """Persist one memory row and index its transcript in the vector store.

    Returns the refreshed ``Memory`` instance.

    Fix: the session was previously leaked when any step raised; it is now
    always closed, and a failed transaction is rolled back before re-raising.
    """
    db = SessionLocal()
    try:
        memory = Memory(transcript=transcript, response=response, emotion=emotion)
        db.add(memory)
        db.commit()
        db.refresh(memory)
        # Index the transcript for semantic search only after the row is committed.
        vector_store.add_document(transcript)
        return memory
    except Exception:
        # Don't leave the session in a dirty/partial transaction state.
        db.rollback()
        raise
    finally:
        db.close()
def get_memories(limit: int = 50):
    """Return the most recent memories, newest first (up to *limit* rows).

    Fix: the session is now closed even if the query raises.
    """
    db = SessionLocal()
    try:
        return db.query(Memory).order_by(Memory.timestamp.desc()).limit(limit).all()
    finally:
        db.close()
def search_memories(query: str, k: int = 5):
    """Semantic search: query the vector store, then map hits back to Memory rows.

    Vector hits whose transcript has no matching row are silently dropped;
    result order follows the vector store's ranking.

    Fix: the session is now closed even if a lookup raises.
    """
    docs = vector_store.query(query, k=k)
    db = SessionLocal()
    try:
        results = []
        for doc in docs:
            # Transcripts are the join key between the vector store and the DB.
            mem = db.query(Memory).filter(Memory.transcript == doc.page_content).first()
            if mem:
                results.append(mem)
        return results
    finally:
        db.close()

0
db/models.py Normal file
View File

10
dependencies/goal_heatmap_provider.py vendored Normal file
View File

@@ -0,0 +1,10 @@
# dependencies/goal_heatmap_provider.py
import os
from monitoring.goal_heatmap import get_goal_heatmap
from database import get_db # your SQLAlchemy session factory
from fastapi import Depends
def provide_goal_heatmap(db=Depends(get_db)):
    """FastAPI dependency that yields the configured goal-heatmap backend.

    The SQL backend is selected only when GOAL_HEATMAP_BACKEND == "sql";
    otherwise the DB session is withheld and the in-memory backend is used.
    """
    use_sql = os.getenv("GOAL_HEATMAP_BACKEND", "memory") == "sql"
    return get_goal_heatmap(db_session=db if use_sql else None)

View File

@@ -4,6 +4,7 @@
class PolicyRegistry:
def __init__(self):
    # Per-tenant policies: {tenant_id: {allowed_roles, restricted_tasks, audit_level}}
    self.policies = {}
    # Platform-wide policy definitions shared by every tenant.
    self.global_policies = self._load_global_policies()
def set_policy(self, tenant_id: str, allowed_roles: list, restricted_tasks: list, audit_level: str = "standard"):
self.policies[tenant_id] = {
@@ -23,4 +24,40 @@ class PolicyRegistry:
def get_all(self):
return self.policies
def _load_global_policies(self):
    """Build the built-in, platform-wide policy catalog.

    Each entry maps a policy name to its description, value type,
    default, and whether it is actively enforced.
    """
    catalog = {}
    catalog["max_goal_execution_time"] = {
        "description": "Maximum time allowed for goal execution",
        "type": "duration",
        "default": "5m",
        "enforced": True,
    }
    catalog["agent_invocation_limit"] = {
        "description": "Max number of agent invocations per hour",
        "type": "integer",
        "default": 100,
        "enforced": True,
    }
    catalog["memory_access_scope"] = {
        "description": "Defines which memory types a role can access",
        "type": "enum",
        "options": ["episodic", "semantic", "graph"],
        "default": ["episodic", "semantic"],
        "enforced": True,
    }
    catalog["plugin_access_level"] = {
        "description": "Controls access to plugin marketplace",
        "type": "enum",
        "options": ["none", "read", "install"],
        "default": "read",
        "enforced": False,
    }
    return catalog
def get_global_policy(self, name: str):
    """Return the global policy definition for *name*, or None if unknown."""
    try:
        return self.global_policies[name]
    except KeyError:
        return None
def list_global_policies(self):
    """Return the names of all global policies as a list."""
    return [policy_name for policy_name in self.global_policies]
policy_registry = PolicyRegistry()

View File

@@ -1,10 +1,12 @@
# governance/sla_monitor.py
import time
from datetime import datetime
class SLAMonitor:
def __init__(self):
    # Every SLA check record (both breaches and passes are appended).
    self.violations = []  # SLA breaches
    # Per-agent rolling metrics: {agent_role: {uptime, latency, goals}}
    self.agent_metrics = {}
def check_sla(self, agent_role: str, task: str, latency: float, output: str, sla):
breach = latency > sla["max_latency"] or sla["success_criteria"] not in output
@@ -17,12 +19,39 @@ class SLAMonitor:
"breach": breach
}
self.violations.append(record)
self._log_metrics(agent_role, latency, task, output, not breach)
return record
def _log_metrics(self, agent_role: str, latency: float, task: str, output: str, success: bool):
    """Append uptime, latency, and goal datapoints for one agent invocation."""
    if agent_role not in self.agent_metrics:
        self.agent_metrics[agent_role] = {"uptime": [], "latency": [], "goals": []}
    bucket = self.agent_metrics[agent_role]
    bucket["uptime"].append(datetime.utcnow())
    bucket["latency"].append(latency)
    bucket["goals"].append({
        "task": task,
        "output": output,
        "success": success,
        "timestamp": datetime.utcnow(),
    })
def get_violations(self):
    """Return all SLA check records (appended regardless of the breach flag)."""
    return self.violations
def get_by_agent(self, agent_role: str):
    """Return all SLA check records logged for *agent_role*."""
    return [record for record in self.violations if record["agent"] == agent_role]
def get_summary(self, agent_role: str):
    """Aggregate uptime count, mean latency, and completed-goal count for one agent."""
    metrics = self.agent_metrics.get(agent_role, {})
    uptimes = metrics.get("uptime", [])
    latencies = metrics.get("latency", [])
    goals = metrics.get("goals", [])
    completed = sum(1 for goal in goals if goal["success"])
    return {
        "uptime_count": len(uptimes),
        "avg_latency_ms": self._average(latencies),
        "goals_completed": completed,
    }
def _average(self, values):
    """Arithmetic mean of *values*; 0 for an empty sequence."""
    if not values:
        return 0
    return sum(values) / len(values)
sla_monitor = SLAMonitor()

View File

@@ -0,0 +1,34 @@
name: agentic_mobile
description: Agentic control and voice interface for mobile
# ✅ All packages are compatible with Flutter 3.x
# and support Android/iOS.
environment:
sdk: ">=3.0.0 <4.0.0"
dependencies:
flutter:
sdk: flutter
# 🎙️ Voice input
speech_to_text: ^5.5.0
# 🔊 Audio playback
audioplayers: ^5.2.0
# 📈 Static waveform rendering
waveform_flutter: ^0.2.0
# 🔴 Live waveform during recording
audio_waveforms: ^1.0.0
path_provider: ^2.1.1
permission_handler: ^11.0.0
# 🌐 HTTP requests
http: ^1.2.0
dev_dependencies:
flutter_test:
sdk: flutter

17
models/base_model.py Normal file
View File

@@ -0,0 +1,17 @@
# models/base_model.py
from abc import ABC, abstractmethod
class BaseModel(ABC):
    """Abstract interface for all local AI models."""

    @abstractmethod
    def generate(self, prompt: str) -> str:
        """Return generated output text for *prompt*."""
        pass

    @abstractmethod
    def embed(self, text: str) -> list[float]:
        """Return an embedding vector for *text*."""
        pass

    @abstractmethod
    def analyze(self, input_data: dict) -> dict:
        """Run model-specific analysis over *input_data* and return a result dict."""
        pass

View File

@@ -0,0 +1,16 @@
# models/embedding_handler.py
from models.base_model import BaseModel
from models.embedding_loader import get_embedding_by_name
class EmbeddingHandler(BaseModel):
    """Adapter exposing an embedding backend through the BaseModel interface."""

    def __init__(self, engine: str):
        # Resolve the concrete embedding backend by engine name.
        self.model = get_embedding_by_name(engine)

    def generate(self, prompt: str) -> str:
        # Embedding backends cannot produce text.
        return "[Embedding models do not generate text]"

    def embed(self, text: str) -> list[float]:
        vector = self.model.embed_query(text)
        return vector

    def analyze(self, input_data: dict) -> dict:
        return {"analysis": "Embedding only"}

View File

@@ -19,6 +19,3 @@ def get_embedding_by_name(name):
return GPT4AllEmbeddings(model_name="nomic-embed-text-v1.5")
else:
raise ValueError(f"Unsupported embedding engine: {name}")

17
models/goal_completion.py Normal file
View File

@@ -0,0 +1,17 @@
# models/goal_completion.py
# Fix: `Float` was used by the `reward` column but never imported,
# which made this module raise NameError at import time.
from sqlalchemy import Column, String, DateTime, Boolean, Float
from database import Base
import uuid
from datetime import datetime


class GoalCompletion(Base):
    """One goal execution outcome by an agent, consumed by the goal heatmap."""
    __tablename__ = "goal_completions"

    id = Column(String, primary_key=True, default=lambda: str(uuid.uuid4()))
    tenant_id = Column(String, nullable=False)
    agent = Column(String, nullable=False)
    goal_id = Column(String, nullable=False)
    # NOTE(review): naive UTC timestamp (datetime.utcnow) — confirm callers expect UTC.
    timestamp = Column(DateTime, default=datetime.utcnow)
    success = Column(Boolean, default=True)
    reward = Column(Float, default=1.0)

15
models/lam_handler.py Normal file
View File

@@ -0,0 +1,15 @@
# models/lam_handler.py
from models.base_model import BaseModel
class LAMHandler(BaseModel):
    """Placeholder handler for Large Action Models (LAM)."""

    def __init__(self):
        # No backend is loaded yet; this handler is a stub.
        pass

    def generate(self, prompt: str) -> str:
        return "[LAM aligned output stub]"

    def embed(self, text: str) -> list[float]:
        # The stub exposes no embedding capability.
        return []

    def analyze(self, input_data: dict) -> dict:
        return {"analysis": "LAM stub"}

15
models/lcm_handler.py Normal file
View File

@@ -0,0 +1,15 @@
# models/lcm_handler.py
from models.base_model import BaseModel
class LCMHandler(BaseModel):
    """Placeholder handler for fast LCM-style generation."""

    def __init__(self):
        # No backend is loaded yet; this handler is a stub.
        pass

    def generate(self, prompt: str) -> str:
        return "[LCM fast generation stub]"

    def embed(self, text: str) -> list[float]:
        # The stub exposes no embedding capability.
        return []

    def analyze(self, input_data: dict) -> dict:
        return {"analysis": "LCM stub"}

17
models/llm_handler.py Normal file
View File

@@ -0,0 +1,17 @@
# models/llm_handler.py
from models.base_model import BaseModel
from models.llm_loader import get_llm_by_name
class LLMHandler(BaseModel):
    """Adapter exposing a text-generation LLM backend through BaseModel."""

    def __init__(self, engine: str):
        # Resolve the concrete LLM backend by engine name.
        self.model = get_llm_by_name(engine)

    def generate(self, prompt: str) -> str:
        completion = self.model(prompt)
        return completion

    def embed(self, text: str) -> list[float]:
        # fallback: use embedding model
        return []

    def analyze(self, input_data: dict) -> dict:
        return {"analysis": "LLM not specialized"}

View File

@@ -14,7 +14,6 @@ def get_llm():
else:
raise ValueError(f"Unsupported LLM engine: {LLM_ENGINE}")
def get_llm_by_name(name):
if name == "ollama":
return Ollama(model="llama2")
@@ -24,4 +23,3 @@ def get_llm_by_name(name):
return ChatOpenAI(openai_api_base="http://localhost:8000/v1", model="gpt-3.5-turbo")
else:
raise ValueError(f"Unsupported LLM engine: {name}")

20
models/mlm_handler.py Normal file
View File

@@ -0,0 +1,20 @@
# models/mlm_handler.py
from models.base_model import BaseModel
from transformers import AutoModelForMaskedLM, AutoTokenizer
class MLMHandler(BaseModel):
    """Masked-language-model handler backed by a HuggingFace BERT checkpoint."""

    def __init__(self):
        self.model_name = "bert-base-uncased"
        self.tokenizer = AutoTokenizer.from_pretrained(self.model_name)
        self.model = AutoModelForMaskedLM.from_pretrained(self.model_name)

    def generate(self, prompt: str) -> str:
        encoded = self.tokenizer(prompt, return_tensors="pt")
        # The forward pass runs, but its logits are not decoded yet (stub return).
        outputs = self.model(**encoded)
        return "[MLM fill-mask output stub]"

    def embed(self, text: str) -> list[float]:
        # The stub exposes no embedding capability.
        return []

    def analyze(self, input_data: dict) -> dict:
        return {"analysis": "MLM stub"}

View File

@@ -1,7 +1,5 @@
# models/model_routes.py
# models/model_router.py
from models.model_config import MODEL_CONFIG
from models.llm_loader import get_llm_by_name
from models.slm_loader import get_slm_by_name

16
models/moe_handler.py Normal file
View File

@@ -0,0 +1,16 @@
# models/moe_handler.py
from models.base_model import BaseModel
class MoEHandler(BaseModel):
    """Placeholder handler for a Mixture-of-Experts model."""

    def __init__(self):
        # stub: would load multiple experts and router
        self.experts = []

    def generate(self, prompt: str) -> str:
        return "[MoE output stub]"

    def embed(self, text: str) -> list[float]:
        # The stub exposes no embedding capability.
        return []

    def analyze(self, input_data: dict) -> dict:
        return {"analysis": "MoE stub"}

30
models/registry.py Normal file
View File

@@ -0,0 +1,30 @@
# models/registry.py
from config import LLM_ENGINE, SLM_ENGINE, EMBEDDING_ENGINE
from models.llm_handler import LLMHandler
from models.slm_handler import SLMHandler
from models.embedding_handler import EmbeddingHandler
from models.vlm_handler import VLMHandler
from models.moe_handler import MoEHandler
from models.lcm_handler import LCMHandler
from models.lam_handler import LAMHandler
from models.mlm_handler import MLMHandler
def get_model(model_type: str):
    """Instantiate and return the handler for *model_type*.

    Engine-backed handlers (llm/slm/embedding) are configured from the
    module-level engine constants; the rest take no arguments.

    Raises ValueError for an unknown model type.
    """
    factories = {
        "llm": lambda: LLMHandler(engine=LLM_ENGINE),
        "slm": lambda: SLMHandler(engine=SLM_ENGINE),
        "embedding": lambda: EmbeddingHandler(engine=EMBEDDING_ENGINE),
        "vlm": VLMHandler,
        "moe": MoEHandler,
        "lcm": LCMHandler,
        "lam": LAMHandler,
        "mlm": MLMHandler,
    }
    factory = factories.get(model_type)
    if factory is None:
        raise ValueError(f"Unsupported model type: {model_type}")
    return factory()

16
models/slm_handler.py Normal file
View File

@@ -0,0 +1,16 @@
# models/slm_handler.py
from models.base_model import BaseModel
from models.slm_loader import get_slm_by_name
class SLMHandler(BaseModel):
    """Adapter exposing a small-language-model backend through BaseModel."""

    def __init__(self, engine: str):
        # Resolve the concrete SLM backend by engine name.
        self.model = get_slm_by_name(engine)

    def generate(self, prompt: str) -> str:
        completion = self.model(prompt)
        return completion

    def embed(self, text: str) -> list[float]:
        # The handler exposes no embedding capability.
        return []

    def analyze(self, input_data: dict) -> dict:
        return {"analysis": "SLM not specialized"}

View File

@@ -1,19 +1,5 @@
# models/slm_loader.py
# from config import SLM_ENGINE
# from transformers import pipeline
# def get_slm():
# if SLM_ENGINE == "phi-3":
# return pipeline("text-generation", model="microsoft/phi-2")
# elif SLM_ENGINE == "gemma":
# return pipeline("text-generation", model="google/gemma-2b")
# else:
# raise ValueError(f"Unsupported SLM engine: {SLM_ENGINE}")
# models/slm_loader.py
from config import SLM_ENGINE
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch
@@ -41,5 +27,3 @@ def get_slm_by_name(name):
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
return lambda prompt: tokenizer.decode(model.generate(tokenizer(prompt, return_tensors="pt").input_ids)[0])

20
models/vlm_handler.py Normal file
View File

@@ -0,0 +1,20 @@
# models/vlm_handler.py
from models.base_model import BaseModel
from transformers import CLIPProcessor, CLIPModel
import torch
class VLMHandler(BaseModel):
    """Vision-language handler backed by OpenAI CLIP (text features only for now)."""

    def __init__(self):
        checkpoint = "openai/clip-vit-base-patch32"
        self.model = CLIPModel.from_pretrained(checkpoint)
        self.processor = CLIPProcessor.from_pretrained(checkpoint)

    def generate(self, prompt: str) -> str:
        # CLIP is an encoder; it has no text decoder.
        return "[VLM cannot generate text directly]"

    def embed(self, text: str) -> list[float]:
        batch = self.processor(text=[text], images=None, return_tensors="pt", padding=True)
        features = self.model.get_text_features(**batch)
        return features.detach().numpy().tolist()[0]

    def analyze(self, input_data: dict) -> dict:
        return {"analysis": "VLM multimodal stub"}

View File

@@ -0,0 +1,11 @@
# monitoring/goal_heatmap.py
import os
from monitoring.goal_heatmap_memory import InMemoryGoalHeatmap
from monitoring.goal_heatmap_sql import SQLGoalHeatmap
def get_goal_heatmap(db_session=None):
    """Return the heatmap backend selected by GOAL_HEATMAP_BACKEND.

    The SQL backend is used only when the env var is "sql" AND a session
    was supplied; otherwise the in-memory backend is returned.
    """
    wants_sql = os.getenv("GOAL_HEATMAP_BACKEND", "memory") == "sql"
    if wants_sql and db_session:
        return SQLGoalHeatmap(db_session)
    return InMemoryGoalHeatmap()

View File

@@ -0,0 +1,34 @@
# monitoring/goal_heatmap_base.py
from abc import ABC, abstractmethod
class GoalHeatmapBase(ABC):
    """Common interface shared by the in-memory and SQL goal-heatmap backends."""

    # Record one goal completion/failure event for an agent.
    @abstractmethod
    def log(self, tenant_id: str, agent_role: str, goal_id: str, success: bool): ...

    # Day-of-week x hour counts of completions, with optional filters.
    @abstractmethod
    def get_heatmap(self, tenant_id=None, agent_role=None, start_at=None, end_at=None): ...

    # Completions aggregated per week.
    @abstractmethod
    def get_weekly_trend(self, tenant_id=None, agent_role=None): ...

    # Weekly counts broken down per agent (or coarse cluster when cluster=True).
    @abstractmethod
    def get_weekly_stacked(self, tenant_id=None, start_at=None, end_at=None, cluster=False): ...

    # Weekly counts grouped by performance segment (e.g. by reward).
    @abstractmethod
    def get_segmented_trend(self, tenant_id=None, start_at=None, end_at=None, by="reward"): ...

    # Write heatmap rows to a CSV file at *path*.
    @abstractmethod
    def export_csv(self, path: str, tenant_id=None, agent_role=None, start_at=None, end_at=None): ...

    # Per-agent weekly counts for agents belonging to *segment*.
    @abstractmethod
    def get_segmented_agents(self, segment: str, tenant_id=None, start_at=None, end_at=None): ...

    # Write the segmented weekly trend to CSV.
    @abstractmethod
    def export_segmented_csv(self, tenant_id=None, start_at=None, end_at=None): ...

    # Cluster agents automatically (e.g. by reward history) into k groups.
    @abstractmethod
    def auto_cluster(self, tenant_id=None, start_at=None, end_at=None, k=3): ...

    # Replace the persisted role->cluster configuration.
    @abstractmethod
    def update_cluster_config(self, config: dict): ...

View File

@@ -0,0 +1,253 @@
# monitoring/goal_heatmap_memory.py
import json, os, csv
from datetime import datetime
from collections import defaultdict
from monitoring.goal_heatmap_base import GoalHeatmapBase
from sklearn.cluster import KMeans
import numpy as np
# Paths are relative to the process working directory.
DATA_PATH = "data/goal_heatmap.json"
CLUSTER_PATH = "config/cluster_config.json"


class InMemoryGoalHeatmap(GoalHeatmapBase):
    """Goal-completion heatmap backend persisted to a local JSON file.

    Records have the shape {timestamp, tenant_id, agent, goal_id, success}.
    Every log() call rewrites the whole JSON file, so this backend suits
    small, single-process deployments only.

    Fixes vs. the original: parent directories are created before any file
    write, and auto_cluster no longer crashes on an empty agent set or when
    k exceeds the number of agents.
    """

    def __init__(self):
        self.records = []  # [{timestamp, tenant_id, agent, goal_id, success}]
        self._load()

    def _load(self):
        """Load persisted records, parsing ISO timestamps back to datetime."""
        if not os.path.exists(DATA_PATH):
            return
        with open(DATA_PATH, "r") as f:
            raw = json.load(f)
        self.records = [
            {
                "timestamp": datetime.fromisoformat(r["timestamp"]),
                "tenant_id": r["tenant_id"],
                "agent": r["agent"],
                "goal_id": r["goal_id"],
                "success": r["success"],
            }
            for r in raw
        ]

    def _save(self):
        """Persist all records as JSON (timestamps serialized to ISO strings)."""
        # Fix: ensure the data directory exists before writing.
        os.makedirs(os.path.dirname(DATA_PATH) or ".", exist_ok=True)
        with open(DATA_PATH, "w") as f:
            json.dump(
                [
                    {
                        "timestamp": r["timestamp"].isoformat(),
                        "tenant_id": r["tenant_id"],
                        "agent": r["agent"],
                        "goal_id": r["goal_id"],
                        "success": r["success"],
                    }
                    for r in self.records
                ],
                f,
                indent=2,
            )

    def _load_clusters(self):
        """Return the role->cluster mapping from CLUSTER_PATH, or {} if absent."""
        if os.path.exists(CLUSTER_PATH):
            with open(CLUSTER_PATH, "r") as f:
                return json.load(f)
        return {}

    @staticmethod
    def _parse_range(start_at, end_at):
        """Parse optional ISO-format bounds into datetimes (None passes through)."""
        start_dt = datetime.fromisoformat(start_at) if start_at else None
        end_dt = datetime.fromisoformat(end_at) if end_at else None
        return start_dt, end_dt

    @staticmethod
    def _matches(r, tenant_id=None, agent_role=None, start_dt=None, end_dt=None):
        """True for successful records inside the optional tenant/agent/time filters."""
        if not r["success"]:
            return False
        if tenant_id and r["tenant_id"] != tenant_id:
            return False
        if agent_role and r["agent"] != agent_role:
            return False
        if start_dt and r["timestamp"] < start_dt:
            return False
        if end_dt and r["timestamp"] >= end_dt:
            return False
        return True

    @staticmethod
    def _write_csv(path, fieldnames, rows):
        """Write dict rows to CSV at *path*, creating parent dirs; returns *path*."""
        os.makedirs(os.path.dirname(path) or ".", exist_ok=True)
        with open(path, "w", newline="") as f:
            writer = csv.DictWriter(f, fieldnames=fieldnames)
            writer.writeheader()
            writer.writerows(rows)
        return path

    def log(self, tenant_id: str, agent_role: str, goal_id: str, success: bool):
        """Append one completion event (naive UTC timestamp) and persist immediately."""
        self.records.append({
            "timestamp": datetime.utcnow(),
            "tenant_id": tenant_id,
            "agent": agent_role,
            "goal_id": goal_id,
            "success": success,
        })
        self._save()

    def get_heatmap(self, tenant_id: str = None, agent_role: str = None, start_at: str = None, end_at: str = None):
        """Day-of-week x hour counts of successful completions."""
        start_dt, end_dt = self._parse_range(start_at, end_at)
        heatmap = defaultdict(lambda: defaultdict(int))  # {day: {hour: count}}
        for r in self.records:
            if not self._matches(r, tenant_id, agent_role, start_dt, end_dt):
                continue
            heatmap[r["timestamp"].strftime("%a")][r["timestamp"].hour] += 1
        return [{"day": d, "hour": f"{h}:00", "count": c} for d, hrs in heatmap.items() for h, c in hrs.items()]

    def get_weekly_trend(self, tenant_id=None, agent_role=None, start_at=None, end_at=None):
        """Successful completions per week label ('%Y-W%U'), sorted by week."""
        start_dt, end_dt = self._parse_range(start_at, end_at)
        trend = defaultdict(int)
        for r in self.records:
            if not self._matches(r, tenant_id, agent_role, start_dt, end_dt):
                continue
            trend[r["timestamp"].strftime("%Y-W%U")] += 1
        return [{"week": k, "count": v} for k, v in sorted(trend.items())]

    def get_weekly_stacked(self, tenant_id=None, start_at=None, end_at=None, cluster=False):
        """Weekly successful-completion counts stacked per agent (or coarse cluster)."""
        start_dt, end_dt = self._parse_range(start_at, end_at)
        stacked = defaultdict(lambda: defaultdict(int))  # {week: {agent: count}}
        for r in self.records:
            if not self._matches(r, tenant_id, None, start_dt, end_dt):
                continue
            week = r["timestamp"].strftime("%Y-W%U")
            agent = r["agent"]
            if cluster:
                # Hard-coded coarse split; config-driven clusters live in get_segmented_agents.
                agent = "Cluster A" if agent.startswith("planner") else "Cluster B"
            stacked[week][agent] += 1
        return [
            {"week": week, "agent": agent, "count": count}
            for week, agents in stacked.items()
            for agent, count in agents.items()
        ]

    def get_segmented_trend(self, tenant_id=None, start_at=None, end_at=None, by="reward"):
        """Weekly counts grouped into Top/Mid/Low segments by average agent reward.

        NOTE: the *by* parameter is currently unused here — reward is always used.
        """
        start_dt, end_dt = self._parse_range(start_at, end_at)
        # Collect reward samples per agent (records carry no reward key today,
        # so the default of 1.0 applies).
        agent_scores = defaultdict(list)  # {agent: [reward]}
        for r in self.records:
            if not self._matches(r, tenant_id, None, start_dt, end_dt):
                continue
            agent_scores[r["agent"]].append(r.get("reward", 1.0))  # default reward
        # Rank agents by average reward and cut into rough percentile thirds.
        agent_avg = {a: sum(v) / len(v) for a, v in agent_scores.items()}
        sorted_agents = sorted(agent_avg.items(), key=lambda item: item[1], reverse=True)
        total = len(sorted_agents)
        segments = {}
        for i, (agent, _) in enumerate(sorted_agents):
            p = i / total
            segments[agent] = "Top" if p < 0.33 else ("Mid" if p < 0.66 else "Low")
        # Aggregate weekly completions per segment.
        trend = defaultdict(lambda: defaultdict(int))  # {week: {segment: count}}
        for r in self.records:
            if not self._matches(r, tenant_id, None, start_dt, end_dt):
                continue
            week = r["timestamp"].strftime("%Y-W%U")
            trend[week][segments.get(r["agent"], "Unclassified")] += 1
        return [
            {"week": week, "segment": segment, "count": count}
            for week, segs in trend.items()
            for segment, count in segs.items()
        ]

    def export_csv(self, path: str, tenant_id=None, agent_role=None, start_at=None, end_at=None):
        """Write the heatmap rows to *path* as CSV; returns the path."""
        return self._write_csv(path, ["day", "hour", "count"],
                               self.get_heatmap(tenant_id, agent_role, start_at, end_at))

    def export_trend_csv(self, path, tenant_id=None, agent_role=None, start_at=None, end_at=None):
        """Write the weekly trend to *path* as CSV; returns the path."""
        return self._write_csv(path, ["week", "count"],
                               self.get_weekly_trend(tenant_id, agent_role, start_at, end_at))

    def get_segmented_agents(self, segment, tenant_id=None, start_at=None, end_at=None):
        """Weekly counts per agent, restricted to agents whose cluster == *segment*."""
        clusters = self._load_clusters()
        start_dt, end_dt = self._parse_range(start_at, end_at)
        agent_counts = defaultdict(lambda: defaultdict(int))  # {agent: {week: count}}
        for r in self.records:
            if not self._matches(r, tenant_id, None, start_dt, end_dt):
                continue
            # Cluster config keys on the role prefix before the first '-'.
            role = r["agent"].split("-")[0]
            if clusters.get(role, clusters.get("default", "Unclassified")) != segment:
                continue
            agent_counts[r["agent"]][r["timestamp"].strftime("%Y-W%U")] += 1
        return [
            {"agent": agent, "week": week, "count": count}
            for agent, weeks in agent_counts.items()
            for week, count in weeks.items()
        ]

    def export_segmented_csv(self, tenant_id=None, start_at=None, end_at=None):
        """Export the segmented weekly trend to a fixed CSV path; returns the path."""
        return self._write_csv(
            "data/goal_segmented_export.csv",
            ["week", "segment", "count"],
            self.get_segmented_trend(tenant_id, start_at, end_at),
        )

    def update_cluster_config(self, config: dict):
        """Overwrite the role->cluster mapping on disk; returns True."""
        # Fix: ensure the config directory exists before writing.
        os.makedirs(os.path.dirname(CLUSTER_PATH) or ".", exist_ok=True)
        with open(CLUSTER_PATH, "w") as f:
            json.dump(config, f, indent=2)
        return True

    def auto_cluster(self, tenant_id=None, start_at=None, end_at=None, k=3):
        """KMeans-cluster agents by their (zero-padded, length-10) reward vectors.

        Returns {agent: "Cluster N"}; {} when no matching records exist.
        """
        start_dt, end_dt = self._parse_range(start_at, end_at)
        agent_rewards = defaultdict(list)
        for r in self.records:
            if not self._matches(r, tenant_id, None, start_dt, end_dt):
                continue
            agent_rewards[r["agent"]].append(r.get("reward", 1.0))
        agents = list(agent_rewards)
        if not agents:
            # Fix: KMeans raises on an empty sample set.
            return {}
        # Fix: n_clusters must not exceed the number of samples.
        k = min(k, len(agents))
        padded = np.array([
            np.pad(np.array(agent_rewards[a]),
                   (0, max(0, 10 - len(agent_rewards[a]))),
                   constant_values=0)[:10]
            for a in agents
        ])
        labels = KMeans(n_clusters=k, random_state=42).fit_predict(padded)
        return {agent: f"Cluster {label + 1}" for agent, label in zip(agents, labels)}

View File

@@ -0,0 +1,236 @@
# monitoring/goal_heatmap_sql.py
import os, csv, json
from datetime import datetime
from collections import defaultdict
from sqlalchemy.orm import Session
from monitoring.goal_heatmap_base import GoalHeatmapBase
from models.goal_completion import GoalCompletion
from sklearn.cluster import KMeans
import numpy as np
CLUSTER_PATH = "config/cluster_config.json"
class SQLGoalHeatmap(GoalHeatmapBase):
def __init__(self, db: Session):
self.db = db
def _load_clusters(self):
if os.path.exists(CLUSTER_PATH):
with open(CLUSTER_PATH, "r") as f:
return json.load(f)
return {}
def log(self, tenant_id, agent_role, goal_id, success):
record = GoalCompletion(
tenant_id=tenant_id,
agent=agent_role,
goal_id=goal_id,
success=success
)
self.db.add(record)
self.db.commit()
def get_heatmap(self, tenant_id=None, agent_role=None, start_at=None, end_at=None):
query = self.db.query(GoalCompletion).filter(GoalCompletion.success == True)
if tenant_id: query = query.filter(GoalCompletion.tenant_id == tenant_id)
if agent_role: query = query.filter(GoalCompletion.agent == agent_role)
if start_at: query = query.filter(GoalCompletion.timestamp >= datetime.fromisoformat(start_at))
if end_at: query = query.filter(GoalCompletion.timestamp < datetime.fromisoformat(end_at))
heatmap = defaultdict(lambda: defaultdict(int))
for r in query.all():
day = r.timestamp.strftime("%a")
hour = r.timestamp.hour
heatmap[day][hour] += 1
return [{"day": d, "hour": f"{h}:00", "count": c} for d, hrs in heatmap.items() for h, c in hrs.items()]
# def get_weekly_trend(self, tenant_id=None, agent_role=None):
# query = self.db.query(GoalCompletion).filter(GoalCompletion.success == True)
# if tenant_id: query = query.filter(GoalCompletion.tenant_id == tenant_id)
# if agent_role: query = query.filter(GoalCompletion.agent == agent_role)
# trend = defaultdict(int)
# for r in query.all():
# week = r.timestamp.strftime("%Y-W%U")
# trend[week] += 1
# return [{"week": k, "count": v} for k, v in sorted(trend.items())]
def get_weekly_trend(self, tenant_id=None, agent_role=None, start_at=None, end_at=None):
query = self.db.query(GoalCompletion).filter(GoalCompletion.success == True)
if tenant_id:
query = query.filter(GoalCompletion.tenant_id == tenant_id)
if agent_role:
query = query.filter(GoalCompletion.agent == agent_role)
if start_at:
query = query.filter(GoalCompletion.timestamp >= datetime.fromisoformat(start_at))
if end_at:
query = query.filter(GoalCompletion.timestamp < datetime.fromisoformat(end_at))
trend = defaultdict(int)
for r in query.all():
week = r.timestamp.strftime("%Y-W%U")
trend[week] += 1
return [{"week": k, "count": v} for k, v in sorted(trend.items())]
def get_weekly_stacked(self, tenant_id=None, start_at=None, end_at=None, cluster=False):
query = self.db.query(GoalCompletion).filter(GoalCompletion.success == True)
if tenant_id:
query = query.filter(GoalCompletion.tenant_id == tenant_id)
if start_at:
query = query.filter(GoalCompletion.timestamp >= datetime.fromisoformat(start_at))
if end_at:
query = query.filter(GoalCompletion.timestamp < datetime.fromisoformat(end_at))
stacked = defaultdict(lambda: defaultdict(int)) # {week: {agent: count}}
for r in query.all():
week = r.timestamp.strftime("%Y-W%U")
agent = r.agent
if cluster:
agent = "Cluster A" if agent.startswith("planner") else "Cluster B"
stacked[week][agent] += 1
result = []
for week, agents in stacked.items():
for agent, count in agents.items():
result.append({
"week": week,
"agent": agent,
"count": count
})
return result
def get_segmented_trend(self, tenant_id=None, start_at=None, end_at=None, by="reward"):
query = self.db.query(GoalCompletion).filter(GoalCompletion.success == True)
if tenant_id:
query = query.filter(GoalCompletion.tenant_id == tenant_id)
if start_at:
query = query.filter(GoalCompletion.timestamp >= datetime.fromisoformat(start_at))
if end_at:
query = query.filter(GoalCompletion.timestamp < datetime.fromisoformat(end_at))
records = query.all()
# Step 1: Aggregate reward or count per agent
agent_scores = defaultdict(list)
for r in records:
if by == "reward":
agent_scores[r.agent].append(getattr(r, "reward", 1.0)) # fallback if reward missing
else:
agent_scores[r.agent].append(1)
# Step 2: Compute average score per agent
agent_avg = {a: sum(v)/len(v) for a, v in agent_scores.items()}
sorted_agents = sorted(agent_avg.items(), key=lambda x: x[1], reverse=True)
total = len(sorted_agents)
# Step 3: Assign percentile segments
segments = {}
for i, (agent, _) in enumerate(sorted_agents):
p = i / total
if p < 0.33:
segments[agent] = "Top"
elif p < 0.66:
segments[agent] = "Mid"
else:
segments[agent] = "Low"
# Step 4: Aggregate weekly completions by segment
trend = defaultdict(lambda: defaultdict(int)) # {week: {segment: count}}
for r in records:
week = r.timestamp.strftime("%Y-W%U")
segment = segments.get(r.agent, "Unclassified")
trend[week][segment] += 1
result = []
for week, segs in trend.items():
for segment, count in segs.items():
result.append({
"week": week,
"segment": segment,
"count": count
})
return result
def get_segmented_agents(self, segment, tenant_id=None, start_at=None, end_at=None):
clusters = self._load_clusters()
query = self.db.query(GoalCompletion).filter(GoalCompletion.success == True)
if tenant_id:
query = query.filter(GoalCompletion.tenant_id == tenant_id)
if start_at:
query = query.filter(GoalCompletion.timestamp >= datetime.fromisoformat(start_at))
if end_at:
query = query.filter(GoalCompletion.timestamp < datetime.fromisoformat(end_at))
agent_counts = defaultdict(lambda: defaultdict(int))
for r in query.all():
role = r.agent.split("-")[0]
cluster = clusters.get(role, clusters.get("default", "Unclassified"))
if cluster != segment:
continue
week = r.timestamp.strftime("%Y-W%U")
agent_counts[r.agent][week] += 1
result = []
for agent, weeks in agent_counts.items():
for week, count in weeks.items():
result.append({"agent": agent, "week": week, "count": count})
return result
def export_segmented_csv(self, tenant_id=None, start_at=None, end_at=None):
data = self.get_segmented_trend(tenant_id, start_at, end_at)
path = "data/goal_segmented_export.csv"
with open(path, "w", newline="") as f:
writer = csv.DictWriter(f, fieldnames=["week", "segment", "count"])
writer.writeheader()
writer.writerows(data)
return path
def export_csv(self, path, tenant_id=None, agent_role=None, start_at=None, end_at=None):
data = self.get_heatmap(tenant_id, agent_role, start_at, end_at)
with open(path, "w", newline="") as f:
writer = csv.DictWriter(f, fieldnames=["day", "hour", "count"])
writer.writeheader()
writer.writerows(data)
return path
def export_trend_csv(self, path, tenant_id=None, agent_role=None, start_at=None, end_at=None):
trend = self.get_weekly_trend(tenant_id, agent_role, start_at, end_at)
with open(path, "w", newline="") as f:
writer = csv.DictWriter(f, fieldnames=["week", "count"])
writer.writeheader()
writer.writerows(trend)
return path
def auto_cluster(self, tenant_id=None, start_at=None, end_at=None, k=3):
    """Cluster agents by their reward histories with KMeans.

    Each agent's rewards (within the optional tenant/time filters) are
    padded or truncated to a fixed-length vector of 10 entries before
    clustering. Returns {agent: "Cluster N"}.

    Fixes over the previous version:
    - returns {} when no completions matched (KMeans raises on an empty
      sample matrix);
    - clamps n_clusters to the number of distinct agents (scikit-learn
      raises ValueError when n_clusters > n_samples).
    """
    query = self.db.query(GoalCompletion).filter(GoalCompletion.success == True)
    if tenant_id:
        query = query.filter(GoalCompletion.tenant_id == tenant_id)
    if start_at:
        query = query.filter(GoalCompletion.timestamp >= datetime.fromisoformat(start_at))
    if end_at:
        query = query.filter(GoalCompletion.timestamp < datetime.fromisoformat(end_at))
    agent_rewards = defaultdict(list)
    for r in query.all():
        # Rows without an explicit reward column count as a full reward.
        agent_rewards[r.agent].append(getattr(r, "reward", 1.0))
    agents = list(agent_rewards.keys())
    if not agents:
        return {}
    vectors = [np.array(agent_rewards[a]) for a in agents]
    # Fixed-width feature matrix: zero-pad short histories, truncate long ones.
    padded = np.array([np.pad(v, (0, max(0, 10 - len(v))), constant_values=0)[:10] for v in vectors])
    model = KMeans(n_clusters=min(k, len(agents)), random_state=42)
    labels = model.fit_predict(padded)
    return {agents[i]: f"Cluster {labels[i] + 1}" for i in range(len(agents))}
def update_cluster_config(self, config: dict):
    """Persist the role-to-cluster mapping to CLUSTER_PATH as pretty JSON.

    Always returns True (the write either succeeds or raises).
    """
    serialized = json.dumps(config, indent=2)
    with open(CLUSTER_PATH, "w") as f:
        f.write(serialized)
    return True

View File

@@ -1,6 +1,7 @@
# monitoring/metrics_store.py
import time
from collections import defaultdict
class MetricsStore:
def __init__(self):
@@ -33,5 +34,16 @@ class MetricsStore:
summary[key]["avg_latency"] = summary[key]["latency"] / summary[key]["count"]
summary[key]["success_rate"] = summary[key]["success"] / summary[key]["count"]
return summary
def get_agent_metrics(self, agent_role: str):
    """Aggregate latency/success metrics for one agent role.

    Scans self.records in a single pass and returns the total action
    count, mean latency, success rate and raw latency samples. All
    numeric fields are 0 when no records match.
    """
    latencies = []
    successes = 0
    total = 0
    for rec in self.records:
        if rec["agent"] != agent_role:
            continue
        total += 1
        latencies.append(rec["latency"])
        if rec["success"]:
            successes += 1
    return {
        "total_actions": total,
        "avg_latency": sum(latencies) / len(latencies) if latencies else 0,
        "success_rate": successes / total if total else 0,
        "latency_distribution": latencies,
    }
metrics_store = MetricsStore()

View File

@@ -6,14 +6,7 @@ class PluginRegistry:
self.plugins = {} # {plugin_name: {metadata}}
self.installed = {} # {(tenant_id, agent_role): [plugin_names]}
# def register_plugin(self, name: str, description: str, version: str, config_schema: dict):
# self.plugins[name] = {
# "name": name,
# "description": description,
# "version": version,
# "config_schema": config_schema
# }
##INFO: capability-based maching logic
##INFO: capability-based matching logic
def register_plugin(self, name: str, description: str, version: str, config_schema: dict, capabilities: list[str]):
self.plugins[name] = {
"name": name,

View File

@@ -7,7 +7,6 @@ from agents.shared_goal_registry import shared_goal_registry
from collab.graph_registry import graph_registry
from collab.graph_query import graph_query
router = APIRouter()
# 🔁 Multi-agent chain execution
@@ -38,7 +37,6 @@ def run_chain(tenant_id: str, chain_id: str, roles: list, task: str, context: di
def get_session(chain_id: str):
return {"session": collaboration_engine.get_session(chain_id)}
##NOTE: Extend with graph-registry and graph-query endpoints
# 🧬 Add agent to graph
@router.post("/admin/agents/graph/add")
@@ -86,5 +84,3 @@ def get_team_trace(goal_id: str):
@router.get("/team/graph")
def get_team_graph(goal_id: str):
return graph_registry.get_team_graph(goal_id)

View File

@@ -3,8 +3,12 @@
from fastapi import APIRouter
from pydantic import BaseModel
import config
import json
import os
from agents.reflection import reflection_memory # for update_cluster_config
router = APIRouter()
CLUSTER_PATH = "config/cluster_config.json"
class EngineUpdate(BaseModel):
llm: str | None = None
@@ -34,3 +38,31 @@ def update_engines(update: EngineUpdate):
"embedding_engine": config.EMBEDDING_ENGINE
}
##INFO: to support 'goal_heatmap' configuration
@router.get("/admin/config/goal-heatmap-backend")
def get_goal_heatmap_backend():
    """Report the active goal-heatmap backend ("memory" unless overridden by env)."""
    return {"backend": os.getenv("GOAL_HEATMAP_BACKEND", "memory")}
@router.post("/admin/config/goal-heatmap-backend")
def set_goal_heatmap_backend(mode: str):
    """Select the goal-heatmap backend ("memory" or "sql") for this process.

    The choice is stored in an environment variable, so it applies only
    to the current process and does not persist across restarts.
    """
    if mode in ("memory", "sql"):
        os.environ["GOAL_HEATMAP_BACKEND"] = mode
        return {"status": "updated", "backend": mode}
    return {"error": "Invalid mode"}
@router.get("/admin/config/clusters")
def get_cluster_config():
    """Return the role-to-cluster mapping, or {} when no config file exists yet.

    This route was previously registered twice at the same path: an
    unguarded version (which raised FileNotFoundError — a 500 — before
    the config file was first written) followed by this guarded version.
    The duplicate registration is removed; only the guarded handler remains.
    """
    if os.path.exists(CLUSTER_PATH):
        with open(CLUSTER_PATH, "r") as f:
            return json.load(f)
    return {}
# NOTE(review): Depends, GoalHeatmapBase and provide_goal_heatmap are not
# among this module's visible imports (only config, json, os and
# reflection_memory are) — confirm they are imported, e.g.
# `from fastapi import Depends`, or this route fails at import time.
@router.post("/admin/config/clusters")
def update_cluster_config(config: dict, heatmap: GoalHeatmapBase = Depends(provide_goal_heatmap)):
    """Overwrite the role-to-cluster mapping via the injected heatmap backend."""
    heatmap.update_cluster_config(config)
    return {"status": "updated"}

View File

@@ -2,6 +2,10 @@
from fastapi import APIRouter
from agents.goal_store import goal_store
##INFO: for 'goal_heatmap'
# from monitoring.goal_heatmap import goal_heatmap # ⬅️ Add this
from dependencies.goal_heatmap_provider import provide_goal_heatmap
from monitoring.goal_heatmap_base import GoalHeatmapBase
router = APIRouter()
@@ -17,3 +21,156 @@ def get_goal_session(tenant_id: str, session_id: str):
def resume_goal_session(tenant_id: str, session_id: str):
return goal_store.resume_session(tenant_id, session_id)
@router.get("/admin/goals/completion-heatmap")
def get_completion_heatmap(
    tenant_id: str = None,
    agent_role: str = None,
    start_at: str = None,
    end_at: str = None,
    heatmap: GoalHeatmapBase = Depends(provide_goal_heatmap)
):
    """Day/hour goal-completion heatmap.

    Filters are all optional: tenant, agent role, and ISO-8601 time
    bounds. The heatmap backend is injected per request so the
    memory/SQL implementations can be swapped via configuration.
    (The superseded hard-wired implementation that sat commented out
    between the decorator and this handler has been deleted.)
    """
    return {"heatmap": heatmap.get_heatmap(tenant_id, agent_role, start_at, end_at)}
# # @router.get("/admin/goals/completion-trend")
# # def get_weekly_trend(tenant_id: str = None, agent_role: str = None):
# # return {"trend": goal_heatmap.get_weekly_trend(tenant_id, agent_role)}
# @router.get("/admin/goals/completion-trend")
# def get_weekly_trend(
# tenant_id: str = None,
# agent_role: str = None,
# heatmap: GoalHeatmapBase = Depends(provide_goal_heatmap)
# ):
# return {"trend": heatmap.get_weekly_trend(tenant_id, agent_role)}
@router.get("/admin/goals/completion-trend")
def get_weekly_trend(
    tenant_id: str = None,
    agent_role: str = None,
    start_at: str = None,
    end_at: str = None,
    heatmap: GoalHeatmapBase = Depends(provide_goal_heatmap)
):
    """Weekly goal-completion counts, filtered by optional tenant, agent
    role and ISO-8601 time bounds; the heatmap backend is injected per
    request."""
    return {
        "trend": heatmap.get_weekly_trend(
            tenant_id=tenant_id,
            agent_role=agent_role,
            start_at=start_at,
            end_at=end_at
        )
    }
@router.get("/admin/goals/completion-stacked")
def get_weekly_stacked(
    tenant_id: str = None,
    start_at: str = None,
    end_at: str = None,
    cluster: bool = False,
    heatmap: GoalHeatmapBase = Depends(provide_goal_heatmap)
):
    """Weekly completion counts stacked per series; when cluster=True the
    backend groups series by cluster instead of individual agents."""
    return {
        "stacked": heatmap.get_weekly_stacked(
            tenant_id=tenant_id,
            start_at=start_at,
            end_at=end_at,
            cluster=cluster
        )
    }
@router.get("/admin/goals/completion-segmented")
def get_segmented_trend(
    tenant_id: str = None,
    start_at: str = None,
    end_at: str = None,
    by: str = "reward",
    heatmap: GoalHeatmapBase = Depends(provide_goal_heatmap)
):
    """Weekly completion trend broken down into segments.

    NOTE(review): `by` is forwarded to the backend, but the visible
    backend call site (export_segmented_csv) invokes get_segmented_trend
    with only three arguments — confirm every backend implementation's
    get_segmented_trend accepts a `by` keyword.
    """
    return {
        "segmented": heatmap.get_segmented_trend(
            tenant_id=tenant_id,
            start_at=start_at,
            end_at=end_at,
            by=by
        )
    }
@router.get("/admin/goals/completion-heatmap/export")
def export_heatmap_csv(
    tenant_id: str = None,
    agent_role: str = None,
    start_at: str = None,
    end_at: str = None,
    heatmap: GoalHeatmapBase = Depends(provide_goal_heatmap)
):
    """Export the completion heatmap to CSV and return the file path.

    The backend writes to a fixed server-side path and returns it.
    (The dead commented-out predecessor of this handler, which called
    the hard-wired goal_heatmap singleton, has been deleted.)
    """
    path = heatmap.export_csv("data/goal_heatmap_export.csv", tenant_id, agent_role, start_at, end_at)
    return {"export_path": path}
@router.get("/admin/goals/completion-trend/export")
def export_trend_csv(
    tenant_id: str = None,
    agent_role: str = None,
    start_at: str = None,
    end_at: str = None,
    heatmap: GoalHeatmapBase = Depends(provide_goal_heatmap)
):
    """Export the weekly completion trend to a fixed server-side CSV and
    return the path written."""
    path = heatmap.export_trend_csv("data/goal_trend_export.csv", tenant_id, agent_role, start_at, end_at)
    return {"export_path": path}
@router.get("/admin/goals/segment-agents")
def get_segmented_agents(
    segment: str,
    tenant_id: str = None,
    start_at: str = None,
    end_at: str = None,
    heatmap: GoalHeatmapBase = Depends(provide_goal_heatmap)
):
    """Per-agent weekly completion counts restricted to one cluster *segment*."""
    return {
        "agents": heatmap.get_segmented_agents(segment, tenant_id, start_at, end_at)
    }
@router.get("/admin/goals/segment-export")
def export_segmented_csv(
    tenant_id: str = None,
    start_at: str = None,
    end_at: str = None,
    heatmap: GoalHeatmapBase = Depends(provide_goal_heatmap)
):
    """Export the segmented weekly trend to a server-side CSV and return its path."""
    path = heatmap.export_segmented_csv(tenant_id, start_at, end_at)
    return {"export_path": path}
@router.post("/admin/goals/auto-cluster")
def auto_cluster(
    tenant_id: str = None,
    start_at: str = None,
    end_at: str = None,
    k: int = 3,
    heatmap: GoalHeatmapBase = Depends(provide_goal_heatmap)
):
    """Run KMeans clustering of agents by reward history on the backend;
    returns {agent: cluster-label}."""
    return {"clusters": heatmap.auto_cluster(tenant_id, start_at, end_at, k)}

View File

@@ -36,6 +36,10 @@ def set_policy(tenant_id: str, allowed_roles: list, restricted_tasks: list, audi
def get_all_policies():
return {"policies": policy_registry.get_all()}
@router.get("/admin/governance/global-policy/{name}")
def get_global_policy(name: str):
return {"policy": policy_registry.get_global_policy(name)}
@router.post("/admin/governance/check")
def check_compliance(tenant_id: str, agent_role: str, task: str):
policy = policy_registry.get_policy(tenant_id)

View File

@@ -16,6 +16,9 @@ from routes.emotion_routes import analyze_emotion
from config import PERSONA_PRESETS
from config.prompt_templates import build_persona_prompt
# ✅ NEW: import repository
from db.memory_repository import save_memory, get_memories, search_memories
router = APIRouter()
memory = MemoryManager()
@@ -58,7 +61,6 @@ class PersonaChainRequest(BaseModel):
user_id: str
chain: list[str] # e.g. ["mentor", "poet"]
def get_user_from_token(token: str = Depends(oauth2_scheme)):
try:
payload = jwt.decode(token, "your-secret-key", algorithms=["HS256"])
@@ -77,6 +79,43 @@ def match_mood_to_persona(emotion: str) -> dict:
preset_name = mood_map.get(emotion.lower())
return PERSONA_PRESETS.get(preset_name, None)
# -----------------------------
# Memory Persistence Routes
# -----------------------------
@router.post("/memory/save")
def save_memory_route(
    transcript: str = Body(...),
    response: dict = Body(...),
    emotion: dict = Body(...),
    user_id: str = Query("default")
):
    """Save memory to DB and vector store"""
    # NOTE(review): user_id is accepted but never forwarded to save_memory,
    # so stored memories are not scoped per user — confirm this is intended.
    mem = save_memory(transcript=transcript, response=response, emotion=emotion)
    return {"status": "saved", "id": mem.id, "timestamp": mem.timestamp}
@router.get("/memory/list")
def list_memories(limit: int = Query(50)):
    """Return up to *limit* recent memories from the database."""
    rows = get_memories(limit=limit)
    serialized = [
        {
            "id": row.id,
            "transcript": row.transcript,
            "response": row.response,
            "emotion": row.emotion,
            "timestamp": row.timestamp,
        }
        for row in rows
    ]
    return {"memories": serialized}
@router.get("/memory/search")
def search_memories_route(query: str = Query(...), k: int = Query(5)):
    """Semantic search over stored memories via the vector store; returns
    the top *k* matches."""
    hits = search_memories(query, k=k)
    serialized = [
        {
            "id": hit.id,
            "transcript": hit.transcript,
            "response": hit.response,
            "emotion": hit.emotion,
            "timestamp": hit.timestamp,
        }
        for hit in hits
    ]
    return {"matches": serialized}
@router.get("/memory/history")
def get_memory_history(user_id: str = Depends(get_user_from_token)):
@@ -117,9 +156,6 @@ def search_memory(user_id: str = Query("default"), query: str = Query(...)):
return {"matches": matches}
# History route
# @router.get("/history")
# def get_history(limit: int = 5):
# return memory.get_recent_history(limit=limit)
@router.get("/history")
def replay_history(user_id: str = Query("default")):
    """Return the 100 most recent history entries from the memory manager."""
    # NOTE(review): user_id is accepted but not passed to MemoryManager —
    # history is not scoped per user; confirm whether that is intended.
    return {"history": memory.get_recent_history(limit=100)}

View File

@@ -1,5 +1,4 @@
# routes/model_router_routes.py
##INFO:
from fastapi import APIRouter
from models.model_router import get_routed_llm, get_routed_slm, get_routed_embedding
@@ -20,4 +19,3 @@ def route_slm(prompt: str):
def route_embedding(text: str):
model = get_routed_embedding(text)
return {"engine": str(model)}

View File

@@ -15,6 +15,10 @@ def register_plugin(name: str, description: str, version: str, config_schema: di
def list_plugins():
return {"plugins": plugin_registry.list_plugins()}
@router.get("/admin/plugins/match")
def match_plugins(agent_traits: list[str]):
return {"matches": plugin_registry.match_plugins(agent_traits)}
@router.post("/admin/plugins/install")
def install_plugin(tenant_id: str, agent_role: str, plugin_name: str):
return plugin_registry.install_plugin(tenant_id, agent_role, plugin_name)

View File

@@ -4,6 +4,8 @@
from fastapi import APIRouter
from security.rbac_registry import rbac_registry
from security.action_validator import action_validator
from tenants.rbac_guard import check_access
from governance.policy_registry import policy_registry
router = APIRouter()
@@ -23,3 +25,11 @@ def validate_action(agent_role: str, action: str, tenant_id: str):
def get_audit_log(tenant_id: str):
return {"audit": action_validator.get_audit(tenant_id)}
@router.post("/admin/security/access-check")
def check_rbac_access(tenant_id: str, role: str, action: str, resource: str):
    """Manual RBAC check endpoint: reports whether *role* may perform *action*."""
    # NOTE(review): `resource` is accepted but never used in the check —
    # confirm whether resource-level authorization was intended here.
    allowed = check_access(tenant_id, role, action)
    return {"access_granted": allowed}
@router.get("/admin/security/global-policies")
def list_global_policies():
return {"global_policies": policy_registry.list_global_policies()}

View File

@@ -2,6 +2,7 @@
from fastapi import APIRouter
from agents.self_evaluator import self_evaluator
from agents.reflection import reflection_memory # or sql version
router = APIRouter()
@@ -13,3 +14,13 @@ def run_evaluation(tenant_id: str, agent_role: str, task: str, output: dict):
def get_all_evaluations(tenant_id: str):
return {"evaluations": self_evaluator.get_all(tenant_id)}
@router.get("/admin/evaluation/agent")
def get_agent_evaluation(tenant_id: str, agent_role: str):
all_evals = self_evaluator.get_all(tenant_id)
filtered = [e for e in all_evals if e["agent"] == agent_role]
return {"evaluations": filtered}
@router.post("/admin/evaluation/auto-cluster")
def auto_cluster(tenant_id: str = None, start_at: str = None, end_at: str = None, k: int = 3):
clusters = reflection_memory.auto_cluster(tenant_id, start_at, end_at, k)
return {"clusters": clusters}

View File

@@ -29,3 +29,6 @@ def get_all_violations():
def get_agent_violations(agent_role: str):
return {"violations": sla_monitor.get_by_agent(agent_role)}
@router.get("/admin/sla/summary")
def get_agent_summary(agent_role: str):
return {"summary": sla_monitor.get_summary(agent_role)}

22
routes/sync_routes.py Normal file
View File

@@ -0,0 +1,22 @@
# routes/sync_routes.py
from fastapi import APIRouter
from agents.agent_registry import get_agent_state
from memory.episodic_store import episodic_store
from agents.collaboration_engine import collaboration_engine
router = APIRouter()
@router.get("/sync/agent/state")
def get_agent_state_view(tenant_id: str, agent_role: str):
    """Expose the registry's current state snapshot for one agent."""
    return get_agent_state(tenant_id, agent_role)
@router.get("/sync/agent/memory")
def get_agent_memory(tenant_id: str, agent_role: str):
    """Return every stored episodic-memory entry for one agent."""
    return {"episodes": episodic_store.get_all(tenant_id, agent_role)}
@router.get("/sync/agent/status")
def get_agent_status(tenant_id: str, agent_role: str):
    """Report whether an agent appears in the recent collaboration trace.

    An agent counts as "active" when any trace entry targets it ("to")
    or was produced by it ("agent"); the most recent such entry is
    returned alongside the status.
    """
    trace = collaboration_engine.get_trace()
    # Use .get() for BOTH keys: the original used t["to"], which raises
    # KeyError on entries lacking "to" — the code already treated "agent"
    # as optional, so trace entries are evidently heterogeneous.
    recent = [t for t in trace if t.get("to") == agent_role or t.get("agent") == agent_role]
    return {"status": "active" if recent else "idle", "recent": recent[-1] if recent else None}

View File

@@ -2,6 +2,10 @@
from fastapi import APIRouter
from voice.voice_listener import VoiceListener
from agents.agent_core import run_agent
from emotion.emotion_detector import detect_emotion
import whisper
import os
router = APIRouter()
@@ -11,3 +15,28 @@ def start_voice_listener():
listener.start()
return {"status": "voice listener started"}
@router.post("/voice/upload")
async def upload_voice(file: UploadFile = File(...)):
    """Transcribe an uploaded audio file (Korean), detect emotion, run the agent.

    NOTE(review): UploadFile and File are not among this module's visible
    imports — confirm `from fastapi import UploadFile, File` is present.
    """
    temp_dir = "temp"
    os.makedirs(temp_dir, exist_ok=True)
    # basename() strips client-supplied directory components so a crafted
    # filename (e.g. "../../x") cannot escape the temp directory.
    audio_path = os.path.join(temp_dir, os.path.basename(file.filename))
    with open(audio_path, "wb") as f:
        f.write(await file.read())
    # Cache the Whisper model on the function: loading "base" on every
    # request is expensive and the model is reusable across requests.
    model = getattr(upload_voice, "_model", None)
    if model is None:
        model = whisper.load_model("base")
        upload_voice._model = model
    result = model.transcribe(audio_path, language="ko")
    transcript = result["text"]
    # Emotion detection on the transcript, then the agent response.
    emotion = detect_emotion(transcript)
    response = run_agent(transcript)
    return {
        "transcript": transcript,
        "emotion": emotion,
        "response": response
    }

View File

@@ -4,6 +4,7 @@
class RBACRegistry:
def __init__(self):
self.roles = {} # {agent_role: [allowed_actions]}
self._initialize_defaults()
def define_role(self, agent_role: str, allowed_actions: list[str]):
self.roles[agent_role] = allowed_actions
@@ -14,5 +15,18 @@ class RBACRegistry:
def get_all_roles(self):
return self.roles
def _initialize_defaults(self):
self.roles = {
"admin": [
"modify_policy", "view_sla", "invoke_agent", "edit_memory", "access_plugin"
],
"default_user": [
"invoke_agent", "view_memory", "access_plugin"
],
"guest": [
"view_memory"
]
}
rbac_registry = RBACRegistry()

View File

@@ -15,10 +15,20 @@ def enforce_rbac(action: str):
if not tenant_id or not role:
return {"error": "Missing tenant_id or role for RBAC check"}
if not tenant_policy_store.check_permission(tenant_id, role, action):
if not tenant_policy_store.check_permission(tenant_id, role, action) is True:
return {"error": f"Role '{role}' is not permitted to perform '{action}' for tenant '{tenant_id}'"}
return func(*args, **kwargs)
return wrapper
return decorator
def check_access(tenant_id: str, role: str, action: str) -> bool:
    """Manual RBAC check for non-decorator contexts.

    Returns True only when the tenant policy store explicitly permits
    *role* to perform *action* for *tenant_id*; a missing tenant or role
    is denied outright without consulting the store.
    """
    if tenant_id and role:
        return tenant_policy_store.check_permission(tenant_id, role, action) is True
    return False

View File

@@ -27,15 +27,6 @@ class ToolRegistry:
for name in self.tools
]
# def invoke(self, name: str, **kwargs):
# tool = self.get_tool(name)
# if not tool:
# return {"error": f"Tool '{name}' not found"}
# try:
# return tool(**kwargs)
# except Exception as e:
# return {"error": str(e)}
##NOTE: update to log execution and enforce RBAC
@enforce_rbac("invoke_tool")
def invoke(self, name: str, tenant_id: str = "default", agent_role: str = "unknown", **kwargs):
@@ -50,16 +41,6 @@ class ToolRegistry:
trace = execution_trace.log(tenant_id, agent_role, name, kwargs, result)
return {"status": "executed", "result": result, "trace": trace}
# def chain(self, steps: list):
# results = []
# for step in steps:
# name = step.get("name")
# args = step.get("args", {})
# result = self.invoke(name, **args)
# results.append({"tool": name, "args": args, "result": result})
# return results
##NOTE: Update to pass tenant/role
def chain(self, steps: list, tenant_id: str = "default", agent_role: str = "unknown"):
@@ -71,7 +52,6 @@ class ToolRegistry:
results.append({"tool": name, "args": args, "result": result})
return results
##INFO: Discovery metadata
def discover(self, role: str = None):
# Optional filtering by agent role

View File

@@ -7,9 +7,6 @@ import whisper
import threading
import numpy as np
# from agents.agent_core import run_agent
# from agents.background_learner import BackgroundLearner
# from voice.command_parser import parse_voice_command
from voice.command_parser import parse_voice_command
from voice.voice_utils import normalize_audio
from voice.wake_word_config import WAKE_WORD_CONFIG
@@ -17,7 +14,6 @@ from agents.agent_core import run_agent
from agents.background_learner import BackgroundLearner
from utils.logger import logger
class VoiceListener:
def __init__(self, keyword="hey-tony", model_size="base"):
self.keyword = keyword
@@ -90,21 +86,9 @@ class VoiceListener:
return result["text"]
def _handle_command(self, transcription):
# if any(kw in transcription.lower() for kw in ["learn", "요약", "배워"]):
# parsed = parse_voice_command(transcription)
# print(f"🧠 Parsed command: {parsed}")
# self.learner.learn_from_prompt(
# prompt=parsed["query"],
# engine=parsed["engine"],
# num_results=parsed["result_count"],
# language="ko"
# )
# else:
# response = run_agent(transcription)
# print(f"🤖 Agent response: {response}")
if any(kw in transcription.lower() for kw in ["learn", "요약", "배워"]):
parsed = parse_voice_command(transcription)
# print(f"🧠 Parsed command: {parsed}")
logger.info(f"Parsed voice command: {parsed}")
self.learner.learn_from_prompt(
prompt=parsed["query"],
@@ -114,12 +98,10 @@ class VoiceListener:
)
else:
response = run_agent(transcription)
# print(f"🤖 Agent response: {response}")
logger.info(f"Agent response: {response}")
def stop(self):
    """Stop the listen loop and release wake-word and audio resources."""
    self.running = False  # signals the listening loop to exit
    self.porcupine.delete()
    self.audio.terminate()

71
web/package-lock.json generated
View File

@@ -12,15 +12,16 @@
"@rjsf/core": "^5.24.13",
"ajv": "^8.17.1",
"axios": "^1.6.0",
"chart.js": "^4.5.0",
"chart.js": "^4.5.1",
"html2pdf.js": "^0.12.0",
"react": "^18.2.0",
"react-beautiful-dnd": "^13.1.1",
"react-chartjs-2": "^5.3.0",
"react-chartjs-2": "^5.3.1",
"react-dom": "^18.2.0",
"react-flow-renderer": "^10.3.17",
"react-router-dom": "^7.8.2",
"reactflow": "^11.11.4"
"reactflow": "^11.11.4",
"wavesurfer.js": "^7.11.1"
},
"devDependencies": {
"@vitejs/plugin-react": "^4.0.0",
@@ -70,6 +71,7 @@
"resolved": "https://registry.npmjs.org/@babel/core/-/core-7.28.4.tgz",
"integrity": "sha512-2BCOP7TN8M+gVDj7/ht3hsaO/B/n5oDbiAyyvnRlNOs+u1o+JWNYTQrmpuNp1/Wq2gcFrI01JAW+paEKDMx/CA==",
"dev": true,
"peer": true,
"dependencies": {
"@babel/code-frame": "^7.27.1",
"@babel/generator": "^7.28.3",
@@ -2058,6 +2060,7 @@
"version": "19.1.12",
"resolved": "https://registry.npmjs.org/@types/react/-/react-19.1.12.tgz",
"integrity": "sha512-cMoR+FoAf/Jyq6+Df2/Z41jISvGZZ2eTlnsaJRptmZ76Caldwy1odD4xTr/gNV9VLj0AWgg/nmkevIyUfIIq5w==",
"peer": true,
"dependencies": {
"csstype": "^3.0.2"
}
@@ -2211,9 +2214,10 @@
}
},
"node_modules/axios": {
"version": "1.11.0",
"resolved": "https://registry.npmjs.org/axios/-/axios-1.11.0.tgz",
"integrity": "sha512-1Lx3WLFQWm3ooKDYZD1eXmoGO9fxYQjrycfHFC8P0sCfQVXyROp0p9PFWBehewBOdCwHc+f/b8I0fMto5eSfwA==",
"version": "1.13.2",
"resolved": "https://registry.npmjs.org/axios/-/axios-1.13.2.tgz",
"integrity": "sha512-VPk9ebNqPcy5lRGuSlKx752IlDatOjT9paPlm8A7yOuW2Fbvp4X3JznJtT4f0GzGLLiWE9W8onz51SqLYwzGaA==",
"license": "MIT",
"dependencies": {
"follow-redirects": "^1.15.6",
"form-data": "^4.0.4",
@@ -2286,6 +2290,7 @@
"url": "https://github.com/sponsors/ai"
}
],
"peer": true,
"dependencies": {
"caniuse-lite": "^1.0.30001737",
"electron-to-chromium": "^1.5.211",
@@ -2360,9 +2365,11 @@
}
},
"node_modules/chart.js": {
"version": "4.5.0",
"resolved": "https://registry.npmjs.org/chart.js/-/chart.js-4.5.0.tgz",
"integrity": "sha512-aYeC/jDgSEx8SHWZvANYMioYMZ2KX02W6f6uVfyteuCGcadDLcYVHdfdygsTQkQ4TKn5lghoojAsPj5pu0SnvQ==",
"version": "4.5.1",
"resolved": "https://registry.npmjs.org/chart.js/-/chart.js-4.5.1.tgz",
"integrity": "sha512-GIjfiT9dbmHRiYi6Nl2yFCq7kkwdkp1W/lp2J99rX0yo9tgJGn3lKQATztIjb5tVtevcBtIdICNWqlq5+E8/Pw==",
"license": "MIT",
"peer": true,
"dependencies": {
"@kurkle/color": "^0.3.0"
},
@@ -2453,7 +2460,6 @@
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/compute-gcd/-/compute-gcd-1.2.1.tgz",
"integrity": "sha512-TwMbxBNz0l71+8Sc4czv13h4kEqnchV9igQZBi6QUaz09dnz13juGnnaWWJTRsP3brxOoxeB4SA2WELLw1hCtg==",
"peer": true,
"dependencies": {
"validate.io-array": "^1.0.3",
"validate.io-function": "^1.0.2",
@@ -2464,7 +2470,6 @@
"version": "1.1.2",
"resolved": "https://registry.npmjs.org/compute-lcm/-/compute-lcm-1.1.2.tgz",
"integrity": "sha512-OFNPdQAXnQhDSKioX8/XYT6sdUlXwpeMjfd6ApxMJfyZ4GxmLR1xvMERctlYhlHwIiz6CSpBc2+qYKjHGZw4TQ==",
"peer": true,
"dependencies": {
"compute-gcd": "^1.2.1",
"validate.io-array": "^1.0.3",
@@ -2657,6 +2662,7 @@
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/d3-selection/-/d3-selection-3.0.0.tgz",
"integrity": "sha512-fmTRWbNMmsmWq6xJV8D19U/gw/bwrHfNXxrIN+HfZgnzqTHp9jOmKMhsTUjXOJnZOdZY9Q28y4yebKzqDKlxlQ==",
"peer": true,
"engines": {
"node": ">=12"
}
@@ -3350,7 +3356,6 @@
"version": "0.2.2",
"resolved": "https://registry.npmjs.org/json-schema-compare/-/json-schema-compare-0.2.2.tgz",
"integrity": "sha512-c4WYmDKyJXhs7WWvAWm3uIYnfyWFoIp+JEoX34rctVvEkMYCPGhXtvmFFXiffBbxfZsvQ0RNnV5H7GvDF5HCqQ==",
"peer": true,
"dependencies": {
"lodash": "^4.17.4"
}
@@ -3359,7 +3364,6 @@
"version": "0.8.1",
"resolved": "https://registry.npmjs.org/json-schema-merge-allof/-/json-schema-merge-allof-0.8.1.tgz",
"integrity": "sha512-CTUKmIlPJbsWfzRRnOXz+0MjIqvnleIXwFTzz+t9T86HnYX/Rozria6ZVGLktAU9e+NygNljveP+yxqtQp/Q4w==",
"peer": true,
"dependencies": {
"compute-lcm": "^1.1.2",
"json-schema-compare": "^0.2.2",
@@ -3390,7 +3394,6 @@
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/jsonpointer/-/jsonpointer-5.0.1.tgz",
"integrity": "sha512-p/nXbhSEcu3pZRdkW1OfJhpsVtW1gd4Wa1fnQc9YLiTfAjn0312eMKimbdIQzuZl9aa9xUGaRlP9T/CJE/ditQ==",
"peer": true,
"engines": {
"node": ">=0.10.0"
}
@@ -3733,6 +3736,7 @@
"url": "https://github.com/sponsors/ai"
}
],
"peer": true,
"dependencies": {
"nanoid": "^3.3.11",
"picocolors": "^1.1.1",
@@ -3915,6 +3919,7 @@
"version": "18.3.1",
"resolved": "https://registry.npmjs.org/react/-/react-18.3.1.tgz",
"integrity": "sha512-wS+hAgJShR0KhEvPJArfuPVN1+Hz1t0Y6n5jLrGQbkb4urgPE/0Rve+1kMB1v/oWgHgm4WIcV+i7F2pTVj+2iQ==",
"peer": true,
"dependencies": {
"loose-envify": "^1.1.0"
},
@@ -3942,9 +3947,10 @@
}
},
"node_modules/react-chartjs-2": {
"version": "5.3.0",
"resolved": "https://registry.npmjs.org/react-chartjs-2/-/react-chartjs-2-5.3.0.tgz",
"integrity": "sha512-UfZZFnDsERI3c3CZGxzvNJd02SHjaSJ8kgW1djn65H1KK8rehwTjyrRKOG3VTMG8wtHZ5rgAO5oTHtHi9GCCmw==",
"version": "5.3.1",
"resolved": "https://registry.npmjs.org/react-chartjs-2/-/react-chartjs-2-5.3.1.tgz",
"integrity": "sha512-h5IPXKg9EXpjoBzUfyWJvllMjG2mQ4EiuHQFhms/AjUm0XSZHhyRy2xVmLXHKrtcdrPO4mnGqRtYoD0vp95A0A==",
"license": "MIT",
"peerDependencies": {
"chart.js": "^4.1.1",
"react": "^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0"
@@ -3954,6 +3960,7 @@
"version": "18.3.1",
"resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.3.1.tgz",
"integrity": "sha512-5m4nQKp+rZRb09LNH59GM4BxTh9251/ylbKIbpe7TpGxfJ+9kv6BLkLBXIjjspbgbnIBNqlI23tRnTWT0snUIw==",
"peer": true,
"dependencies": {
"loose-envify": "^1.1.0",
"scheduler": "^0.23.2"
@@ -3988,8 +3995,7 @@
"node_modules/react-is": {
"version": "18.3.1",
"resolved": "https://registry.npmjs.org/react-is/-/react-is-18.3.1.tgz",
"integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==",
"peer": true
"integrity": "sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg=="
},
"node_modules/react-redux": {
"version": "7.2.9",
@@ -4548,6 +4554,7 @@
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz",
"integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
"dev": true,
"peer": true,
"engines": {
"node": ">=12"
},
@@ -4647,20 +4654,17 @@
"node_modules/validate.io-array": {
"version": "1.0.6",
"resolved": "https://registry.npmjs.org/validate.io-array/-/validate.io-array-1.0.6.tgz",
"integrity": "sha512-DeOy7CnPEziggrOO5CZhVKJw6S3Yi7e9e65R1Nl/RTN1vTQKnzjfvks0/8kQ40FP/dsjRAOd4hxmJ7uLa6vxkg==",
"peer": true
"integrity": "sha512-DeOy7CnPEziggrOO5CZhVKJw6S3Yi7e9e65R1Nl/RTN1vTQKnzjfvks0/8kQ40FP/dsjRAOd4hxmJ7uLa6vxkg=="
},
"node_modules/validate.io-function": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/validate.io-function/-/validate.io-function-1.0.2.tgz",
"integrity": "sha512-LlFybRJEriSuBnUhQyG5bwglhh50EpTL2ul23MPIuR1odjO7XaMLFV8vHGwp7AZciFxtYOeiSCT5st+XSPONiQ==",
"peer": true
"integrity": "sha512-LlFybRJEriSuBnUhQyG5bwglhh50EpTL2ul23MPIuR1odjO7XaMLFV8vHGwp7AZciFxtYOeiSCT5st+XSPONiQ=="
},
"node_modules/validate.io-integer": {
"version": "1.0.5",
"resolved": "https://registry.npmjs.org/validate.io-integer/-/validate.io-integer-1.0.5.tgz",
"integrity": "sha512-22izsYSLojN/P6bppBqhgUDjCkr5RY2jd+N2a3DCAUey8ydvrZ/OkGvFPR7qfOpwR2LC5p4Ngzxz36g5Vgr/hQ==",
"peer": true,
"dependencies": {
"validate.io-number": "^1.0.3"
}
@@ -4669,7 +4673,6 @@
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/validate.io-integer-array/-/validate.io-integer-array-1.0.0.tgz",
"integrity": "sha512-mTrMk/1ytQHtCY0oNO3dztafHYyGU88KL+jRxWuzfOmQb+4qqnWmI+gykvGp8usKZOM0H7keJHEbRaFiYA0VrA==",
"peer": true,
"dependencies": {
"validate.io-array": "^1.0.3",
"validate.io-integer": "^1.0.4"
@@ -4678,14 +4681,15 @@
"node_modules/validate.io-number": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/validate.io-number/-/validate.io-number-1.0.3.tgz",
"integrity": "sha512-kRAyotcbNaSYoDnXvb4MHg/0a1egJdLwS6oJ38TJY7aw9n93Fl/3blIXdyYvPOp55CNxywooG/3BcrwNrBpcSg==",
"peer": true
"integrity": "sha512-kRAyotcbNaSYoDnXvb4MHg/0a1egJdLwS6oJ38TJY7aw9n93Fl/3blIXdyYvPOp55CNxywooG/3BcrwNrBpcSg=="
},
"node_modules/vite": {
"version": "7.1.5",
"resolved": "https://registry.npmjs.org/vite/-/vite-7.1.5.tgz",
"integrity": "sha512-4cKBO9wR75r0BeIWWWId9XK9Lj6La5X846Zw9dFfzMRw38IlTk2iCcUt6hsyiDRcPidc55ZParFYDXi0nXOeLQ==",
"version": "7.2.2",
"resolved": "https://registry.npmjs.org/vite/-/vite-7.2.2.tgz",
"integrity": "sha512-BxAKBWmIbrDgrokdGZH1IgkIk/5mMHDreLDmCJ0qpyJaAteP8NvMhkwr/ZCQNqNH97bw/dANTE9PDzqwJghfMQ==",
"dev": true,
"license": "MIT",
"peer": true,
"dependencies": {
"esbuild": "^0.25.0",
"fdir": "^6.5.0",
@@ -4777,6 +4781,7 @@
"resolved": "https://registry.npmjs.org/picomatch/-/picomatch-4.0.3.tgz",
"integrity": "sha512-5gTmgEY/sqK6gFXLIsQNH19lWb4ebPDLA4SdLP7dsWkIXHWlG66oPuVvXSGFPppYZz8ZDZq0dYYrbHfBCVUb1Q==",
"dev": true,
"peer": true,
"engines": {
"node": ">=12"
},
@@ -4784,6 +4789,12 @@
"url": "https://github.com/sponsors/jonschlinkert"
}
},
"node_modules/wavesurfer.js": {
"version": "7.11.1",
"resolved": "https://registry.npmjs.org/wavesurfer.js/-/wavesurfer.js-7.11.1.tgz",
"integrity": "sha512-8Q+wwItpjJAlhQ7crQLtKwgfbqqczm5/wx+76K4PptP+MBAjB0OA78+A9OuLnULz/8GpAQ+fKM6s81DonEO0Sg==",
"license": "BSD-3-Clause"
},
"node_modules/which": {
"version": "2.0.2",
"resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz",

View File

@@ -12,15 +12,16 @@
"@rjsf/core": "^5.24.13",
"ajv": "^8.17.1",
"axios": "^1.6.0",
"chart.js": "^4.5.0",
"chart.js": "^4.5.1",
"html2pdf.js": "^0.12.0",
"react": "^18.2.0",
"react-beautiful-dnd": "^13.1.1",
"react-chartjs-2": "^5.3.0",
"react-chartjs-2": "^5.3.1",
"react-dom": "^18.2.0",
"react-flow-renderer": "^10.3.17",
"react-router-dom": "^7.8.2",
"reactflow": "^11.11.4"
"reactflow": "^11.11.4",
"wavesurfer.js": "^7.11.1"
},
"devDependencies": {
"@vitejs/plugin-react": "^4.0.0",

View File

@@ -0,0 +1,23 @@
// web/src/admin/AgentDrilldown.jsx
// Per-agent weekly completion breakdown.
// Expects `data` as a flat array of {agent, week, count} records
// (the shape returned by the segment-agents endpoint).
export default function AgentDrilldown({ data }) {
  // Distinct agents and weeks, preserving first-seen order.
  const agents = [...new Set(data.map((d) => d.agent))];
  const weeks = [...new Set(data.map((d) => d.week))];
  return (
    <div className="space-y-2">
      <h4 className="font-semibold text-md">🔍 Agent-Level Breakdown</h4>
      {agents.map((agent) => (
        <div key={agent} className="bg-gray-100 p-2 rounded shadow text-sm">
          <p><strong>{agent}</strong></p>
          <ul className="list-disc ml-4">
            {weeks.map((week) => {
              // Weeks with no entry for this agent are omitted entirely.
              const entry = data.find((d) => d.agent === agent && d.week === week);
              return entry ? <li key={week}>{week}: {entry.count}</li> : null;
            })}
          </ul>
        </div>
      ))}
    </div>
  );
}

View File

@@ -0,0 +1,47 @@
// web/src/admin/AgentReflectionDashboard.jsx
// web/src/admin/AgentReflectionDashboard.jsx
import SelfEvaluationPanel from "./SelfEvaluationPanel";
import FeedbackPanel from "./FeedbackPanel";
import ClusterEditor from "./ClusterEditor";
import AutoClusterTrigger from "./AutoClusterTrigger";
import ReflectionLogViewer from "../tenants/ReflectionLogViewer";
import { useState } from "react";
export default function AgentReflectionDashboard() {
const [tenantId, setTenantId] = useState("default");
return (
<div className="space-y-6">
<h2 className="text-xl font-bold">🧠 Agent Reflection & Self-Improvement</h2>
<div className="flex items-center gap-4">
<label className="text-sm font-medium">Tenant ID:</label>
<input
type="text"
value={tenantId}
onChange={(e) => setTenantId(e.target.value)}
className="px-2 py-1 border rounded text-sm w-64"
/>
</div>
<div className="grid grid-cols-1 md:grid-cols-2 gap-6">
<div className="space-y-4">
<SelfEvaluationPanel />
<ClusterEditor />
</div>
<div className="space-y-4">
<AutoClusterTrigger />
<FeedbackPanel />
</div>
</div>
<div className="mt-6">
<ReflectionLogViewer tenantId={tenantId} />
</div>
</div>
);
}

View File

@@ -0,0 +1,78 @@
// web/src/admin/AgentSyncStatus.jsx
import { useState, useEffect } from "react";
import axios from "axios";
export default function AgentSyncStatus() {
const [tenantId, setTenantId] = useState("default");
const [agentRole, setAgentRole] = useState("");
const [state, setState] = useState(null);
const [memory, setMemory] = useState([]);
const [status, setStatus] = useState(null);
const loadSyncData = async () => {
const [stateRes, memoryRes, statusRes] = await Promise.all([
axios.get(`/api/sync/agent/state?tenant_id=${tenantId}&agent_role=${agentRole}`),
axios.get(`/api/sync/agent/memory?tenant_id=${tenantId}&agent_role=${agentRole}`),
axios.get(`/api/sync/agent/status?tenant_id=${tenantId}&agent_role=${agentRole}`)
]);
setState(stateRes.data);
setMemory(memoryRes.data.episodes || []);
setStatus(statusRes.data);
};
useEffect(() => {
if (agentRole) loadSyncData();
}, [agentRole]);
return (
<div className="space-y-4">
<h3 className="text-lg font-semibold">📡 Agent Sync Status</h3>
<input
type="text"
placeholder="Tenant ID"
value={tenantId}
onChange={(e) => setTenantId(e.target.value)}
className="px-2 py-1 border rounded text-sm w-full"
/>
<input
type="text"
placeholder="Agent Role"
value={agentRole}
onChange={(e) => setAgentRole(e.target.value)}
className="px-2 py-1 border rounded text-sm w-full"
/>
{status && (
<div className="bg-gray-100 p-4 rounded shadow">
<p><strong>Status:</strong> {status.status}</p>
{status.recent && (
<pre className="text-xs bg-white p-2 rounded whitespace-pre-wrap mt-2">
{JSON.stringify(status.recent, null, 2)}
</pre>
)}
</div>
)}
{state && (
<div className="bg-gray-100 p-4 rounded shadow">
<h4 className="font-semibold text-md mb-2">🧠 Agent State</h4>
<pre className="text-xs bg-white p-2 rounded whitespace-pre-wrap">{JSON.stringify(state, null, 2)}</pre>
</div>
)}
{memory.length > 0 && (
<div className="bg-gray-100 p-4 rounded shadow">
<h4 className="font-semibold text-md mb-2">🗂 Episodic Memory</h4>
{memory.map((m, i) => (
<div key={i} className="bg-white p-2 rounded shadow text-sm mb-2">
<p><strong>Task:</strong> {m.task}</p>
<pre className="text-xs bg-gray-50 p-2 rounded whitespace-pre-wrap">{JSON.stringify(m.output, null, 2)}</pre>
</div>
))}
</div>
)}
</div>
);
}

View File

@@ -0,0 +1,78 @@
// web/src/admin/AgentVoiceInput.jsx
import { useState } from "react";
import axios from "axios";
import { EmotionBadge } from "../emotion/EmotionBadge"; // adjust path as needed
import { EmotionIntensityBar } from "../emotion/EmotionIntensityBar"; // adjust path as needed
import { WaveformPlayer } from "../emotion/WaveformPlayer"; // adjust path as needed
export default function AgentVoiceInput() {
const [tenantId, setTenantId] = useState("default");
const [agentRole, setAgentRole] = useState("planner");
const [transcript, setTranscript] = useState("");
const [response, setResponse] = useState(null);
const [emotion, setEmotion] = useState(null);
const sendVoice = async () => {
const res = await axios.post("/api/agent/voice", {
tenant_id: tenantId,
agent_role: agentRole,
transcript
});
setResponse(res.data.response);
setEmotion(res.data.emotion);
};
return (
<div className="space-y-4">
<h3 className="text-lg font-semibold">🎙 Voice Input & Emotion Overlay</h3>
<input
type="text"
placeholder="Tenant ID"
value={tenantId}
onChange={(e) => setTenantId(e.target.value)}
className="px-2 py-1 border rounded text-sm w-full"
/>
<input
type="text"
placeholder="Agent Role"
value={agentRole}
onChange={(e) => setAgentRole(e.target.value)}
className="px-2 py-1 border rounded text-sm w-full"
/>
<textarea
placeholder="Transcript (simulated voice input)"
value={transcript}
onChange={(e) => setTranscript(e.target.value)}
className="px-2 py-1 border rounded text-sm h-24 w-full"
/>
<button
onClick={sendVoice}
className="px-3 py-1 bg-indigo-600 text-white rounded text-sm"
>
Send Voice Input
</button>
{response && (
<div className="bg-gray-100 p-4 rounded shadow mt-2 space-y-2">
<h4 className="font-semibold text-md">🧠 Agent Response</h4>
<pre className="text-xs bg-white p-2 rounded whitespace-pre-wrap">{JSON.stringify(response, null, 2)}</pre>
{/* {emotion && (
<p className="text-sm text-purple-600">🌀 Detected Emotion: <strong>{emotion.label}</strong> ({emotion.score})</p>
)} */}
{/* {emotion && (
<EmotionBadge label={emotion.label} score={emotion.score} />
)} */}
{emotion && (
<div className="space-y-2">
<EmotionBadge label={emotion.label} score={emotion.score} />
<EmotionIntensityBar label={emotion.label} score={emotion.score} />
</div>
)}
{audioUrl && <WaveformPlayer audioUrl={audioUrl} />}
</div>
)}
</div>
);
}

View File

@@ -0,0 +1,42 @@
// web/src/admin/AutoClusterTrigger.jsx
import { useState } from "react";
import axios from "axios";
export default function AutoClusterTrigger() {
const [tenantId, setTenantId] = useState("default");
const [startDate, setStartDate] = useState("");
const [endDate, setEndDate] = useState("");
const [clusters, setClusters] = useState({});
const runAutoCluster = async () => {
const res = await axios.post("/api/admin/evaluation/auto-cluster", {
tenant_id: tenantId,
start_at: startDate,
end_at: endDate,
k: 3
});
setClusters(res.data.clusters || {});
};
return (
<div className="space-y-4">
<h3 className="text-lg font-semibold">🤖 Auto-Clustering</h3>
<input type="text" value={tenantId} onChange={(e) => setTenantId(e.target.value)} placeholder="Tenant ID" className="px-2 py-1 border rounded text-sm w-full" />
<input type="date" value={startDate} onChange={(e) => setStartDate(e.target.value)} className="px-2 py-1 border rounded text-sm" />
<input type="date" value={endDate} onChange={(e) => setEndDate(e.target.value)} className="px-2 py-1 border rounded text-sm" />
<button onClick={runAutoCluster} className="px-3 py-1 bg-purple-600 text-white rounded text-sm">Run Clustering</button>
{Object.keys(clusters).length > 0 && (
<div className="bg-gray-100 p-4 rounded shadow mt-2 space-y-2">
<h4 className="font-semibold text-md">📊 Cluster Assignments</h4>
<ul className="text-sm">
{Object.entries(clusters).map(([agent, cluster]) => (
<li key={agent}>{agent} {cluster}</li>
))}
</ul>
</div>
)}
</div>
);
}

View File

@@ -0,0 +1,41 @@
// web/src/admin/ClusterEditor.jsx
import { useState, useEffect } from "react";
import axios from "axios";
export default function ClusterEditor() {
const [config, setConfig] = useState({});
const [newRole, setNewRole] = useState("");
const [newCluster, setNewCluster] = useState("");
const loadConfig = async () => {
const res = await axios.get("/api/admin/config/clusters");
setConfig(res.data);
};
const updateConfig = async () => {
const updated = { ...config, [newRole]: newCluster };
await axios.post("/api/admin/config/clusters", updated);
setConfig(updated);
setNewRole("");
setNewCluster("");
};
useEffect(() => { loadConfig(); }, []);
return (
<div className="bg-white p-4 rounded shadow space-y-2">
<h4 className="font-semibold text-md mb-2">🧠 Cluster Editor</h4>
<ul className="text-sm">
{Object.entries(config).map(([role, cluster]) => (
<li key={role}>{role} {cluster}</li>
))}
</ul>
<div className="flex gap-2 mt-2">
<input value={newRole} onChange={(e) => setNewRole(e.target.value)} placeholder="Agent Role" className="px-2 py-1 border rounded text-sm" />
<input value={newCluster} onChange={(e) => setNewCluster(e.target.value)} placeholder="Cluster Label" className="px-2 py-1 border rounded text-sm" />
<button onClick={updateConfig} className="px-3 py-1 bg-blue-600 text-white rounded text-sm">Add Mapping</button>
</div>
</div>
);
}

View File

@@ -0,0 +1,117 @@
// web/src/admin/CollabPlanner.jsx
// Admin panel for launching a multi-agent collaboration chain and
// inspecting the per-step results and the stored session record.
import { useState } from "react";
import axios from "axios";
export default function CollabPlanner() {
  // Form inputs for the chain request.
  const [tenantId, setTenantId] = useState("default");
  const [chainId, setChainId] = useState("chain-001");
  const [roles, setRoles] = useState("planner,retriever");
  const [task, setTask] = useState("");
  const [contextJson, setContextJson] = useState("{}");
  // Server responses: stored session record and per-step execution trace.
  const [sessionData, setSessionData] = useState(null);
  const [executionSteps, setExecutionSteps] = useState(null);
  // Validate the context JSON, run the chain, then refresh the session view.
  const runChain = async () => {
    let context = {};
    try {
      context = JSON.parse(contextJson);
    } catch {
      alert("Invalid context JSON");
      return;
    }
    const res = await axios.post("/api/admin/agents/run_chain", {
      tenant_id: tenantId,
      chain_id: chainId,
      // Roles are entered comma-separated; trim whitespace around each.
      roles: roles.split(",").map((r) => r.trim()),
      task,
      context
    });
    setExecutionSteps(res.data.steps || []);
    viewSession(); // auto-refresh session view
  };
  // Fetch the stored session record for the current chain id.
  const viewSession = async () => {
    const res = await axios.get(`/api/admin/agents/session?chain_id=${chainId}`);
    setSessionData(res.data.session || {});
  };
  return (
    <div className="space-y-4">
      <h3 className="text-lg font-semibold">🤝 Multi-Agent Collaboration Planner</h3>
      <input
        type="text"
        placeholder="Tenant ID"
        value={tenantId}
        onChange={(e) => setTenantId(e.target.value)}
        className="px-2 py-1 border rounded text-sm w-full"
      />
      <input
        type="text"
        placeholder="Chain ID"
        value={chainId}
        onChange={(e) => setChainId(e.target.value)}
        className="px-2 py-1 border rounded text-sm w-full"
      />
      <input
        type="text"
        placeholder="Agent Roles (comma)"
        value={roles}
        onChange={(e) => setRoles(e.target.value)}
        className="px-2 py-1 border rounded text-sm w-full"
      />
      <input
        type="text"
        placeholder="Shared Task"
        value={task}
        onChange={(e) => setTask(e.target.value)}
        className="px-2 py-1 border rounded text-sm w-full"
      />
      <textarea
        placeholder="Context JSON"
        value={contextJson}
        onChange={(e) => setContextJson(e.target.value)}
        className="px-2 py-1 border rounded text-sm h-24 w-full"
      />
      <button
        onClick={runChain}
        className="px-3 py-1 bg-green-600 text-white rounded text-sm"
      >
        Run Collaboration Chain
      </button>
      <button
        onClick={viewSession}
        className="px-3 py-1 bg-blue-600 text-white rounded text-sm"
      >
        View Session
      </button>
      {executionSteps && (
        <div className="bg-gray-100 p-4 rounded shadow mt-4 space-y-2">
          <h4 className="font-semibold text-md mb-2">🚀 Execution Steps</h4>
          {executionSteps.map((step, i) => (
            <div key={i} className="bg-white p-2 rounded shadow text-sm">
              <p><strong>Agent:</strong> {step.agent}</p>
              <p><strong>Latency:</strong> {step.latency.toFixed(2)}s</p>
              <p><strong>Success:</strong> {step.success ? "✅" : "❌"}</p>
              <p><strong>Input:</strong></p>
              <pre className="text-xs bg-gray-50 p-2 rounded whitespace-pre-wrap">{JSON.stringify(step.input, null, 2)}</pre>
              <p><strong>Output:</strong></p>
              <pre className="text-xs bg-gray-50 p-2 rounded whitespace-pre-wrap">{JSON.stringify(step.output, null, 2)}</pre>
            </div>
          ))}
        </div>
      )}
      {sessionData && (
        <div className="bg-gray-100 p-4 rounded shadow mt-4">
          <h4 className="font-semibold text-md mb-2">📋 Session Data</h4>
          <pre className="text-xs bg-white p-2 rounded whitespace-pre-wrap">{JSON.stringify(sessionData, null, 2)}</pre>
        </div>
      )}
    </div>
  );
}

View File

@@ -0,0 +1,52 @@
// web/src/admin/GoalHeatmap.jsx
import { Chart as ChartJS, Tooltip, Title, Legend, CategoryScale, LinearScale } from "chart.js";
import { MatrixController, MatrixElement } from "chartjs-chart-matrix";
import { Chart } from "react-chartjs-2";
ChartJS.register(MatrixController, MatrixElement, Tooltip, Title, Legend, CategoryScale, LinearScale);
export default function GoalHeatmap({ data }) {
const chartData = {
datasets: [
{
label: "Goals Completed",
data: data.map((entry) => ({
x: entry.day,
y: entry.hour,
v: entry.count,
})),
backgroundColor: (ctx) => {
const value = ctx.dataset.data[ctx.dataIndex].v;
return `rgba(0, 123, 255, ${value / 10})`;
},
width: () => 20,
height: () => 20,
},
],
};
const options = {
scales: {
x: {
type: "category",
labels: ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"],
title: { display: true, text: "Day" },
},
y: {
type: "category",
labels: Array.from({ length: 24 }, (_, i) => `${i}:00`),
title: { display: true, text: "Hour" },
},
},
plugins: {
tooltip: {
callbacks: {
label: (ctx) => `Goals: ${ctx.raw.v}`,
},
},
},
};
return <Chart type="matrix" data={chartData} options={options} />;
}

View File

@@ -0,0 +1,58 @@
// web/src/admin/GoalSegmentChart.jsx
import { Bar } from "react-chartjs-2";
import { Chart as ChartJS, BarElement, CategoryScale, LinearScale, Tooltip, Title, Legend } from "chart.js";
ChartJS.register(BarElement, CategoryScale, LinearScale, Tooltip, Title, Legend);
// export default function GoalSegmentChart({ data }) {
export default function GoalSegmentChart({ data, onSegmentClick }) {
const weeks = [...new Set(data.map((d) => d.week))];
const segments = [...new Set(data.map((d) => d.segment))];
const datasets = segments.map((segment, i) => ({
label: segment,
data: weeks.map((week) => {
const entry = data.find((d) => d.week === week && d.segment === segment);
return entry ? entry.count : 0;
}),
backgroundColor: `hsl(${(i * 90) % 360}, 70%, 50%)`,
stack: "segment",
}));
// return (
// <Bar
// data={{ labels: weeks, datasets }}
// options={{
// responsive: true,
// plugins: {
// legend: { position: "top" },
// title: { display: true, text: "Weekly Goal Completion by Segment" },
// },
// scales: {
// x: { stacked: true },
// y: { stacked: true, beginAtZero: true, title: { display: true, text: "Completions" } },
// },
// }}
// />
// );
const options = {
onClick: (e, elements) => {
if (elements.length > 0) {
const index = elements[0].datasetIndex;
const segment = datasets[index].label;
onSegmentClick(segment);
}
},
plugins: {
legend: { position: "top" },
title: { display: true, text: "Weekly Goal Completion by Segment" },
},
scales: {
x: { stacked: true },
y: { stacked: true, beginAtZero: true },
},
};
return <Bar data={{ labels: weeks, datasets }} options={options} />;
}

View File

@@ -0,0 +1,38 @@
// web/src/admin/GoalStackedChart.jsx
import { Bar } from "react-chartjs-2";
import { Chart as ChartJS, BarElement, CategoryScale, LinearScale, Tooltip, Title, Legend } from "chart.js";
ChartJS.register(BarElement, CategoryScale, LinearScale, Tooltip, Title, Legend);
export default function GoalStackedChart({ data }) {
const weeks = [...new Set(data.map((d) => d.week))];
const agents = [...new Set(data.map((d) => d.agent))];
const datasets = agents.map((agent, i) => ({
label: agent,
data: weeks.map((week) => {
const entry = data.find((d) => d.week === week && d.agent === agent);
return entry ? entry.count : 0;
}),
backgroundColor: `hsl(${(i * 60) % 360}, 70%, 50%)`,
stack: "goal",
}));
return (
<Bar
data={{ labels: weeks, datasets }}
options={{
responsive: true,
plugins: {
legend: { position: "top" },
title: { display: true, text: "Weekly Goal Completion (Stacked by Agent)" },
},
scales: {
x: { stacked: true },
y: { stacked: true, beginAtZero: true, title: { display: true, text: "Completions" } },
},
}}
/>
);
}

View File

@@ -0,0 +1,69 @@
// web/src/admin/GoalTrendChart.jsx
import { Line } from "react-chartjs-2";
import { Chart as ChartJS, LineElement, PointElement, CategoryScale, LinearScale, Tooltip, Title } from "chart.js";
ChartJS.register(LineElement, PointElement, CategoryScale, LinearScale, Tooltip, Title);
// export default function GoalTrendChart({ data }) {
// const chartData = {
// labels: data.map((entry) => entry.week),
// datasets: [
// {
// label: "Goals Completed",
// data: data.map((entry) => entry.count),
// borderColor: "rgba(0, 123, 255, 1)",
// backgroundColor: "rgba(0, 123, 255, 0.2)",
// fill: true,
// tension: 0.3,
// },
// ],
// };
// const options = {
// scales: {
// y: {
// beginAtZero: true,
// title: { display: true, text: "Completions" },
// },
// x: {
// title: { display: true, text: "Week" },
// },
// },
// };
// return <Line data={chartData} options={options} />;
// }
export default function GoalTrendChart({ data }) {
const agents = [...new Set(data.map((d) => d.agent || "All"))];
const weeks = [...new Set(data.map((d) => d.week))];
const datasets = agents.map((agent, i) => ({
label: agent,
data: weeks.map((week) => {
const entry = data.find((d) => d.week === week && (d.agent || "All") === agent);
return entry ? entry.count : 0;
}),
borderColor: `hsl(${(i * 60) % 360}, 70%, 50%)`,
backgroundColor: `hsla(${(i * 60) % 360}, 70%, 50%, 0.2)`,
fill: true,
tension: 0.3,
}));
return (
<Line
data={{ labels: weeks, datasets }}
options={{
responsive: true,
plugins: {
legend: { position: "top" },
title: { display: true, text: "Weekly Goal Completion Trend" },
},
scales: {
y: { beginAtZero: true, title: { display: true, text: "Completions" } },
x: { title: { display: true, text: "Week" } },
},
}}
/>
);
}

View File

@@ -0,0 +1,27 @@
// web/src/admin/LatencyChart.jsx
import { Line } from "react-chartjs-2";
import { Chart as ChartJS, LineElement, PointElement, CategoryScale, LinearScale } from "chart.js";
ChartJS.register(LineElement, PointElement, CategoryScale, LinearScale);
export default function LatencyChart({ data }) {
const chartData = {
labels: data.map((_, i) => `#${i + 1}`),
datasets: [
{
label: "Latency (ms)",
data: data,
borderColor: "rgba(75,192,192,1)",
backgroundColor: "rgba(75,192,192,0.2)",
tension: 0.3,
},
],
};
return (
<div className="mt-4">
<Line data={chartData} />
</div>
);
}

View File

@@ -2,52 +2,363 @@
import { useState, useEffect } from "react";
import axios from "axios";
import LatencyChart from "./LatencyChart";
import GoalHeatmap from "./GoalHeatmap";
import SLATrendChart from "./SLATrendChart";
import GoalTrendChart from "./GoalTrendChart";
export default function MetricsDashboard() {
const [summary, setSummary] = useState({});
const [tenantId, setTenantId] = useState("");
const [selectedAgent, setSelectedAgent] = useState("");
const [agentMetrics, setAgentMetrics] = useState(null);
const [slaViolations, setSlaViolations] = useState([]);
const [goalHeatmap, setGoalHeatmap] = useState([]);
const [goalTrend, setGoalTrend] = useState([]);
const [backend, setBackend] = useState("memory");
const [startDate, setStartDate] = useState("");
const [endDate, setEndDate] = useState("");
const [trendAgent, setTrendAgent] = useState("");
const [multiAgent, setMultiAgent] = useState(false);
const [stackedData, setStackedData] = useState([]);
const [showCluster, setShowCluster] = useState(false);
const [segmentData, setSegmentData] = useState([]);
const [segmentMode, setSegmentMode] = useState("reward");
const [drillAgents, setDrillAgents] = useState([]);
const [selectedSegment, setSelectedSegment] = useState("");
const loadSummary = async () => {
const res = await axios.get(`/api/admin/metrics/summary${tenantId ? `?tenant_id=${tenantId}` : ""}`);
setSummary(res.data.summary || {});
setAgentMetrics(null);
setSelectedAgent("");
};
const loadAgentMetrics = async (agent) => {
const res = await axios.get(`/api/admin/metrics/agent?agent_role=${agent}`);
setSelectedAgent(agent);
setAgentMetrics(res.data.metrics || null);
};
const loadCharts = async () => {
const [slaRes, goalRes] = await Promise.all([
axios.get("/api/admin/sla/violations"),
axios.get("/api/admin/goals/completion-heatmap")
// axios.get("/api/admin/goals/completion-trend")
]);
setSlaViolations(slaRes.data.violations || []);
setGoalHeatmap(goalRes.data.heatmap || []);
};
const loadTrend = async () => {
const params = {};
if (tenantId) params.tenant_id = tenantId;
// if (trendAgent) params.agent_role = trendAgent;
if (!multiAgent && trendAgent) params.agent_role = trendAgent;
if (startDate) params.start_at = startDate;
if (endDate) params.end_at = endDate;
const res = await axios.get("/api/admin/goals/completion-trend", { params });
// setGoalTrend(trendRes.data.trend || []);
const trend = res.data.trend || [];
// Inject agent label for multi-agent chart
const labeled = multiAgent
? trend.map((t) => ({ ...t, agent: trendAgent || "All" }))
: trend.map((t) => ({ ...t, agent: trendAgent || "All" }));
setGoalTrend(labeled);
};
const loadStacked = async () => {
const params = {};
if (tenantId) params.tenant_id = tenantId;
if (startDate) params.start_at = startDate;
if (endDate) params.end_at = endDate;
if (showCluster) params.cluster = true;
const res = await axios.get("/api/admin/goals/completion-stacked", { params });
setStackedData(res.data.stacked || []);
};
const loadSegmented = async () => {
const params = {};
if (tenantId) params.tenant_id = tenantId;
if (startDate) params.start_at = startDate;
if (endDate) params.end_at = endDate;
params.by = segmentMode;
const res = await axios.get("/api/admin/goals/completion-segmented", { params });
setSegmentData(res.data.segmented || []);
};
const loadDrillAgents = async (segment) => {
setSelectedSegment(segment);
const params = {};
if (tenantId) params.tenant_id = tenantId;
if (startDate) params.start_at = startDate;
if (endDate) params.end_at = endDate;
params.segment = segment;
const res = await axios.get("/api/admin/goals/segment-agents", { params });
setDrillAgents(res.data.agents || []);
};
const triggerAutoCluster = async () => {
const res = await axios.post("/api/admin/goals/auto-cluster", {
tenant_id: tenantId,
start_at: startDate,
end_at: endDate,
k: 3
});
alert("Auto-clustered agents:\n" + JSON.stringify(res.data.clusters, null, 2));
};
const loadBackendConfig = async () => {
const res = await axios.get("/api/admin/config/goal-heatmap-backend");
setBackend(res.data.backend || "memory");
};
const updateBackend = async (mode) => {
await axios.post("/api/admin/config/goal-heatmap-backend", { mode });
setBackend(mode);
};
const exportCSV = async () => {
const res = await axios.get("/api/admin/goals/completion-heatmap/export");
alert(`Heatmap CSV exported to: ${res.data.export_path}`);
};
const exportTrendCSV = async () => {
const params = {};
if (tenantId) params.tenant_id = tenantId;
if (!multiAgent && trendAgent) params.agent_role = trendAgent;
if (startDate) params.start_at = startDate;
if (endDate) params.end_at = endDate;
const res = await axios.get("/api/admin/goals/completion-trend/export", { params });
alert(`Trend CSV exported to: ${res.data.export_path}`);
};
const exportSegmentedCSV = async () => {
const params = {};
if (tenantId) params.tenant_id = tenantId;
if (startDate) params.start_at = startDate;
if (endDate) params.end_at = endDate;
const res = await axios.get("/api/admin/goals/segment-export", { params });
alert(`Segmented CSV exported to: ${res.data.export_path}`);
};
useEffect(() => {
loadSummary();
loadCharts();
loadTrend();
loadBackendConfig();
// const interval = setInterval(loadSummary, 30000); // refresh every 30 seconds
const interval = setInterval(() => {
loadSummary();
loadCharts();
loadTrend();
}, 30000); // refresh every 30 seconds
return () => clearInterval(interval);
}, []);
return (
<div className="space-y-4">
<h3 className="text-lg font-semibold">📊 Execution Metrics Dashboard</h3>
<input
type="text"
placeholder="Tenant ID (optional)"
value={tenantId}
onChange={(e) => setTenantId(e.target.value)}
className="px-2 py-1 border rounded text-sm w-full"
/>
<button
onClick={loadSummary}
className="px-3 py-1 bg-blue-600 text-white rounded text-sm"
>
Refresh Summary
</button>
<div className="flex flex-col sm:flex-row gap-2 items-start sm:items-center">
<input
type="text"
placeholder="Tenant ID (optional)"
value={tenantId}
onChange={(e) => setTenantId(e.target.value)}
className="px-2 py-1 border rounded text-sm w-full sm:w-64"
/>
<button
onClick={loadSummary}
className="px-3 py-1 bg-blue-600 text-white rounded text-sm"
>
Refresh Summary
</button>
<div className="flex items-center gap-2">
<label className="text-sm font-medium">Heatmap Backend:</label>
<select
value={backend}
onChange={(e) => updateBackend(e.target.value)}
className="px-2 py-1 border rounded text-sm"
>
<option value="memory">In-Memory</option>
<option value="sql">SQL</option>
</select>
</div>
<button
onClick={exportCSV}
className="px-3 py-1 bg-green-600 text-white rounded text-sm"
>
Export CSV
</button>
</div>
<div className="flex flex-wrap gap-4 items-center">
<input
type="date"
value={startDate}
onChange={(e) => setStartDate(e.target.value)}
className="px-2 py-1 border rounded text-sm"
/>
<input
type="date"
value={endDate}
onChange={(e) => setEndDate(e.target.value)}
className="px-2 py-1 border rounded text-sm"
/>
<input
type="text"
placeholder="Filter by Agent"
value={trendAgent}
onChange={(e) => setTrendAgent(e.target.value)}
className="px-2 py-1 border rounded text-sm"
/>
<label className="text-sm flex items-center gap-1">
<input type="checkbox" checked={multiAgent} onChange={(e) => setMultiAgent(e.target.checked)} />
Multi-Agent Comparison
</label>
<button
onClick={loadTrend}
className="px-3 py-1 bg-purple-600 text-white rounded text-sm"
>
Filter Trend
</button>
<button
onClick={exportTrendCSV}
className="px-3 py-1 bg-green-600 text-white rounded text-sm"
>
Export Trend CSV
</button>
</div>
<div className="flex flex-wrap gap-4 items-center">
<label className="text-sm flex items-center gap-1">
<input type="checkbox" checked={showCluster} onChange={(e) => setShowCluster(e.target.checked)} />
Cluster Agents
</label>
<button onClick={loadStacked} className="px-3 py-1 bg-indigo-600 text-white rounded text-sm">
Load Stacked Chart
</button>
</div>
<div className="flex flex-wrap gap-4 items-center">
<select value={segmentMode} onChange={(e) => setSegmentMode(e.target.value)} className="px-2 py-1 border rounded text-sm">
<option value="reward">By Reward</option>
<option value="count">By Completion Count</option>
</select>
<button onClick={loadSegmented} className="px-3 py-1 bg-orange-600 text-white rounded text-sm">
Load Segmented Chart
</button>
</div>
<div className="flex flex-wrap gap-4 items-center">
<ClusterEditor />
<button onClick={triggerAutoCluster} className="px-3 py-1 bg-purple-600 text-white rounded text-sm mt-2">
Run Auto-Clustering
</button>
</div>
{Object.keys(summary).length > 0 && (
<div className="bg-gray-100 p-4 rounded shadow mt-4 space-y-2">
<h4 className="font-semibold text-md mb-2">📈 Summary</h4>
{Object.entries(summary).map(([key, data], i) => (
<div key={i} className="bg-white p-2 rounded shadow text-sm">
<p><strong>Agent:</strong> {key[0]}</p>
<p><strong>Action:</strong> {key[1]}</p>
<p><strong>Count:</strong> {data.count}</p>
<p><strong>Success Rate:</strong> {(data.success_rate * 100).toFixed(2)}%</p>
<p><strong>Avg Latency:</strong> {data.avg_latency.toFixed(2)}s</p>
</div>
))}
{Object.entries(summary).map(([key, data], i) => {
const [agent, action] = key.split(",");
return (
<div key={i} className="bg-white p-2 rounded shadow text-sm">
<p><strong>Agent:</strong> {agent}</p>
<p><strong>Action:</strong> {action}</p>
<p><strong>Count:</strong> {data.count}</p>
<p><strong>Success Rate:</strong> {(data.success_rate * 100).toFixed(2)}%</p>
<p><strong>Avg Latency:</strong> {data.avg_latency.toFixed(2)}s</p>
<button
onClick={() => loadAgentMetrics(agent)}
className="mt-2 text-blue-600 underline text-xs"
>
View Agent Details
</button>
</div>
);
})}
</div>
)}
{agentMetrics && (
<div className="bg-white p-4 rounded shadow mt-4 space-y-2">
<h4 className="font-semibold text-md mb-2">🔍 Details for Agent: {selectedAgent}</h4>
<p><strong>Total Actions:</strong> {agentMetrics.total_actions}</p>
<p><strong>Success Rate:</strong> {(agentMetrics.success_rate * 100).toFixed(2)}%</p>
<p><strong>Avg Latency:</strong> {agentMetrics.avg_latency.toFixed(2)}s</p>
{agentMetrics.latency_distribution?.length > 0 && (
<>
<p className="font-semibold text-sm mt-4">📉 Latency Distribution</p>
<LatencyChart data={agentMetrics.latency_distribution} />
</>
)}
</div>
)}
{goalHeatmap.length > 0 && (
<div className="bg-white p-4 rounded shadow space-y-2">
<h4 className="font-semibold text-md mb-2">🔥 Goal Completion Heatmap</h4>
<GoalHeatmap data={goalHeatmap} />
</div>
)}
{goalTrend.length > 0 && (
<div className="bg-white p-4 rounded shadow space-y-2">
<h4 className="font-semibold text-md mb-2">📆 Weekly Goal Completion Trend</h4>
<GoalTrendChart data={goalTrend} />
</div>
)}
{stackedData.length > 0 && (
<div className="bg-white p-4 rounded shadow space-y-2">
<h4 className="font-semibold text-md mb-2">📊 Stacked Goal Completion Chart</h4>
<GoalStackedChart data={stackedData} />
</div>
)}
{segmentData.length > 0 && (
<div className="bg-white p-4 rounded shadow space-y-2">
{/* <h4 className="font-semibold text-md mb-2">📊 Goal Completion by Agent Segment</h4>
<GoalSegmentChart data={segmentData} /> */}
<div className="flex justify-between items-center">
<h4 className="font-semibold text-md">📊 Goal Completion by Agent Segment</h4>
<button onClick={exportSegmentedCSV} className="px-2 py-1 bg-green-600 text-white rounded text-sm">
Export Segment CSV
</button>
</div>
<GoalSegmentChart data={segmentData} onSegmentClick={loadDrillAgents} />
</div>
)}
{drillAgents.length > 0 && (
<div className="bg-white p-4 rounded shadow space-y-2">
<h4 className="font-semibold text-md mb-2">📌 Drilldown: {selectedSegment}</h4>
<AgentDrilldown data={drillAgents} />
</div>
)}
{slaViolations.length > 0 && (
<div className="bg-white p-4 rounded shadow space-y-2">
<h4 className="font-semibold text-md mb-2">📉 SLA Violation Trend</h4>
<SLATrendChart data={slaViolations} />
</div>
)}
</div>
);
}

View File

@@ -0,0 +1,34 @@
// web/src/admin/SLATrendChart.jsx
import { Line } from "react-chartjs-2";
import { Chart as ChartJS, LineElement, PointElement, CategoryScale, LinearScale, Tooltip, Title } from "chart.js";
ChartJS.register(LineElement, PointElement, CategoryScale, LinearScale, Tooltip, Title);
export default function SLATrendChart({ data }) {
const chartData = {
labels: data.map((entry) => new Date(entry.timestamp * 1000).toLocaleString()),
datasets: [
{
label: "SLA Breaches",
data: data.map((entry) => (entry.breach ? 1 : 0)),
borderColor: "rgba(255,99,132,1)",
backgroundColor: "rgba(255,99,132,0.2)",
fill: true,
tension: 0.3,
},
],
};
const options = {
scales: {
y: {
beginAtZero: true,
ticks: { stepSize: 1 },
title: { display: true, text: "Breach (1 = Yes)" },
},
},
};
return <Line data={chartData} options={options} />;
}

View File

@@ -11,17 +11,6 @@ export default function SelfEvaluationPanel() {
const [result, setResult] = useState(null);
const [evaluations, setEvaluations] = useState([]);
// const runEvaluation = async () => {
// const parsed = JSON.parse(outputJson);
// const res = await axios.post("/api/admin/evaluation/run", {
// tenant_id: "default",
// agent_role: agentRole,
// task,
// output: parsed
// });
// setResult(res.data);
// loadEvaluations();
// };
const runEvaluation = async () => {
let parsed = {};
try {
@@ -41,10 +30,6 @@ export default function SelfEvaluationPanel() {
loadEvaluations();
};
// const loadEvaluations = async () => {
// const res = await axios.get("/api/admin/evaluation/all?tenant_id=default");
// setEvaluations(res.data.evaluations || []);
// };
const loadEvaluations = async () => {
const res = await axios.get(`/api/admin/self-evaluation?tenant_id=${tenantId}`);
setEvaluations(res.data.evaluations || []);

15
web/src/api/metrics.js Normal file
View File

@@ -0,0 +1,15 @@
// web/src/api/metrics.js
// Thin axios wrappers around the admin metrics endpoints.
import axios from "axios";

// Summary metrics, optionally scoped to one tenant.
export const fetchSummary = (tenantId) => {
  const query = tenantId ? `?tenant_id=${tenantId}` : "";
  return axios.get(`/api/admin/metrics/summary${query}`);
};

// Detailed metrics for a single agent role.
export const fetchAgentMetrics = (agent) =>
  axios.get(`/api/admin/metrics/agent?agent_role=${agent}`);

export const fetchSlaViolations = () => axios.get("/api/admin/sla/violations");

export const fetchGoalHeatmap = () => axios.get("/api/admin/goals/completion-heatmap");

View File

@@ -0,0 +1,52 @@
// web/src/admin/GoalHeatmap.jsx
// Matrix heatmap of goal completions: day-of-week on x, hour-of-day on y.
import { Chart as ChartJS, Tooltip, Title, Legend, CategoryScale, LinearScale } from "chart.js";
import { MatrixController, MatrixElement } from "chartjs-chart-matrix";
import { Chart } from "react-chartjs-2";
// Register the matrix controller/element along with the shared plugins.
ChartJS.register(MatrixController, MatrixElement, Tooltip, Title, Legend, CategoryScale, LinearScale);
export default function GoalHeatmap({ data }) {
  // `data` entries are read as { day, hour, count } — TODO confirm with caller.
  const chartData = {
    datasets: [
      {
        label: "Goals Completed",
        data: data.map((entry) => ({
          x: entry.day,
          y: entry.hour,
          v: entry.count,
        })),
        // Cell opacity scales linearly with the count (opaque at 10).
        backgroundColor: (ctx) => {
          const value = ctx.dataset.data[ctx.dataIndex].v;
          return `rgba(0, 123, 255, ${value / 10})`;
        },
        // Fixed 20px square cells.
        width: () => 20,
        height: () => 20,
      },
    ],
  };
  const options = {
    scales: {
      x: {
        type: "category",
        labels: ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"],
        title: { display: true, text: "Day" },
      },
      y: {
        type: "category",
        labels: Array.from({ length: 24 }, (_, i) => `${i}:00`),
        title: { display: true, text: "Hour" },
      },
    },
    plugins: {
      tooltip: {
        callbacks: {
          // Tooltip shows the raw completion count for the hovered cell.
          label: (ctx) => `Goals: ${ctx.raw.v}`,
        },
      },
    },
  };
  return <Chart type="matrix" data={chartData} options={options} />;
}

View File

@@ -0,0 +1,27 @@
// web/src/admin/LatencyChart.jsx
import { Line } from "react-chartjs-2";
import { Chart as ChartJS, LineElement, PointElement, CategoryScale, LinearScale } from "chart.js";
ChartJS.register(LineElement, PointElement, CategoryScale, LinearScale);
export default function LatencyChart({ data }) {
const chartData = {
labels: data.map((_, i) => `#${i + 1}`),
datasets: [
{
label: "Latency (ms)",
data: data,
borderColor: "rgba(75,192,192,1)",
backgroundColor: "rgba(75,192,192,0.2)",
tension: 0.3,
},
],
};
return (
<div className="mt-4">
<Line data={chartData} />
</div>
);
}

View File

@@ -0,0 +1,34 @@
// web/src/admin/SLATrendChart.jsx
import { Line } from "react-chartjs-2";
import { Chart as ChartJS, LineElement, PointElement, CategoryScale, LinearScale, Tooltip, Title } from "chart.js";
ChartJS.register(LineElement, PointElement, CategoryScale, LinearScale, Tooltip, Title);
export default function SLATrendChart({ data }) {
const chartData = {
labels: data.map((entry) => new Date(entry.timestamp * 1000).toLocaleString()),
datasets: [
{
label: "SLA Breaches",
data: data.map((entry) => (entry.breach ? 1 : 0)),
borderColor: "rgba(255,99,132,1)",
backgroundColor: "rgba(255,99,132,0.2)",
fill: true,
tension: 0.3,
},
],
};
const options = {
scales: {
y: {
beginAtZero: true,
ticks: { stepSize: 1 },
title: { display: true, text: "Breach (1 = Yes)" },
},
},
};
return <Line data={chartData} options={options} />;
}

View File

@@ -0,0 +1,30 @@
// web/src/emotion/EmotionBadge.jsx
export function EmotionBadge({ label, score }) {
const mapping = {
joy: { emoji: "😊", color: "#FFD700" },
sadness: { emoji: "😢", color: "#6495ED" },
anger: { emoji: "😠", color: "#DC143C" },
fear: { emoji: "😨", color: "#8B008B" },
surprise: { emoji: "😲", color: "#FF8C00" },
neutral: { emoji: "😐", color: "#A9A9A9" },
love: { emoji: "❤️", color: "#FF69B4" },
confusion: { emoji: "🤔", color: "#20B2AA" },
disgust: { emoji: "🤢", color: "#556B2F" },
pride: { emoji: "😌", color: "#DAA520" }
};
const emotion = mapping[label] || mapping["neutral"];
return (
<span style={{
backgroundColor: emotion.color,
color: "#fff",
padding: "4px 8px",
borderRadius: "8px",
fontSize: "0.9rem"
}}>
{emotion.emoji} {label} ({score})
</span>
);
}

View File

@@ -0,0 +1,33 @@
export function EmotionIntensityBar({ label, score }) {
const mapping = {
joy: { emoji: "😊", color: "#FFD700" },
sadness: { emoji: "😢", color: "#6495ED" },
anger: { emoji: "😠", color: "#DC143C" },
fear: { emoji: "😨", color: "#8B008B" },
surprise: { emoji: "😲", color: "#FF8C00" },
neutral: { emoji: "😐", color: "#A9A9A9" },
love: { emoji: "❤️", color: "#FF69B4" },
confusion: { emoji: "🤔", color: "#20B2AA" },
disgust: { emoji: "🤢", color: "#556B2F" },
pride: { emoji: "😌", color: "#DAA520" }
};
const emotion = mapping[label] || mapping["neutral"];
const widthPercent = Math.min(Math.max(score, 0), 1) * 100;
return (
<div className="space-y-1">
<div className="text-sm font-medium">{emotion.emoji} {label} ({score.toFixed(2)})</div>
<div className="w-full h-4 bg-gray-200 rounded">
<div
className="h-4 rounded"
style={{
width: `${widthPercent}%`,
backgroundColor: emotion.color,
transition: "width 0.3s ease"
}}
/>
</div>
</div>
);
}

View File

@@ -0,0 +1,37 @@
// web/src/emotion/WaveformPlayer.jsx
import { useEffect, useRef } from "react";
import WaveSurfer from "wavesurfer.js";
export default function WaveformPlayer({ audioUrl }) {
const containerRef = useRef(null);
const waveRef = useRef(null);
useEffect(() => {
if (!audioUrl || !containerRef.current) return;
waveRef.current = WaveSurfer.create({
container: containerRef.current,
waveColor: "#ccc",
progressColor: "#4f46e5",
height: 80,
responsive: true,
});
waveRef.current.load(audioUrl);
return () => waveRef.current?.destroy();
}, [audioUrl]);
return (
<div>
<div ref={containerRef} />
<button
onClick={() => waveRef.current?.playPause()}
className="mt-2 px-3 py-1 bg-indigo-600 text-white rounded text-sm"
>
Play / Pause
</button>
</div>
);
}