Add working Agentic-AI implementation

This commit is contained in:
2025-11-09 11:31:18 +09:00
parent 3743ca125e
commit bd38924133
379 changed files with 26954 additions and 0 deletions

49
.github/workflows/deploy.yml vendored Normal file
View File

@@ -0,0 +1,49 @@
# CI/CD pipeline: test the Python backend, build the Node frontend,
# then build both Docker images. Triggered on every push to main.
name: Agentic AI CI/CD
on:
  push:
    branches: [main]
jobs:
  build-and-deploy:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v3
      - name: Set up Python
        uses: actions/setup-python@v4
        with:
          python-version: '3.11'
      - name: Install backend dependencies
        run: |
          pip install -r requirements.txt
      - name: Run backend tests
        # NOTE(review): "|| echo" swallows ALL pytest failures, not just the
        # "no tests collected" case — confirm this is intentional.
        run: |
          pytest || echo "No tests found"
      - name: Set up Node.js
        uses: actions/setup-node@v3
        with:
          node-version: '20'
      - name: Install frontend dependencies
        run: |
          cd web
          npm install
      - name: Build frontend
        run: |
          cd web
          npm run build
      - name: Docker build
        # Backend image from repo root, frontend image from ./web.
        run: |
          docker build -t agentic-ai-backend .
          docker build -t agentic-ai-frontend ./web
      - name: Deploy (placeholder)
        run: echo "Add deployment step here (e.g., push to registry or SSH)"

17
Dockerfile Normal file
View File

@@ -0,0 +1,17 @@
# Backend image: FastAPI app served by uvicorn on port 8000.
# Use official Python image
FROM python:3.11-slim
# Set working directory
WORKDIR /app
# Copy requirements first so the dependency layer is cached across code changes
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt
# Copy backend files
# NOTE(review): consider a .dockerignore to keep venv/ and web/ out of the image
COPY . .
# Expose FastAPI port
EXPOSE 8000
# Run FastAPI app
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "8000"]

159
FIXME.md Normal file
View File

@@ -0,0 +1,159 @@
# Installation Error
```bash
(venv) dev1@EON-DEV:~/src/agentic-ai$ pip install -r requirements.txt
Requirement already satisfied: fastapi in ./venv/lib/python3.12/site-packages (from -r requirements.txt (line 2)) (0.116.1)
Requirement already satisfied: uvicorn in ./venv/lib/python3.12/site-packages (from -r requirements.txt (line 3)) (0.35.0)
Requirement already satisfied: pydantic in ./venv/lib/python3.12/site-packages (from -r requirements.txt (line 4)) (2.11.7)
Requirement already satisfied: langchain in ./venv/lib/python3.12/site-packages (from -r requirements.txt (line 7)) (0.3.27)
Requirement already satisfied: sentence-transformers in ./venv/lib/python3.12/site-packages (from -r requirements.txt (line 8)) (5.1.0)
Requirement already satisfied: transformers in ./venv/lib/python3.12/site-packages (from -r requirements.txt (line 9)) (4.56.1)
Requirement already satisfied: faiss-cpu in ./venv/lib/python3.12/site-packages (from -r requirements.txt (line 12)) (1.12.0)
Requirement already satisfied: qdrant-client in ./venv/lib/python3.12/site-packages (from -r requirements.txt (line 13)) (1.15.1)
Requirement already satisfied: weaviate-client in ./venv/lib/python3.12/site-packages (from -r requirements.txt (line 14)) (4.16.9)
Requirement already satisfied: llama-cpp-python in ./venv/lib/python3.12/site-packages (from -r requirements.txt (line 17)) (0.3.16)
Requirement already satisfied: openai in ./venv/lib/python3.12/site-packages (from -r requirements.txt (line 18)) (1.106.1)
Requirement already satisfied: torch in ./venv/lib/python3.12/site-packages (from -r requirements.txt (line 19)) (2.8.0)
Requirement already satisfied: huggingface-hub in ./venv/lib/python3.12/site-packages (from -r requirements.txt (line 20)) (0.34.4)
Collecting PyMuPDF (from -r requirements.txt (line 23))
Using cached pymupdf-1.26.4-cp39-abi3-manylinux_2_28_x86_64.whl.metadata (3.4 kB)
Requirement already satisfied: python-multipart in ./venv/lib/python3.12/site-packages (from -r requirements.txt (line 24)) (0.0.20)
Collecting beautifulsoup4 (from -r requirements.txt (line 25))
Using cached beautifulsoup4-4.13.5-py3-none-any.whl.metadata (3.8 kB)
Requirement already satisfied: requests in ./venv/lib/python3.12/site-packages (from -r requirements.txt (line 26)) (2.32.5)
Collecting whisper (from -r requirements.txt (line 27))
Using cached whisper-1.1.10.tar.gz (42 kB)
Installing build dependencies ... done
Getting requirements to build wheel ... done
Preparing metadata (pyproject.toml) ... done
Requirement already satisfied: pillow in ./venv/lib/python3.12/site-packages (from -r requirements.txt (line 28)) (11.3.0)
Collecting youtube-transcript-api (from -r requirements.txt (line 29))
Using cached youtube_transcript_api-1.2.2-py3-none-any.whl.metadata (24 kB)
Collecting ffmpeg-python (from -r requirements.txt (line 30))
Using cached ffmpeg_python-0.2.0-py3-none-any.whl.metadata (1.7 kB)
Collecting pvporcupine (from -r requirements.txt (line 33))
Using cached pvporcupine-3.0.5-py3-none-any.whl.metadata (5.0 kB)
Collecting pyaudio (from -r requirements.txt (line 34))
Using cached PyAudio-0.2.14.tar.gz (47 kB)
Installing build dependencies ... done
Getting requirements to build wheel ... done
Preparing metadata (pyproject.toml) ... done
Requirement already satisfied: numpy in ./venv/lib/python3.12/site-packages (from -r requirements.txt (line 35)) (2.3.2)
Requirement already satisfied: click in ./venv/lib/python3.12/site-packages (from -r requirements.txt (line 38)) (8.2.1)
Requirement already satisfied: langdetect in ./venv/lib/python3.12/site-packages (from -r requirements.txt (line 41)) (1.0.9)
Collecting apscheduler (from -r requirements.txt (line 42))
Using cached APScheduler-3.11.0-py3-none-any.whl.metadata (6.4 kB)
Requirement already satisfied: mysqlclient in ./venv/lib/python3.12/site-packages (from -r requirements.txt (line 44)) (2.2.7)
Collecting Wave (from -r requirements.txt (line 45))
Using cached wave-0.1.0-py3-none-any.whl.metadata (586 bytes)
Requirement already satisfied: starlette<0.48.0,>=0.40.0 in ./venv/lib/python3.12/site-packages (from fastapi->-r requirements.txt (line 2)) (0.47.3)
Requirement already satisfied: typing-extensions>=4.8.0 in ./venv/lib/python3.12/site-packages (from fastapi->-r requirements.txt (line 2)) (4.15.0)
Requirement already satisfied: annotated-types>=0.6.0 in ./venv/lib/python3.12/site-packages (from pydantic->-r requirements.txt (line 4)) (0.7.0)
Requirement already satisfied: pydantic-core==2.33.2 in ./venv/lib/python3.12/site-packages (from pydantic->-r requirements.txt (line 4)) (2.33.2)
Requirement already satisfied: typing-inspection>=0.4.0 in ./venv/lib/python3.12/site-packages (from pydantic->-r requirements.txt (line 4)) (0.4.1)
Requirement already satisfied: anyio<5,>=3.6.2 in ./venv/lib/python3.12/site-packages (from starlette<0.48.0,>=0.40.0->fastapi->-r requirements.txt (line 2)) (4.10.0)
Requirement already satisfied: idna>=2.8 in ./venv/lib/python3.12/site-packages (from anyio<5,>=3.6.2->starlette<0.48.0,>=0.40.0->fastapi->-r requirements.txt (line 2)) (3.10)
Requirement already satisfied: sniffio>=1.1 in ./venv/lib/python3.12/site-packages (from anyio<5,>=3.6.2->starlette<0.48.0,>=0.40.0->fastapi->-r requirements.txt (line 2)) (1.3.1)
Requirement already satisfied: h11>=0.8 in ./venv/lib/python3.12/site-packages (from uvicorn->-r requirements.txt (line 3)) (0.16.0)
Requirement already satisfied: langchain-core<1.0.0,>=0.3.72 in ./venv/lib/python3.12/site-packages (from langchain->-r requirements.txt (line 7)) (0.3.75)
Requirement already satisfied: langchain-text-splitters<1.0.0,>=0.3.9 in ./venv/lib/python3.12/site-packages (from langchain->-r requirements.txt (line 7)) (0.3.11)
Requirement already satisfied: langsmith>=0.1.17 in ./venv/lib/python3.12/site-packages (from langchain->-r requirements.txt (line 7)) (0.4.26)
Requirement already satisfied: SQLAlchemy<3,>=1.4 in ./venv/lib/python3.12/site-packages (from langchain->-r requirements.txt (line 7)) (2.0.43)
Requirement already satisfied: PyYAML>=5.3 in ./venv/lib/python3.12/site-packages (from langchain->-r requirements.txt (line 7)) (6.0.2)
Requirement already satisfied: charset_normalizer<4,>=2 in ./venv/lib/python3.12/site-packages (from requests->-r requirements.txt (line 26)) (3.4.3)
Requirement already satisfied: urllib3<3,>=1.21.1 in ./venv/lib/python3.12/site-packages (from requests->-r requirements.txt (line 26)) (2.5.0)
Requirement already satisfied: certifi>=2017.4.17 in ./venv/lib/python3.12/site-packages (from requests->-r requirements.txt (line 26)) (2025.8.3)
Requirement already satisfied: tenacity!=8.4.0,<10.0.0,>=8.1.0 in ./venv/lib/python3.12/site-packages (from langchain-core<1.0.0,>=0.3.72->langchain->-r requirements.txt (line 7)) (9.1.2)
Requirement already satisfied: jsonpatch<2.0,>=1.33 in ./venv/lib/python3.12/site-packages (from langchain-core<1.0.0,>=0.3.72->langchain->-r requirements.txt (line 7)) (1.33)
Requirement already satisfied: packaging>=23.2 in ./venv/lib/python3.12/site-packages (from langchain-core<1.0.0,>=0.3.72->langchain->-r requirements.txt (line 7)) (25.0)
Requirement already satisfied: jsonpointer>=1.9 in ./venv/lib/python3.12/site-packages (from jsonpatch<2.0,>=1.33->langchain-core<1.0.0,>=0.3.72->langchain->-r requirements.txt (line 7)) (3.0.0)
Requirement already satisfied: greenlet>=1 in ./venv/lib/python3.12/site-packages (from SQLAlchemy<3,>=1.4->langchain->-r requirements.txt (line 7)) (3.2.4)
Requirement already satisfied: tqdm in ./venv/lib/python3.12/site-packages (from sentence-transformers->-r requirements.txt (line 8)) (4.67.1)
Requirement already satisfied: scikit-learn in ./venv/lib/python3.12/site-packages (from sentence-transformers->-r requirements.txt (line 8)) (1.7.1)
Requirement already satisfied: scipy in ./venv/lib/python3.12/site-packages (from sentence-transformers->-r requirements.txt (line 8)) (1.16.1)
Requirement already satisfied: filelock in ./venv/lib/python3.12/site-packages (from transformers->-r requirements.txt (line 9)) (3.19.1)
Requirement already satisfied: regex!=2019.12.17 in ./venv/lib/python3.12/site-packages (from transformers->-r requirements.txt (line 9)) (2025.9.1)
Requirement already satisfied: tokenizers<=0.23.0,>=0.22.0 in ./venv/lib/python3.12/site-packages (from transformers->-r requirements.txt (line 9)) (0.22.0)
Requirement already satisfied: safetensors>=0.4.3 in ./venv/lib/python3.12/site-packages (from transformers->-r requirements.txt (line 9)) (0.6.2)
Requirement already satisfied: fsspec>=2023.5.0 in ./venv/lib/python3.12/site-packages (from huggingface-hub->-r requirements.txt (line 20)) (2025.9.0)
Requirement already satisfied: hf-xet<2.0.0,>=1.1.3 in ./venv/lib/python3.12/site-packages (from huggingface-hub->-r requirements.txt (line 20)) (1.1.9)
Requirement already satisfied: grpcio>=1.41.0 in ./venv/lib/python3.12/site-packages (from qdrant-client->-r requirements.txt (line 13)) (1.74.0)
Requirement already satisfied: httpx>=0.20.0 in ./venv/lib/python3.12/site-packages (from httpx[http2]>=0.20.0->qdrant-client->-r requirements.txt (line 13)) (0.28.1)
Requirement already satisfied: portalocker<4.0,>=2.7.0 in ./venv/lib/python3.12/site-packages (from qdrant-client->-r requirements.txt (line 13)) (3.2.0)
Requirement already satisfied: protobuf>=3.20.0 in ./venv/lib/python3.12/site-packages (from qdrant-client->-r requirements.txt (line 13)) (6.32.0)
Requirement already satisfied: validators<1.0.0,>=0.34.0 in ./venv/lib/python3.12/site-packages (from weaviate-client->-r requirements.txt (line 14)) (0.35.0)
Requirement already satisfied: authlib<2.0.0,>=1.2.1 in ./venv/lib/python3.12/site-packages (from weaviate-client->-r requirements.txt (line 14)) (1.6.3)
Requirement already satisfied: deprecation<3.0.0,>=2.1.0 in ./venv/lib/python3.12/site-packages (from weaviate-client->-r requirements.txt (line 14)) (2.1.0)
Requirement already satisfied: cryptography in ./venv/lib/python3.12/site-packages (from authlib<2.0.0,>=1.2.1->weaviate-client->-r requirements.txt (line 14)) (45.0.7)
Requirement already satisfied: httpcore==1.* in ./venv/lib/python3.12/site-packages (from httpx>=0.20.0->httpx[http2]>=0.20.0->qdrant-client->-r requirements.txt (line 13)) (1.0.9)
Requirement already satisfied: diskcache>=5.6.1 in ./venv/lib/python3.12/site-packages (from llama-cpp-python->-r requirements.txt (line 17)) (5.6.3)
Requirement already satisfied: jinja2>=2.11.3 in ./venv/lib/python3.12/site-packages (from llama-cpp-python->-r requirements.txt (line 17)) (3.1.6)
Requirement already satisfied: distro<2,>=1.7.0 in ./venv/lib/python3.12/site-packages (from openai->-r requirements.txt (line 18)) (1.9.0)
Requirement already satisfied: jiter<1,>=0.4.0 in ./venv/lib/python3.12/site-packages (from openai->-r requirements.txt (line 18)) (0.10.0)
Requirement already satisfied: setuptools in ./venv/lib/python3.12/site-packages (from torch->-r requirements.txt (line 19)) (80.9.0)
Requirement already satisfied: sympy>=1.13.3 in ./venv/lib/python3.12/site-packages (from torch->-r requirements.txt (line 19)) (1.14.0)
Requirement already satisfied: networkx in ./venv/lib/python3.12/site-packages (from torch->-r requirements.txt (line 19)) (3.5)
Requirement already satisfied: nvidia-cuda-nvrtc-cu12==12.8.93 in ./venv/lib/python3.12/site-packages (from torch->-r requirements.txt (line 19)) (12.8.93)
Requirement already satisfied: nvidia-cuda-runtime-cu12==12.8.90 in ./venv/lib/python3.12/site-packages (from torch->-r requirements.txt (line 19)) (12.8.90)
Requirement already satisfied: nvidia-cuda-cupti-cu12==12.8.90 in ./venv/lib/python3.12/site-packages (from torch->-r requirements.txt (line 19)) (12.8.90)
Requirement already satisfied: nvidia-cudnn-cu12==9.10.2.21 in ./venv/lib/python3.12/site-packages (from torch->-r requirements.txt (line 19)) (9.10.2.21)
Requirement already satisfied: nvidia-cublas-cu12==12.8.4.1 in ./venv/lib/python3.12/site-packages (from torch->-r requirements.txt (line 19)) (12.8.4.1)
Requirement already satisfied: nvidia-cufft-cu12==11.3.3.83 in ./venv/lib/python3.12/site-packages (from torch->-r requirements.txt (line 19)) (11.3.3.83)
Requirement already satisfied: nvidia-curand-cu12==10.3.9.90 in ./venv/lib/python3.12/site-packages (from torch->-r requirements.txt (line 19)) (10.3.9.90)
Requirement already satisfied: nvidia-cusolver-cu12==11.7.3.90 in ./venv/lib/python3.12/site-packages (from torch->-r requirements.txt (line 19)) (11.7.3.90)
Requirement already satisfied: nvidia-cusparse-cu12==12.5.8.93 in ./venv/lib/python3.12/site-packages (from torch->-r requirements.txt (line 19)) (12.5.8.93)
Requirement already satisfied: nvidia-cusparselt-cu12==0.7.1 in ./venv/lib/python3.12/site-packages (from torch->-r requirements.txt (line 19)) (0.7.1)
Requirement already satisfied: nvidia-nccl-cu12==2.27.3 in ./venv/lib/python3.12/site-packages (from torch->-r requirements.txt (line 19)) (2.27.3)
Requirement already satisfied: nvidia-nvtx-cu12==12.8.90 in ./venv/lib/python3.12/site-packages (from torch->-r requirements.txt (line 19)) (12.8.90)
Requirement already satisfied: nvidia-nvjitlink-cu12==12.8.93 in ./venv/lib/python3.12/site-packages (from torch->-r requirements.txt (line 19)) (12.8.93)
Requirement already satisfied: nvidia-cufile-cu12==1.13.1.3 in ./venv/lib/python3.12/site-packages (from torch->-r requirements.txt (line 19)) (1.13.1.3)
Requirement already satisfied: triton==3.4.0 in ./venv/lib/python3.12/site-packages (from torch->-r requirements.txt (line 19)) (3.4.0)
Collecting soupsieve>1.2 (from beautifulsoup4->-r requirements.txt (line 25))
Using cached soupsieve-2.8-py3-none-any.whl.metadata (4.6 kB)
Requirement already satisfied: six in ./venv/lib/python3.12/site-packages (from whisper->-r requirements.txt (line 27)) (1.17.0)
Collecting defusedxml<0.8.0,>=0.7.1 (from youtube-transcript-api->-r requirements.txt (line 29))
Using cached defusedxml-0.7.1-py2.py3-none-any.whl.metadata (32 kB)
Collecting future (from ffmpeg-python->-r requirements.txt (line 30))
Using cached future-1.0.0-py3-none-any.whl.metadata (4.0 kB)
Collecting tzlocal>=3.0 (from apscheduler->-r requirements.txt (line 42))
Using cached tzlocal-5.3.1-py3-none-any.whl.metadata (7.6 kB)
Collecting MySQL-python<2.0.0,>=1.2.5 (from Wave->-r requirements.txt (line 45))
Using cached MySQL-python-1.2.5.zip (108 kB)
Installing build dependencies ... done
Getting requirements to build wheel ... error
error: subprocess-exited-with-error
× Getting requirements to build wheel did not run successfully.
exit code: 1
╰─> [22 lines of output]
Traceback (most recent call last):
File "/mnt/LX_WORK/_DEV/_SRC/home_dev1/agentic-ai/venv/lib/python3.12/site-packages/pip/_vendor/pyproject_hooks/_in_process/_in_process.py", line 389, in <module>
main()
File "/mnt/LX_WORK/_DEV/_SRC/home_dev1/agentic-ai/venv/lib/python3.12/site-packages/pip/_vendor/pyproject_hooks/_in_process/_in_process.py", line 373, in main
json_out["return_val"] = hook(**hook_input["kwargs"])
^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/mnt/LX_WORK/_DEV/_SRC/home_dev1/agentic-ai/venv/lib/python3.12/site-packages/pip/_vendor/pyproject_hooks/_in_process/_in_process.py", line 143, in get_requires_for_build_wheel
return hook(config_settings)
^^^^^^^^^^^^^^^^^^^^^
File "/tmp/pip-build-env-6z3loy7t/overlay/lib/python3.12/site-packages/setuptools/build_meta.py", line 331, in get_requires_for_build_wheel
return self._get_build_requires(config_settings, requirements=[])
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/tmp/pip-build-env-6z3loy7t/overlay/lib/python3.12/site-packages/setuptools/build_meta.py", line 301, in _get_build_requires
self.run_setup()
File "/tmp/pip-build-env-6z3loy7t/overlay/lib/python3.12/site-packages/setuptools/build_meta.py", line 512, in run_setup
super().run_setup(setup_script=setup_script)
File "/tmp/pip-build-env-6z3loy7t/overlay/lib/python3.12/site-packages/setuptools/build_meta.py", line 317, in run_setup
exec(code, locals())
File "<string>", line 13, in <module>
File "/tmp/pip-install-_dqj7n6b/mysql-python_6b2ccc51ca7c464994ad0476ed1741c3/setup_posix.py", line 2, in <module>
from ConfigParser import SafeConfigParser
ImportError: cannot import name 'SafeConfigParser' from 'ConfigParser' (/usr/lib/python3.12/ConfigParser.py). Did you mean: 'RawConfigParser'?
[end of output]
note: This error originates from a subprocess, and is likely not a problem with pip.
error: subprocess-exited-with-error
× Getting requirements to build wheel did not run successfully.
exit code: 1
╰─> See above for output.
```

11
OLD.config.py Normal file
View File

@@ -0,0 +1,11 @@
# config.py
# Global back-end selection for the agentic-ai stack; imported as plain
# module-level constants by the rest of the code base.
VECTOR_DB = "faiss"  # Options: "faiss", "qdrant", "weaviate"
LLM_ENGINE = "ollama"  # Options: "llama.cpp", "ollama", "vllm"
SLM_ENGINE = "phi-3"  # Options: "phi-3", "gemma"
EMBEDDING_ENGINE = "huggingface"  # Options: "huggingface", "gpt4all"
EMBEDDING_MODEL_NAME = "sentence-transformers/all-MiniLM-L6-v2"  # HF model id for embeddings
DATA_DIR = "./data"  # root directory for ingested documents

24
OLD.persona_presets.py Normal file
View File

@@ -0,0 +1,24 @@
# config/persona_presets.py
# Named persona presets. Each preset maps to tone/style/formality knobs;
# presumably consumed by a prompt builder elsewhere — TODO confirm consumer.
PERSONA_PRESETS = {
    "zen": {
        "tone": "calm",
        "style": "wise",
        "formality": "formal"
    },
    "mentor": {
        "tone": "serious",
        "style": "professional",
        "formality": "formal"
    },
    "buddy": {
        "tone": "cheerful",
        "style": "friendly",
        "formality": "informal"
    },
    "poet": {
        "tone": "empathetic",
        "style": "witty",
        "formality": "informal"
    }
}

16
OLD.requirements.txt Normal file
View File

@@ -0,0 +1,16 @@
fastapi
uvicorn
langchain
qdrant-client
faiss-cpu
weaviate-client
llama-cpp-python
openai
transformers
sentence-transformers
pydantic
python-multipart
##NOTE: support multiple languages dynamically
langdetect

View File

@@ -0,0 +1,25 @@
# autonomy/goal_registry.py
##INFO: Goal Registry
import time
class GoalRegistry:
    """In-memory store of agent goals, scoped by (agent_role, tenant_id)."""

    def __init__(self):
        # Each record: {agent_role, tenant_id, goal, priority, context, created}
        self.goals = []

    def define_goal(self, agent_role: str, tenant_id: str, goal: str, priority: int, context: dict):
        """Record a new goal with a creation timestamp and return the stored record."""
        record = {
            "agent_role": agent_role,
            "tenant_id": tenant_id,
            "goal": goal,
            "priority": priority,
            "context": context,
            "created": time.time(),
        }
        self.goals.append(record)
        return record

    def get_goals(self, agent_role: str, tenant_id: str):
        """Return every goal registered for this role/tenant pair, in insertion order."""
        matches = []
        for record in self.goals:
            if record["agent_role"] == agent_role and record["tenant_id"] == tenant_id:
                matches.append(record)
        return matches


# Module-level singleton shared by importers.
goal_registry = GoalRegistry()

View File

@@ -0,0 +1,32 @@
# autonomy/planning_engine.py
##INFO: Planning Engine
class PlanningEngine:
    """Generates and stores (mocked) execution plans per agent role and tenant."""

    def __init__(self):
        # Each entry: {agent_role, tenant_id, goal, steps, timestamp}
        self.plans = []

    def generate_plan(self, goal: str, context: dict):
        """Return a fixed four-step plan.

        NOTE(review): *goal* and *context* are currently ignored — the steps
        are a mocked placeholder for a real planner.
        """
        steps = [
            {"step": "Analyze context", "tool": "context_parser"},
            {"step": "Break down goal", "tool": "goal_decomposer"},
            {"step": "Assign subtasks", "tool": "task_allocator"},
            {"step": "Execute and monitor", "tool": "execution_tracker"}
        ]
        return steps

    def store_plan(self, agent_role: str, tenant_id: str, goal: str, steps: list):
        """Persist a plan entry with a timestamp and return it.

        BUGFIX: the original module called time.time() without importing
        `time` anywhere in the file, raising NameError on first use; the
        local import makes the class self-contained.
        """
        import time
        entry = {
            "agent_role": agent_role,
            "tenant_id": tenant_id,
            "goal": goal,
            "steps": steps,
            "timestamp": time.time()
        }
        self.plans.append(entry)
        return entry

    def get_plans(self, agent_role: str, tenant_id: str):
        """Return all stored plans for this role/tenant pair."""
        return [p for p in self.plans if p["agent_role"] == agent_role and p["tenant_id"] == tenant_id]


# Module-level singleton shared by importers.
planning_engine = PlanningEngine()

42
agents/OLD.agent_core.py Normal file
View File

@@ -0,0 +1,42 @@
# agents/agent_core.py
from langchain.chains import RetrievalQA
from langchain.vectorstores import FAISS, Qdrant, Weaviate
from langchain.embeddings import HuggingFaceEmbeddings
from models.llm_loader import get_llm
from vector_store.base import get_vector_store
from config import VECTOR_DB
# from config.config import VECTOR_DB
def run_agent(user_input: str) -> str:
    """Answer *user_input* via a RetrievalQA chain over the configured vector store.

    The raw store from get_vector_store() is wrapped in the LangChain adapter
    matching the VECTOR_DB config value.
    """
    llm = get_llm()
    embedder = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
    store = get_vector_store()
    # Select the LangChain wrapper for the configured back-end.
    if VECTOR_DB == "faiss":
        wrapped = FAISS(store.index, embedder)
    elif VECTOR_DB == "qdrant":
        wrapped = Qdrant(
            client=store.client,
            collection_name=store.collection_name,
            embedding_function=embedder,
        )
    elif VECTOR_DB == "weaviate":
        wrapped = Weaviate(
            client=store.client,
            index_name="Document",
            embedding=embedder,
        )
    else:
        raise ValueError("Unsupported vector DB")
    chain = RetrievalQA.from_chain_type(
        llm=llm,
        retriever=wrapped.as_retriever(),
        return_source_documents=False,
    )
    return chain.run(user_input)

20
agents/OLD.message_bus.py Normal file
View File

@@ -0,0 +1,20 @@
from collections import defaultdict
from datetime import datetime
class MessageBus:
    """Minimal in-memory mailbox keyed by receiver role."""

    def __init__(self):
        # receiver role -> list of message dicts, in arrival order
        self.messages = defaultdict(list)

    def send(self, sender: str, receiver: str, content: str) -> None:
        """Queue *content* from *sender* into *receiver*'s mailbox."""
        from datetime import timezone  # local import: keeps module imports untouched
        self.messages[receiver].append({
            "from": sender,
            "to": receiver,
            "content": content,
            # BUGFIX: datetime.utcnow() is deprecated (3.12+) and returns a
            # naive datetime; use an explicit timezone-aware UTC timestamp.
            "timestamp": datetime.now(timezone.utc).isoformat()
        })

    def receive(self, role: str) -> list:
        """Drain and return all pending messages for *role* ([] if none)."""
        return self.messages.pop(role, [])

# Singleton instance
message_bus = MessageBus()

View File

@@ -0,0 +1,35 @@
# agents/agent_core.py
from langchain.chains import RetrievalQA
from models.llm_loader import get_llm
from models.embedding_loader import get_embedding_model
from vector_store.base import get_vector_store
from config import VECTOR_DB
# from config.config import VECTOR_DB
from utils.logger import logger
from memory.memory_manager import MemoryManager
memory = MemoryManager()
def run_agent(user_input: str) -> str:
    """Answer *user_input* through a RetrievalQA chain, personalized with the
    user's name and language taken from the memory manager."""
    llm = get_llm()
    embedder = get_embedding_model()
    store = get_vector_store()
    retriever = store.as_retriever(embedder)
    ctx = memory.get_context()
    # Prompt addresses the user by name and requests an answer in their language.
    prompt = f"{ctx['name']}님, 다음 질문에 {ctx['language']}로 답해주세요.\n\n질문: {user_input}"
    chain = RetrievalQA.from_chain_type(
        llm=llm,
        retriever=retriever,
        return_source_documents=False,
    )
    logger.info(f"Running agent with input: {user_input}")
    answer = chain.run(prompt)
    memory.log_interaction(user_input, answer)
    return answer

326
agents/agent_core.py Normal file
View File

@@ -0,0 +1,326 @@
# agents/agent_core.py
import uuid
import time
from agents.memory_agent import MemoryAgent
from agents.planner_agent import PlannerAgent
from agents.executor_agent import ExecutorAgent
from agents.critic_agent import CriticAgent
from agents.messaging import message_bus
from agent_registry import get_agent, get_agents_from_template
from tenants.tenant_registry import tenant_registry
from agents.sandbox import sandbox_agent_run, sandbox_agent_run_with_fallback
from agents.reflection import reflection_memory
from agents.feedback import feedback_store
from agents.reward_model import reward_model
from tools.tool_registry import tool_registry
from agent_registry import get_allowed_tools
from agents.chaining_templates import get_chaining_template
from memory.episodic_store import episodic_store
##NOTE: Enforcing RBAC Example
# from tenants.tenant_policy import tenant_policy_store
# def run_agent_task(tenant_id: str, agent_role: str, task: str, context: dict):
# if not tenant_policy_store.check_permission(tenant_id, agent_role, "run_task"):
# return {"error": f"Role '{agent_role}' is not allowed to run tasks for tenant '{tenant_id}'"}
# agent = get_agent(agent_role)
# result = agent.run(task, context)
# return {"result": result}
##INFO: Wrap RBAC enforcement
from tenants.rbac_guard import enforce_rbac
# Long-lived singleton agents shared by every request in this module.
memory_agent = MemoryAgent()
planner = PlannerAgent()
executor = ExecutorAgent()
critic = CriticAgent()
# Display metadata for the UI; avatar paths are resolved by the frontend.
AGENT_META = {
    "planner": {"name": "Planner", "avatar": "/avatars/planner.png"},
    "executor": {"name": "Executor", "avatar": "/avatars/executor.png"},
    "critic": {"name": "Critic", "avatar": "/avatars/critic.png"}
}
# def run_agent(user_input: str) -> str:
# return memory_agent.run(user_input)
##INFO: Integrated multi-agent orchestration, agent memory and messaging
@enforce_rbac("run_agent")
def run_agent(user_input: str, context: dict | None = None) -> dict:
    """Plan -> execute -> critique pipeline over *user_input*.

    Returns the per-agent memories (with UI metadata) and the critic's
    final feedback.

    BUGFIX: *context* previously defaulted to a shared mutable ``{}``
    (the classic mutable-default pitfall); a ``None`` default with per-call
    initialization is backward-compatible for all callers.
    """
    if context is None:
        context = {}
    plan = planner.run(user_input, context)
    executor_input = plan["plan"]
    result = executor.run(executor_input, context)
    feedback = critic.run(result["result"], context)
    # Inter-agent messaging: each agent records what it sent/received.
    planner.remember(f"Sent plan to executor: {executor_input}")
    executor.remember(f"Received plan: {executor_input}")
    executor.remember(f"Sent result to critic: {result['result']}")
    critic.remember(f"Received result: {result['result']}")
    memory_agent.remember(user_input)
    memory_agent.remember(feedback["feedback"])
    return {
        "agents": [
            {
                "role": "planner",
                "avatar": AGENT_META["planner"]["avatar"],
                "memory": planner.get_memory()
            },
            {
                "role": "executor",
                "avatar": AGENT_META["executor"]["avatar"],
                "memory": executor.get_memory()
            },
            {
                "role": "critic",
                "avatar": AGENT_META["critic"]["avatar"],
                "memory": critic.get_memory()
            }
        ],
        "final_feedback": feedback["feedback"]
    }
##INFO: Agent conditional logic
@enforce_rbac("run_agent_with_conditions")
def run_agent_with_conditions(task: str, context: dict, chain: list[str]) -> dict:
    """Run each role in *chain* over *task*, returning {role: memory}.

    Unknown roles are skipped; the critic only participates when the task
    text contains 'urgent'.
    """
    memories = {}
    is_urgent = "urgent" in task.lower()
    for role in chain:
        agent = get_agent(role)
        if not agent:
            continue
        # Conditional logic: critic weighs in only on urgent tasks.
        if role == "critic" and not is_urgent:
            continue
        output = agent.run(task, context)
        agent.remember(f"Processed: {output}")
        memories[role] = agent.get_memory()
    return memories
##INFO: Use messaging-aware execution
# # def run_agent_chain(task: str, template: str = "default", context: dict = {}):
# def run_agent_chain(task: str, template: str = "default", tenant_id: str = "default"):
# context = {"tenant_id": tenant_id}
# agents = get_agents_from_template(template)
# result = {}
# for agent in agents:
# output = agent.run(task, context)
# result[agent.__class__.__name__] = output
# return result
##INFO: Update with Tenant-agents
@enforce_rbac("run_agent_chain")
def run_agent_chain(task: str, template: str = "default", tenant_id: str = "default"):
    """Run *task* through the tenant's agent template with quota, role and
    capability checks, per-step checkpoint resume, workflow tracing and
    usage logging.

    Returns {role: output, ..., "_task_id": uuid} — or an error dict when the
    tenant is over quota.
    """
    if not tenant_registry.check_quota(tenant_id):
        return {"error": f"Tenant '{tenant_id}' has exceeded daily usage quota."}
    context = {"tenant_id": tenant_id}
    ##INFO: Pass 'tenant_id' to template loader (tenants may override line-ups)
    agents = get_agents_from_template(template, tenant_id)
    result = {}
    task_id = str(uuid.uuid4())
    for agent in agents:
        role = agent.__class__.__name__
        # First declared capability, or "generic" when the agent declares none.
        capability = getattr(agent, "capabilities", [])[0] if hasattr(agent, "capabilities") else "generic"
        if not tenant_registry.is_role_allowed(tenant_id, role):
            result[role] = {"error": f"Role '{role}' not allowed for tenant '{tenant_id}'"}
            continue
        if not tenant_registry.is_capability_allowed(tenant_id, capability):
            result[role] = {"error": f"Capability '{capability}' not allowed for tenant '{tenant_id}'"}
            continue
        ##TODO: toggle fallback behavior per tenant or per workflow later
        # Checkpoint resume: reuse a previously saved step for this task_id.
        checkpoint = tenant_registry.get_checkpoint(tenant_id, task_id).get(role)
        if checkpoint:
            result[role] = checkpoint
            continue
        # Timed execution with fallback.
        start = time.time()
        try:
            output = sandbox_agent_run_with_fallback(agent, task, context, capability)
            # BUGFIX: the success-path duration was computed but never attached
            # to `output`, so traces always logged duration_sec=0 for
            # successful steps. Attach it (without clobbering one the sandbox
            # may already have set).
            if isinstance(output, dict):
                output.setdefault("duration_sec", round(time.time() - start, 3))
        except Exception as e:
            output = {"role": role, "error": str(e), "duration_sec": round(time.time() - start, 3)}
        # Save trace + checkpoint
        tenant_registry.log_workflow_trace(tenant_id, task_id, {
            "role": role,
            "output": output,
            "duration_sec": output.get("duration_sec", 0) if isinstance(output, dict) else 0,
            "timestamp": time.time()
        })
        tenant_registry.save_checkpoint(tenant_id, task_id, role, output)
        tenant_registry.log_usage(tenant_id, role, task)
        result[role] = output
    result["_task_id"] = task_id
    return result
@enforce_rbac("run_collaborative_chain")
def run_collaborative_chain(task: str, roles: list[str], tenant_id: str = "default"):
    """Pass *task* along *roles* via the message bus, recording each hop.

    Agents may hand off by returning {"next": <role>, "message": ...}.
    Returns {"trace": [...]} with one record per processed message.

    BUGFIX: the annotation used ``List[str]`` but ``typing.List`` is never
    imported in this module, raising NameError when the function was defined.
    Use the builtin generic ``list[str]`` (already used elsewhere in this file).
    """
    context = {"tenant_id": tenant_id, "shared_memory": {}}
    message_bus.messages.clear()
    # Seed the first role's inbox with the original task.
    message_bus.send("system", roles[0], task)
    trace = []
    for role in roles:
        agent = get_agent(role)
        inbox = message_bus.get_for(role)
        for msg in inbox:
            response = agent.run(msg.content, context)
            trace.append({
                "from": msg.sender,
                "to": role,
                "input": msg.content,
                "output": response
            })
            if "next" in response:
                message_bus.send(role, response["next"], response["message"])
        message_bus.clear_for(role)
    return {"trace": trace}
@enforce_rbac("run_agent_chain_with_reflection")
def run_agent_chain_with_reflection(task: str, template: str = "default", tenant_id: str = "default"):
    """Run the tenant's template chain, retrying a step once with a reflection
    hint when its output errors or self-reports low quality; every outcome is
    logged to reflection memory and the episodic store.

    Returns {role: output, ..., "_task_id": uuid} or a quota error dict.
    """
    if not tenant_registry.check_quota(tenant_id):
        return {"error": f"Tenant '{tenant_id}' exceeded daily quota."}
    context = {"tenant_id": tenant_id}
    agents = get_agents_from_template(template, tenant_id)
    result = {}
    task_id = str(uuid.uuid4())
    for agent in agents:
        role = agent.__class__.__name__
        # First declared capability, or "generic" when the agent declares none.
        capability = getattr(agent, "capabilities", [])[0] if hasattr(agent, "capabilities") else "generic"
        if not tenant_registry.is_role_allowed(tenant_id, role):
            result[role] = {"error": f"Role '{role}' not allowed for tenant '{tenant_id}'"}
            continue
        if not tenant_registry.is_capability_allowed(tenant_id, capability):
            result[role] = {"error": f"Capability '{capability}' not allowed for tenant '{tenant_id}'"}
            continue
        output = sandbox_agent_run_with_fallback(agent, task, context, capability)
        # Reflection + episodic logging: retry once when the step failed or
        # self-reported low quality, seeding context with the last known output.
        if "error" in output or output.get("quality", "low") == "low":
            last = reflection_memory.get_last(tenant_id, role)
            if last:
                context["last_output"] = last["output"]
                context["reflection_note"] = "Previous output was flawed. Try a better approach."
            output = sandbox_agent_run_with_fallback(agent, task, context, capability)
            output["corrected"] = True
        reflection_memory.log(tenant_id, role, task, output)
        episodic_store.log_episode(tenant_id, role, task, output)
        tenant_registry.log_usage(tenant_id, role, task)
        result[role] = output
    result["_task_id"] = task_id
    return result
@enforce_rbac("run_agent_chain_with_reward")
def run_agent_chain_with_reward(task: str, template: str = "default", tenant_id: str = "default"):
    """Run the tenant's template chain and score each step with the reward
    model, retrying a step once when its reward falls below 0.4.

    Returns {role: output, ..., "_task_id": uuid}; each output carries a
    "reward_score" (and "corrected": True after a retry).
    """
    context = {"tenant_id": tenant_id}
    agents = get_agents_from_template(template, tenant_id)
    result = {}
    task_id = str(uuid.uuid4())
    for agent in agents:
        role = agent.__class__.__name__
        output = sandbox_agent_run_with_fallback(agent, task, context)
        # Get feedback score; fall back to a neutral 0.5 when none exists.
        feedback_avg = feedback_store.average_score(tenant_id, role) or 0.5
        # Score with reward model
        reward_score = reward_model.score(output, feedback_avg)
        output["reward_score"] = reward_score
        # Retraining trigger: one corrective retry below the 0.4 threshold.
        if reward_score < 0.4:
            context["retry_reason"] = "Low reward score"
            output = sandbox_agent_run_with_fallback(agent, task, context)
            output["corrected"] = True
            output["reward_score"] = reward_model.score(output, feedback_avg)
        result[role] = output
    result["_task_id"] = task_id
    return result
##INFO: Enable tool invocation
@enforce_rbac("sandbox_agent_run_with_tools")
def sandbox_agent_run_with_tools(agent, task: str, context: dict):
    """Run *agent* on *task*, then honour any tool requests in its response.

    A single ``tool_call`` is invoked only when its tool is permitted for the
    agent's role; a ``tool_chain`` is filtered down to permitted tools before
    execution.  Results are attached to the response dict under
    ``tool_result`` / ``tool_chain_result``.
    """
    response = agent.run(task, context)
    # Permission list depends only on the agent's class name, so look it up once.
    permitted = get_allowed_tools(agent.__class__.__name__)
    if "tool_call" in response:
        call = response["tool_call"]
        if call["name"] not in permitted:
            response["tool_result"] = {"error": f"Tool '{call['name']}' not allowed for this agent"}
        else:
            response["tool_result"] = tool_registry.invoke(call["name"], **call.get("args", {}))
    if "tool_chain" in response:
        runnable = [step for step in response["tool_chain"] if step["name"] in permitted]
        response["tool_chain_result"] = tool_registry.chain(runnable)
    return response
##INFO: Enable chaining template execution
@enforce_rbac("run_agent_with_template")
def run_agent_with_template(agent, context: dict):
    """Execute the chaining template registered for the agent's role.

    Looks up the role's predefined tool-step template and runs it through the
    tool registry, returning the chain result alongside the role name.
    """
    agent_role = agent.__class__.__name__
    chain_output = tool_registry.chain(get_chaining_template(agent_role))
    return {
        "role": agent_role,
        "template_used": True,
        "chain_result": chain_output,
    }

63
agents/agent_messenger.py Normal file
View File

@@ -0,0 +1,63 @@
# agents/agent_messenger.py
import time
class AgentMessenger:
    """In-memory, tenant-scoped message log plus named protocol definitions."""

    def __init__(self):
        self.messages = []   # chronological list of message dicts
        self.protocols = {}  # {name: {"roles": [...], "message_types": [...]}}

    def send(self, sender: str, receiver: str, tenant_id: str, content: str, msg_type="info"):
        """Append one message; *msg_type* defaults to a plain "info" message."""
        record = {
            "from": sender,
            "to": receiver,
            "tenant": tenant_id,
            "type": msg_type,
            "content": content,
            "timestamp": time.time(),
        }
        self.messages.append(record)

    def get_conversation(self, tenant_id: str, agent_a: str, agent_b: str):
        """All messages between the two agents (either direction) for a tenant."""
        pair = {agent_a, agent_b}
        return [
            m for m in self.messages
            if m["tenant"] == tenant_id and {m["from"], m["to"]} == pair
        ]

    def get_inbox(self, tenant_id, receiver):
        """Messages addressed to *receiver* within the tenant."""
        return [m for m in self.messages
                if m["tenant"] == tenant_id and m["to"] == receiver]

    def get_all(self, tenant_id):
        """Every message recorded for the tenant."""
        return [m for m in self.messages if m["tenant"] == tenant_id]

    def define_protocol(self, name, roles, message_types):
        """Register (or overwrite) a named protocol and return its definition."""
        self.protocols[name] = {"roles": roles, "message_types": message_types}
        return self.protocols[name]

    def get_protocols(self):
        """The full {name: definition} protocol mapping."""
        return self.protocols

agent_messenger = AgentMessenger()

202
agents/agent_registry.py Normal file
View File

@@ -0,0 +1,202 @@
import json
import importlib
import pkgutil
import logging
import sys
from pathlib import Path
from chains.templates import CHAIN_TEMPLATES
from plugins.plugin_loader import load_plugin
# from agents.planner_agent import PlannerAgent
# from agents.executor_agent import ExecutorAgent
# from agents.critic_agent import CriticAgent
from agents.fallback_agent import FallbackAgent # Youll create this below
from tenants.tenant_registry import tenant_registry
from agents.role_registry import role_registry
from tenants.personalization_store import personalization_store
# Setup logging
logger = logging.getLogger("agent_registry")
logger.setLevel(logging.INFO)
# Optional: Debug Output in Console
handler = logging.StreamHandler(sys.stdout)
logger.addHandler(handler)
# Load config
# NOTE(review): read eagerly at import time — a missing or invalid
# config/agent_config.json makes this module unimportable; confirm intended.
CONFIG_PATH = Path("config/agent_config.json")
AGENT_CONFIG = json.loads(CONFIG_PATH.read_text())
# Registry and fallback
AGENT_REGISTRY = {}  # {role: agent instance}; filled below and by @register_agent
FALLBACK_AGENT = FallbackAgent()  # served by get_agent() for unknown/disabled roles
##INFO: Tool permissions per agent
# Maps an agent role to the tool names it may invoke (see get_allowed_tools()).
agent_tool_permissions = {
    "planner": ["weather", "stock"],
    "executor": ["email"],
    "analyst": ["stock"]
}
##INFO: Agent role registration
# Advertise the built-in roles, their descriptions and capability verbs in the
# shared role registry at import time.
role_registry.register_role("planner", "Decomposes goals into tasks", ["decompose", "assign"])
role_registry.register_role("researcher", "Finds relevant information", ["search", "summarize"])
role_registry.register_role("executor", "Performs actions via tools", ["invoke", "chain"])
role_registry.register_role("validator", "Checks correctness and quality", ["score", "reflect"])
role_registry.register_role("explainer", "Generates human-readable summaries", ["narrate", "translate"])
def get_allowed_tools(role: str):
    """Return the tool names permitted for *role* (empty list when unknown)."""
    try:
        return agent_tool_permissions[role]
    except KeyError:
        return []
##INFO: Add capability metadata to each agent during registration,
## extend the decorator to support agent group
def register_agent(role: str, capabilities: list[str] = None, avatar: str = None, group: str = "default"):
    """Class decorator: instantiate the agent class and add it to AGENT_REGISTRY.

    Registration is skipped when the role is disabled in AGENT_CONFIG.  The
    created instance is tagged with its capabilities, avatar path and group.
    The decorated class itself is returned unchanged.
    """
    def decorator(cls):
        cfg = AGENT_CONFIG.get(role, {})
        if not cfg.get("enabled", True):
            logger.info(f"Agent '{role}' disabled via config")
            return cls
        instance = cls(cfg)
        instance.capabilities = capabilities or []
        instance.avatar = avatar or "/avatars/default.png"
        instance.group = group
        AGENT_REGISTRY[role] = instance
        logger.info(f"Registered agent: {role} with capabilities: {instance.capabilities} in group: {group}")
        return cls
    return decorator
##INFO: Inject capabilities during agent creation, Manual creation fallback (for legacy agents),
## inject tenant-specific config
def create_agent(role, tenant_id="default"):
    """Construct a fresh agent for *role*, honouring tenant upgrades and config overrides."""
    ##INFO: Support upgrades
    # A tenant may point a role at a custom class path ("pkg.module.Class");
    # on any import/constructor failure we log and fall through to the default.
    upgrade_path = tenant_registry.get_upgraded_agent_class(tenant_id, role)
    if upgrade_path:
        module_name, class_name = upgrade_path.rsplit(".", 1)
        try:
            mod = importlib.import_module(module_name)
            cls = getattr(mod, class_name)
            return cls(AGENT_CONFIG.get(role, {}))
        except Exception as e:
            logger.warning(f"Upgrade failed for {role}: {e}")
            # fallback to default
    # Tenant-specific config values override the base config for the role.
    base_cfg = AGENT_CONFIG.get(role, {})
    override_cfg = tenant_registry.get_agent_config(tenant_id, role) or {}
    merged_cfg = {**base_cfg, **override_cfg}
    agent = None
    # Agent modules import this registry (for @register_agent), so these
    # imports are deferred to avoid a circular import at module load.
    if role == "planner":
        from agents.planner_agent import PlannerAgent
        agent = PlannerAgent(merged_cfg)
    elif role == "executor":
        from agents.executor_agent import ExecutorAgent
        agent = ExecutorAgent(merged_cfg)
    elif role == "critic":
        from agents.critic_agent import CriticAgent
        agent = CriticAgent(merged_cfg)
    else:
        agent = FallbackAgent()
        logger.warning(f"Unknown agent role requested: {role}")
    # Inject tenant-specific capabilities
    agent.capabilities = tenant_registry.get_agent_capabilities(tenant_id, role)
    return agent
# Initial registry population from config
# Instantiate every enabled role that a @register_agent decorator has not
# already provided.
for role in AGENT_CONFIG:
    if AGENT_CONFIG[role].get("enabled", True) and role not in AGENT_REGISTRY:
        AGENT_REGISTRY[role] = create_agent(role)
# Plugin loading: Dynamically load a custom agent
# Only register the plugin agent when load_plugin returned something truthy.
custom_agent = load_plugin("agents.custom_agent", "CustomAgent")
if custom_agent:
    AGENT_REGISTRY["custom"] = custom_agent
## Auto-discovery of agents using decorators
def discover_agents():
    """Import every sibling agent module so @register_agent decorators fire.

    Private modules (leading underscore) and this registry module itself are
    skipped; import failures are logged and do not abort discovery.
    """
    package_dir = str(Path(__file__).parent)
    for _finder, mod_name, _is_pkg in pkgutil.iter_modules([package_dir]):
        if mod_name.startswith("_") or mod_name == "agent_registry":
            continue
        try:
            importlib.import_module(f"agents.{mod_name}")
        except Exception as exc:
            logger.warning(f"Failed to import agent module '{mod_name}': {exc}")
## Agent access
# def get_agent(role: str):
# return AGENT_REGISTRY.get(role, FALLBACK_AGENT)
##INFO: Seamless integration of Personalization
def get_agent(role: str, tenant_id: str = "default"):
    """Return the registered agent for *role* with tenant personalization applied.

    The agent's ``run`` is wrapped so the tenant's tone/style/strategy profile
    is prepended to every task prompt.

    BUG FIX: the previous version wrapped whatever ``agent.run`` currently was
    on every lookup, so repeated calls stacked wrappers and prepended the
    personalization header once per lookup.  We now remember the pristine
    ``run`` on the instance the first time and always wrap that, so exactly one
    header is applied per task.
    """
    agent = AGENT_REGISTRY.get(role, FALLBACK_AGENT)
    # Capture the unwrapped run() exactly once per agent instance.
    if not hasattr(agent, "_base_run"):
        agent._base_run = agent.run
    base_run = agent._base_run

    def personalized_run(task: str, context: dict):
        profile = personalization_store.get_profile(tenant_id)
        tone = profile.get("tone", "neutral")
        style = profile.get("style", "concise")
        strategy = profile.get("strategy", "default")
        # Inject personalization into prompt
        personalized_task = f"[Tone: {tone} | Style: {style} | Strategy: {strategy}]\n{task}"
        return base_run(personalized_task, context)

    # NOTE(review): agents are shared singletons, so the wrapper installed by
    # the most recent lookup (and its tenant) wins globally; consider returning
    # a per-tenant proxy instead of mutating the shared instance.
    agent.run = personalized_run
    return agent
def get_agents_from_template(template_name: str, tenant_id: str = "default") -> list:
    """Resolve a chain template name to a list of personalized agent instances.

    A tenant-specific workflow takes precedence over the shared CHAIN_TEMPLATES
    entry; unknown template names fall back to the "default" template.
    """
    from tenants.tenant_registry import tenant_registry
    role_list = (tenant_registry.get_workflow(tenant_id, template_name)
                 or CHAIN_TEMPLATES.get(template_name, CHAIN_TEMPLATES["default"]))
    return [get_agent(role_name, tenant_id) for role_name in role_list]
##INFO: for startup diagnostics of registered agents
def log_registered_agents():
    """Log every role currently in AGENT_REGISTRY with its implementing class."""
    logger.info("🔍 Registered Agents:")
    for role, agent in AGENT_REGISTRY.items():
        logger.info(f" - {role}: {agent.__class__.__name__}")
##INFO: Runtime toggling for registered agents
def toggle_agent(role: str, enabled: bool):
    """Enable (re-create) or disable (remove) an agent role at runtime."""
    if not enabled:
        AGENT_REGISTRY.pop(role, None)
        logger.info(f"Agent '{role}' disabled at runtime")
        return
    AGENT_REGISTRY[role] = create_agent(role)
    logger.info(f"Agent '{role}' enabled at runtime")
##INFO: Health check for agent
def check_agent_health(agent) -> str:
    """Probe *agent* with a dummy task and report a human-readable status.

    Returns "✅ OK" for a truthy response, "⚠️ No response" for a falsy one,
    and "❌ Error: <msg>" when run() raises.
    """
    try:
        probe = agent.run("health_check", {})
    except Exception as e:
        return f"❌ Error: {str(e)}"
    return "✅ OK" if probe else "⚠️ No response"
##INFO: Add capability-based agent lookup
def get_agent_by_capability(capability: str, tenant_id: str = "default"):
    """Find an agent whose tenant-scoped capabilities include *capability*.

    Falls back to the tenant's configured fallback role for the capability,
    then to a bare FallbackAgent when nothing matches.
    """
    from tenants.tenant_registry import tenant_registry
    # Primary match
    # First registry entry whose tenant capabilities contain the capability wins.
    for role, agent in AGENT_REGISTRY.items():
        caps = tenant_registry.get_agent_capabilities(tenant_id, role)
        if capability in caps:
            return agent
    # Fallback match
    fallback_role = tenant_registry.get_fallback_agent(tenant_id, capability)
    if fallback_role:
        return get_agent(fallback_role)
    return FallbackAgent()

View File

@@ -0,0 +1,36 @@
# agents/autonomous_planner.py
from agent_registry import get_agent
from agents.goal_store import goal_store
import uuid
class AutonomousPlanner:
    """Lets the planner agent propose and revise strategic goals per tenant."""

    def __init__(self):
        self.sessions = {}  # {goal_id: {tenant_id, goal, context, status}}

    def propose_goal(self, tenant_id: str, context: dict):
        """Ask the planner for a new goal and track it under a fresh UUID."""
        proposal = get_agent("planner").run(
            "Propose a strategic goal based on this context", context)
        goal_id = str(uuid.uuid4())
        self.sessions[goal_id] = {
            "tenant_id": tenant_id,
            "goal": proposal,
            "context": context,
            "status": "proposed",
        }
        return {"goal_id": goal_id, "goal": proposal}

    def revise_goal(self, goal_id: str, feedback: str):
        """Re-run the planner on an existing goal using caller feedback."""
        session = self.sessions.get(goal_id)
        if session is None:
            return {"error": "Goal not found"}
        updated = get_agent("planner").run(
            f"Revise this goal based on feedback: {feedback}", session["context"])
        session["goal"] = updated
        session["status"] = "revised"
        return {"goal_id": goal_id, "revised_goal": updated}

    def get_all_goals(self):
        """Every tracked goal session keyed by goal id."""
        return self.sessions

autonomous_planner = AutonomousPlanner()

View File

@@ -0,0 +1,90 @@
# agents/background_learner.py
import requests
from bs4 import BeautifulSoup
from langchain.embeddings import HuggingFaceEmbeddings
from vector_store.base import get_vector_store
from models.llm_loader import get_llm
class BackgroundLearner:
    """Searches the web for a prompt, summarizes pages with the LLM and embeds the summaries."""

    def __init__(self):
        self.vector_store = get_vector_store()
        self.embedding_model = HuggingFaceEmbeddings(model_name="sentence-transformers/all-MiniLM-L6-v2")
        self.llm = get_llm()

    def search_web(self, query: str, engine="bing", num_results=10):
        """Scrape result links for *query* from bing, duckduckgo or google.

        Returns at most *num_results* URLs; an unrecognised engine yields [].
        NOTE(review): relies on each engine's current HTML structure and may
        be rate-limited/blocked — confirm this is acceptable for production.
        """
        headers = {"User-Agent": "Mozilla/5.0"}
        links = []
        if engine == "bing":
            search_url = f"https://www.bing.com/search?q={query}"
            response = requests.get(search_url, headers=headers)
            soup = BeautifulSoup(response.text, "html.parser")
            links = [a["href"] for a in soup.select("li.b_algo h2 a") if a.get("href")]
        elif engine == "duckduckgo":
            search_url = f"https://duckduckgo.com/html/?q={query}"
            response = requests.get(search_url, headers=headers)
            soup = BeautifulSoup(response.text, "html.parser")
            links = [a["href"] for a in soup.select(".result__title a") if a.get("href")]
        elif engine == "google":
            search_url = f"https://www.google.com/search?q={query}"
            response = requests.get(search_url, headers=headers)
            soup = BeautifulSoup(response.text, "html.parser")
            links = [a["href"] for a in soup.select("div.yuRUbf a") if a.get("href")]
        return links[:num_results]

    def extract_content(self, url: str):
        """Fetch *url* and return its concatenated <p> text ("" on any failure)."""
        try:
            response = requests.get(url, timeout=10)
            soup = BeautifulSoup(response.text, "html.parser")
            paragraphs = soup.find_all("p")
            text = "\n".join(p.get_text() for p in paragraphs)
            return text.strip()
        except Exception as e:
            print(f"Failed to extract from {url}: {e}")
            return ""

    def summarize_content(self, content: str, language: str = "ko"):
        """Summarize *content* (truncated to 3000 chars) via the LLM.

        BUG FIX: learn_from_prompt() calls this with ``language=...`` but the
        old signature took no such parameter, so every call raised TypeError.
        ``"ko"`` keeps the original Korean prompt; any other value asks for a
        summary in that language.
        """
        if language == "ko":
            prompt = f"다음 내용을 한국어로 간결하게 요약해 주세요:\n\n{content[:3000]}"
        else:
            prompt = f"Summarize the following content concisely in '{language}':\n\n{content[:3000]}"
        summary = self.llm(prompt)
        return summary

    def learn_from_prompt(self, prompt: str, engine="bing", num_results=10, language="ko"):
        """Search, extract, summarize and embed content for *prompt*."""
        print(f"🔍 Searching '{prompt}' via {engine}...")
        urls = self.search_web(prompt, engine=engine, num_results=num_results)
        for url in urls:
            print(f"🌐 Extracting from: {url}")
            raw_content = self.extract_content(url)
            if not raw_content:
                continue
            summary = self.summarize_content(raw_content, language=language)
            self.vector_store.add_document(summary)
            print(f"✅ Learned and embedded content from: {url}")

View File

@@ -0,0 +1,20 @@
# agents/certification_registry.py
class CertificationRegistry:
    """Tracks which agent roles are certified, and at which version."""

    def __init__(self):
        self.certified = {}  # {agent_role: {"version": str, "certified_at": timestamp}}

    def certify(self, agent_role: str, version: str):
        """Record (or refresh) certification of *agent_role* at *version*.

        BUG FIX: this module never imported ``time``, so certify() raised
        NameError on first use; the import is scoped here to keep the fix
        local to the block.
        """
        import time
        self.certified[agent_role] = {
            "version": version,
            "certified_at": time.time()
        }
        return self.certified[agent_role]

    def get_certified(self):
        """The full {role: certification record} mapping."""
        return self.certified

    def is_certified(self, agent_role: str):
        """True when *agent_role* has a certification record."""
        return agent_role in self.certified

certification_registry = CertificationRegistry()

View File

@@ -0,0 +1,19 @@
# agents/chaining_templates.py
# Static per-role tool chains: each entry is an ordered list of tool-call
# steps ({"name": tool, "args": {...}}) executed by the tool registry.
chaining_templates = {
    "planner": [
        {"name": "weather", "args": {"location": "Seoul"}},
        {"name": "stock", "args": {"symbol": "MSFT"}},
    ],
    "executor": [
        {"name": "email", "args": {
            "to": "tony@example.com",
            "subject": "Report",
            "body": "Weather and stock update complete.",
        }},
    ],
}


def get_chaining_template(role: str):
    """Return the tool-chain steps for *role*, or [] when none are defined."""
    return chaining_templates.get(role, [])

View File

@@ -0,0 +1,30 @@
# agents/cluster_registry.py
import time
class ClusterRegistry:
    """In-memory registry of multi-agent clusters and their lifecycle state."""

    def __init__(self):
        self.clusters = {}  # {cluster_id: {agents, goal, created_at, status}}

    def create_cluster(self, cluster_id: str, agent_roles: list, goal: str):
        """Register an active cluster of *agent_roles* working toward *goal*."""
        entry = {
            "agents": agent_roles,
            "goal": goal,
            "created_at": time.time(),
            "status": "active",
        }
        self.clusters[cluster_id] = entry
        return entry

    def get_cluster(self, cluster_id: str):
        """The cluster record, or None when the id is unknown."""
        return self.clusters.get(cluster_id)

    def get_all(self):
        """All cluster records (ids are not included in the records)."""
        return list(self.clusters.values())

    def terminate_cluster(self, cluster_id: str):
        """Mark a cluster terminated; error dict when the id is unknown."""
        entry = self.clusters.get(cluster_id)
        if entry is None:
            return {"error": "Cluster not found"}
        entry["status"] = "terminated"
        return entry

cluster_registry = ClusterRegistry()

View File

@@ -0,0 +1,21 @@
# agents/collab_protocols.py
##INFO:
class CollaborationProtocols:
    """Named collaboration protocols: participating roles plus allowed message types."""

    def __init__(self):
        self.protocols = {}  # {name: {"roles": [...], "message_types": [...]}}

    def define_protocol(self, name: str, roles: list[str], message_types: list[str]):
        """Create or overwrite a protocol definition and return it."""
        definition = {"roles": roles, "message_types": message_types}
        self.protocols[name] = definition
        return definition

    def get_protocol(self, name: str):
        """Lookup by name; None when undefined."""
        return self.protocols.get(name)

    def get_all(self):
        """The full protocol mapping."""
        return self.protocols

collab_protocols = CollaborationProtocols()

View File

@@ -0,0 +1,108 @@
# agents/collaboration_engine.py
import time
from agent_registry import get_agent
from agents.role_registry import role_registry
from memory.episodic_store import episodic_store
##INFO: Wrap RBAC enforcement
from tenants.rbac_guard import enforce_rbac
from agents.agent_messenger import agent_messenger # NEW
from agents.shared_goal_registry import shared_goal_registry
class CollaborationEngine:
    """Coordinates delegation, sequential chains and shared goals across agents."""

    def __init__(self):
        self.trace = []     # flat audit log of delegations / shared-goal steps
        self.sessions = {}  # {chain_id: {"tenant_id", "task", "steps"}}

    ##INFO: Enforce RBAC
    @enforce_rbac("delegate_task")
    def delegate_task(self, tenant_id: str, from_role: str, to_role: str, task: str, context: dict):
        """Have *to_role* execute *task* on behalf of *from_role*; log and return the output."""
        # NOTE(review): from_agent is looked up but never used — kept because
        # get_agent() has side effects on the shared instance; confirm intent.
        from_agent = get_agent(from_role)
        to_agent = get_agent(to_role)
        context["delegated_by"] = from_role
        output = to_agent.run(task, context)
        episodic_store.log_episode(tenant_id, to_role, task, output)
        self.trace.append({
            "from": from_role,
            "to": to_role,
            "task": task,
            "output": output
        })
        # BUG FIX: task and output were previously concatenated with no
        # separator, producing unreadable messages.
        agent_messenger.send(from_role, to_role, tenant_id, f"Delegated task: {task} → {output}")
        return output

    # NEW: Multi-agent chain execution
    @enforce_rbac("run_collaboration_chain")
    def run_chain(self, tenant_id: str, chain_id: str, roles: list, task: str, context: dict):
        """Pipe *task* through *roles* in order; each agent's output feeds the next.

        Records per-step latency/success, messages each completion, logs every
        episode, and stores the whole session under *chain_id*.
        """
        steps = []
        current_input = task
        for role in roles:
            agent = get_agent(role, tenant_id)
            start = time.time()
            output = agent.run(current_input, context)
            latency = time.time() - start
            step = {
                "agent": role,
                "input": current_input,
                "output": output,
                "latency": latency,
                "success": bool(output)
            }
            steps.append(step)
            # Log message
            agent_messenger.send(role, "next", tenant_id, f"Completed step with output: {output}")
            # BUG FIX: the episode was previously logged *after* current_input
            # was reassigned to the output, so every episode recorded the
            # output as its own input.  Log with the true step input instead.
            episodic_store.log_episode(tenant_id, role, current_input, output)
            current_input = output
        self.sessions[chain_id] = {
            "tenant_id": tenant_id,
            "task": task,
            "steps": steps
        }
        return steps

    # NEW: Run shared goal across agents
    @enforce_rbac("run_shared_goal")
    def run_shared_goal(self, goal_id: str, tenant_id: str):
        """Run a registered shared goal on each of its target roles and record results."""
        goal = shared_goal_registry.get_goal(goal_id)
        if not goal:
            return {"error": "Goal not found"}
        trace = []
        for role in goal["targets"]:
            agent = get_agent(role, tenant_id)
            output = agent.run(goal["goal"], {"tenant_id": tenant_id})
            episodic_store.log_episode(tenant_id, role, goal["goal"], output)
            trace.append({
                "role": role,
                "task": goal["goal"],
                "output": output
            })
        self.trace.extend(trace)
        shared_goal_registry.update_result(goal_id, tenant_id, {"trace": trace})
        return trace

    def get_trace(self):
        """The engine-wide audit trace."""
        return self.trace

    def get_session(self, chain_id: str):
        """A stored chain session, or None when unknown."""
        return self.sessions.get(chain_id)

collaboration_engine = CollaborationEngine()

View File

@@ -0,0 +1,46 @@
# agents/consensus_engine.py
##NOTE:
from agents.agent_registry import get_agent
from agents.agent_messenger import agent_messenger
import time
class ConsensusEngine:
    """Three-round propose / critique / vote negotiation among agent roles."""

    def __init__(self):
        self.sessions = {}  # {session_id: negotiation record}

    def negotiate(self, tenant_id: str, session_id: str, roles: list, topic: str, context: dict):
        """Run the full negotiation and return (and store) the session record.

        Round 1 collects one proposal per role, round 2 collects critiques of
        all proposals, round 3 collects votes.  Every utterance is broadcast
        to the "group" channel via the messenger.
        """
        proposals = []
        for role in roles:
            member = get_agent(role, tenant_id)
            offered = member.run(f"Propose solution for: {topic}", context)
            proposals.append({"agent": role, "proposal": offered})
            agent_messenger.send(role, "group", tenant_id, f"Proposal: {offered}")
        # Round 2: critique each other's proposals
        critiques = []
        for role in roles:
            member = get_agent(role, tenant_id)
            review = member.run(f"Review and critique proposals: {proposals}", context)
            critiques.append({"agent": role, "critique": review})
            agent_messenger.send(role, "group", tenant_id, f"Critique: {review}")
        # Round 3: vote or converge
        votes = []
        for role in roles:
            member = get_agent(role, tenant_id)
            ballot = member.run(f"Vote on best proposal: {proposals}", context)
            votes.append({"agent": role, "vote": ballot})
            agent_messenger.send(role, "group", tenant_id, f"Vote: {ballot}")
        record = {
            "tenant_id": tenant_id,
            "topic": topic,
            "proposals": proposals,
            "critiques": critiques,
            "votes": votes
        }
        self.sessions[session_id] = record
        return record

consensus_engine = ConsensusEngine()

View File

@@ -0,0 +1,38 @@
# agents/coordination_engine.py
from memory.semantic_store import semantic_store
from memory.episodic_store import episodic_store
from agents.reward_model import reward_model
from agents.agent_registry import get_agent
class CoordinationEngine:
    """Cross-agent validation of outputs and synchronization of shared memory."""

    def __init__(self):
        self.events = []  # chronological validation / memory-sync event log

    def validate_output(self, tenant_id: str, validator_role: str, task: str, output: dict):
        """Score *output* with the reward model and have a validator agent review it.

        NOTE(review): reward_model.score is called here with a single argument
        but with a feedback argument elsewhere — confirm a default exists.
        """
        reviewer = get_agent(validator_role)
        score = reward_model.score(output)
        feedback = reviewer.run(f"Evaluate this output: {output}", {"tenant_id": tenant_id})
        self.events.append({
            "type": "validation",
            "validator": validator_role,
            "task": task,
            "score": score,
            "feedback": feedback
        })
        return {"score": score, "feedback": feedback}

    def sync_memory(self, tenant_id: str, agent_role: str, key: str, value: str):
        """Persist a fact semantically and episodically, recording the sync event."""
        semantic_store.save_fact(tenant_id, key, value)
        episodic_store.log_episode(tenant_id, agent_role, f"Memory sync: {key}", {"value": value})
        self.events.append({
            "type": "memory_sync",
            "agent": agent_role,
            "key": key,
            "value": value
        })

    def get_events(self):
        """All recorded coordination events."""
        return self.events

coordination_engine = CoordinationEngine()

49
agents/critic_agent.py Normal file
View File

@@ -0,0 +1,49 @@
# agents/critic_agent.py
from datetime import datetime
# from agents.message_bus import message_bus
from agents.agent_registry import register_agent
from agents.tenant_memory import tenant_memory
from agents.tenant_message_bus import tenant_bus
from models.model_router import get_routed_llm
from tenants.branding_registry import branding_registry
@register_agent("critic", capabilities=["feedback", "review results"], avatar="/avatars/critic.png", group="core")
class CriticAgent:
    """Reviews completed tasks via a routed LLM, applying the tenant's branding tone."""
    def __init__(self, config=None):
        # Per-role configuration injected by the registry (may be empty).
        self.config = config or {}
    def run(self, task: str, context: dict) -> dict:
        """Drain this tenant's critic inbox, review *task*, return tone-adjusted feedback.

        Returns {"role": "critic", "review": <text>, "model_used": <model repr>}.
        """
        tenant_id = context.get("tenant_id", "default")
        branding = branding_registry.get_branding(tenant_id)
        tone = branding.get("tone", "neutral")
        # Receive messages using Tenant-Bus
        # Each received message is persisted to tenant memory before reviewing.
        messages = tenant_bus.receive(tenant_id, "critic")
        for msg in messages:
            tenant_memory.remember(tenant_id, "critic", f"Received from {msg['from']}: {msg['content']}", tags=["review", "task"])
        # 🔁 Use routed LLM for review
        model = get_routed_llm(task)
        raw_feedback = model(f"Review this task: {task}")
        # 🎨 Apply tone
        if tone == "friendly":
            feedback = f"Here's my take on it! 🧐\n{raw_feedback}"
        elif tone == "formal":
            feedback = f"Review completed for task: '{task}'.\n{raw_feedback}"
        else:
            feedback = raw_feedback
        tenant_memory.remember(tenant_id, "critic", feedback, tags=["review", "task"])
        return {"role": "critic", "review": feedback, "model_used": str(model)}

35
agents/debate_engine.py Normal file
View File

@@ -0,0 +1,35 @@
# agents/debate_engine.py
from agents.messaging import Message
from agent_registry import get_agent
from tenants.tenant_registry import tenant_registry
class DebateThread:
    """A multi-round debate: the first role opens, the remaining roles respond in turn."""

    def __init__(self, tenant_id: str, topic: str, roles: list, rounds: int = 3):
        self.tenant_id = tenant_id
        self.topic = topic
        self.roles = roles
        self.rounds = rounds
        self.messages: list[Message] = []

    def run(self):
        """Execute the debate; returns the topic, per-message trace and raw thread."""
        context = {"tenant_id": self.tenant_id, "topic": self.topic}
        trace = []
        # Round 0: opening claim by the first role.
        opener = self.roles[0]
        opening = get_agent(opener).run(self.topic, context)
        self.messages.append(Message(opener, "thread", opening.get("content", self.topic)))
        trace.append({"role": opener, "round": 0, "message": opening})
        # Rounds 1..N: every other role replies to the most recent message.
        for round_num in range(1, self.rounds + 1):
            for role in self.roles[1:]:
                previous = self.messages[-1].content
                reply = get_agent(role).run(previous, context)
                self.messages.append(Message(role, "thread", reply.get("content", previous)))
                trace.append({"role": role, "round": round_num, "message": reply})
        return {"topic": self.topic, "trace": trace, "thread": [m.__dict__ for m in self.messages]}

View File

@@ -0,0 +1,80 @@
# agents/deployment_registry.py
##INFO: Agent Deployment Registry
import time
class DeploymentRegistry:
    """Tracks launched agent instances per tenant plus an orchestration trace log."""

    def __init__(self):
        ##NOTE: Add support for status updates and orchestration
        self.instances = []  # [{tenant_id, agent_role, config, status, launched_at, terminated_at}]
        self.traces = []     # [{agent_role, action, status, tenant_id, timestamp}]

    def launch_agent(self, tenant_id: str, agent_role: str, config: dict):
        """Record a newly launched agent instance and return its record."""
        instance = {
            "tenant_id": tenant_id,
            "agent_role": agent_role,
            "config": config,
            "status": "running",
            "launched_at": time.time(),
            "terminated_at": None
        }
        self.instances.append(instance)
        return instance

    def terminate_agent(self, tenant_id: str, agent_role: str):
        """Terminate the first matching *running* instance for the tenant/role."""
        for inst in self.instances:
            if (inst["tenant_id"] == tenant_id and inst["agent_role"] == agent_role
                    and inst["status"] == "running"):
                inst["status"] = "terminated"
                inst["terminated_at"] = time.time()
                return inst
        return {"error": "Agent not found or already terminated"}

    def update_status(self, agent_role: str, status: str):
        """Set *status* on the first instance with *agent_role*.

        BUG FIX: previously fell through and implicitly returned None when no
        instance matched; now returns an explicit error dict, consistent with
        the other mutators.
        NOTE(review): matching ignores tenant_id — confirm that is intended.
        """
        for inst in self.instances:
            if inst["agent_role"] == agent_role:
                inst["status"] = status
                return {"status": "updated"}
        return {"error": "Agent not found"}

    def log_trace(self, agent_role: str, action: str, status: str, tenant_id: str):
        """Append and return one orchestration trace entry."""
        trace = {
            "agent_role": agent_role,
            "action": action,
            "status": status,
            "tenant_id": tenant_id,
            "timestamp": time.time()
        }
        self.traces.append(trace)
        return trace

    def get_traces(self, tenant_id: str):
        """All trace entries recorded for *tenant_id*."""
        return [t for t in self.traces if t["tenant_id"] == tenant_id]

    def get_all(self):
        """Every instance record, across all tenants."""
        return self.instances

    def get_by_tenant(self, tenant_id: str):
        """Instance records belonging to *tenant_id*."""
        return [i for i in self.instances if i["tenant_id"] == tenant_id]

deployment_registry = DeploymentRegistry()

View File

@@ -0,0 +1,46 @@
# agents/evaluation_engine.py
import time
from sklearn.metrics.pairwise import cosine_similarity
from sentence_transformers import SentenceTransformer
model = SentenceTransformer("all-MiniLM-L6-v2")
class EvaluationEngine:
    """Scores agent outputs against expected answers via embedding similarity."""

    def __init__(self):
        self.scores = []  # chronological list of score records

    def evaluate(self, agent_role: str, task: str, output: str, expected: str, tenant_id: str):
        """Embed *output* and *expected*, compute cosine similarity, record a score.

        Certification requires similarity > 0.85.  The stored metrics are
        task adherence (the similarity), a length-based efficiency proxy, and
        a binary completion flag.
        """
        emb_out = model.encode([output])
        emb_exp = model.encode([expected])
        similarity = cosine_similarity(emb_out, emb_exp)[0][0]
        metrics = {
            "task_adherence": round(similarity, 4),
            "efficiency": round(len(output) / 100, 2),  # crude proxy: output length
            "completion": 1.0 if output else 0.0
        }
        record = {
            "timestamp": time.time(),
            "agent": agent_role,
            "tenant": tenant_id,
            "task": task,
            "output": output,
            "expected": expected,
            "similarity": metrics["task_adherence"],
            "certified": similarity > 0.85,
            "metrics": metrics
        }
        self.scores.append(record)
        return record

    def get_all(self):
        """Every score record."""
        return self.scores

    def get_by_agent(self, agent_role: str):
        """Score records for one agent role."""
        return [s for s in self.scores if s["agent"] == agent_role]

evaluation_engine = EvaluationEngine()

54
agents/executor_agent.py Normal file
View File

@@ -0,0 +1,54 @@
# agents/executor_agent.py
from datetime import datetime
# from agents.message_bus import message_bus
from agents.agent_registry import register_agent
from agents.tenant_memory import tenant_memory
from agents.tenant_message_bus import tenant_bus
from models.model_router import get_routed_slm
from tenants.branding_registry import branding_registry
@register_agent("executor", capabilities=["execution", "task/plan execution"], avatar="/avatars/executor.png", group="core")
class ExecutorAgent:
    """Executes tasks with a routed SLM, then notifies the critic via the tenant bus."""
    def __init__(self, config=None):
        # Per-role configuration injected by the registry (may be empty).
        self.config = config or {}
    def run(self, task: str, context: dict) -> dict:
        """Drain the executor inbox, run *task* through the routed SLM, hand off to critic.

        Returns {"role": "executor", "execution": <text>, "model_used": <model repr>}.
        """
        tenant_id = context.get("tenant_id", "default")
        branding = branding_registry.get_branding(tenant_id)
        tone = branding.get("tone", "neutral")
        # Receive messages using Tenant-Bus
        # Each received message is persisted to tenant memory before executing.
        messages = tenant_bus.receive(tenant_id, "executor")
        for msg in messages:
            tenant_memory.remember(tenant_id, "executor", f"Received from {msg['from']}: {msg['content']}", tags=["execute", "task"])
        # 🔁 Use routed SLM
        model = get_routed_slm(task)
        raw_result = model(task)
        # 🎨 Apply tone
        if tone == "friendly":
            result = f"All done! Here's what I got for: '{task}' 🎉\n{raw_result}"
        elif tone == "formal":
            result = f"Execution completed for task: '{task}'.\n{raw_result}"
        else:
            result = raw_result
        tenant_memory.remember(tenant_id, "executor", result, tags=["execute", "task"])
        # Send result to critic so the review stage can pick it up.
        tenant_bus.send(tenant_id, "executor", "critic", f"Execution complete: {task}")
        return {"role": "executor", "execution": result, "model_used": str(model)}

39
agents/fallback_agent.py Normal file
View File

@@ -0,0 +1,39 @@
# agents/fallback_agent.py
from datetime import datetime
from agents.agent_registry import register_agent
# from agents.message_bus import message_bus
from tenants.branding_registry import branding_registry
# @register_agent("fallback")
class FallbackAgent:
    """Last-resort agent returned when no real agent matches a requested role."""

    def __init__(self, config=None):
        self.config = config or {}
        self.memory = []  # rolling log of responses; see get_memory()

    def run(self, task: str, context: dict) -> dict:
        """Produce a branded "no matching agent" response and remember it."""
        tenant_id = context.get("tenant_id", "default")
        branding = branding_registry.get_branding(tenant_id)
        tone = branding.get("tone", "neutral")
        if tone == "friendly":
            response = f"Oops! I couldn't find the right agent for '{task}' 😅"
        elif tone == "formal":
            response = f"[FallbackAgent] No matching agent found for task: '{task}'. Please contact support."
        else:
            response = f"[FallbackAgent] No matching agent found for '{task}' with config: {self.config}"
        self.remember(response)
        return {"role": "fallback", "response": response}

    def remember(self, message: str):
        """Append *message* to memory with a UTC ISO-8601 timestamp.

        BUG FIX: the old code called datetime.now(datetime.timezone.utc), but
        ``datetime`` here is the *class* (from datetime import datetime) and
        has no ``timezone`` attribute, so every call raised AttributeError.
        """
        from datetime import timezone
        self.memory.append({"timestamp": datetime.now(timezone.utc).isoformat(), "message": message})

    def get_memory(self):
        """The ten most recent remembered entries."""
        return self.memory[-10:]

24
agents/feedback.py Normal file
View File

@@ -0,0 +1,24 @@
# agents/feedback.py
class FeedbackStore:
    """In-memory store of user feedback, keyed by tenant and agent role."""

    def __init__(self):
        # "tenant:role" -> [{task, rating, comment}, ...]
        self.feedback = {}

    def submit(self, tenant_id: str, agent_role: str, task: str, rating: int, comment: str):
        """Record one feedback entry for the given tenant/agent pair."""
        bucket = self.feedback.setdefault(f"{tenant_id}:{agent_role}", [])
        bucket.append({"task": task, "rating": rating, "comment": comment})

    def get_all(self, tenant_id: str, agent_role: str):
        """Every feedback entry for tenant/agent (empty list when none)."""
        return self.feedback.get(f"{tenant_id}:{agent_role}", [])

    def average_score(self, tenant_id: str, agent_role: str):
        """Mean rating for tenant/agent, or None when no feedback exists."""
        entries = self.get_all(tenant_id, agent_role)
        if not entries:
            return None
        total = sum(entry["rating"] for entry in entries)
        return total / len(entries)


feedback_store = FeedbackStore()

86
agents/goal_engine.py Normal file
View File

@@ -0,0 +1,86 @@
# agents/goal_engine.py
import uuid
from agent_registry import get_agent
from agents.reflection import reflection_memory
from agents.reward_model import reward_model
from agents.goal_store import goal_store
from agents.notification_center import notification_center
from agents.agent_core import sandbox_agent_run_with_tools
class GoalSession:
    """One goal-decomposition-and-execution run for a tenant.

    NOTE(review): the original file defined ``execute_tasks`` twice; the
    first definition (reward-scored retries via reflection_memory /
    reward_model) was silently shadowed by the second (delegation +
    tool-enabled runner) and could never run. Only the effective
    definition is kept; behavior is unchanged.
    """

    def __init__(self, tenant_id: str, goal: str, roles: list):
        self.session_id = str(uuid.uuid4())  # unique id for this run
        self.tenant_id = tenant_id
        self.goal = goal
        self.roles = roles        # roles[0] plans; roles[1:] execute
        self.tasks = []
        self.trace = []

    def decompose_goal(self):
        """Ask the first role (the planner) to break the goal into tasks."""
        planner = get_agent(self.roles[0])
        steps = planner.run(f"Break down this goal: {self.goal}", {"tenant_id": self.tenant_id})
        # Fall back to the raw goal when the planner returns no task list.
        self.tasks = steps.get("tasks", [self.goal])

    def execute_tasks(self):
        """Run every task through each executor role, honoring delegation."""
        context = {"tenant_id": self.tenant_id, "goal": self.goal}
        for task in self.tasks:
            for role in self.roles[1:]:
                agent = get_agent(role)
                # Delegation: an agent may hand the task to another tenant.
                if hasattr(agent, "delegate_to"):
                    target_tenant = agent.delegate_to(task)
                    if target_tenant and target_tenant != self.tenant_id:
                        notification_center.notify(target_tenant, f"📤 Delegated task: '{task}' from {self.tenant_id}")
                        continue
                # Tool-enabled, sandboxed execution.
                output = sandbox_agent_run_with_tools(agent, task, context)
                self.trace.append({
                    "role": role,
                    "task": task,
                    "output": output
                })

    def run(self):
        """Decompose, execute, persist, and return the session summary."""
        self.decompose_goal()
        self.execute_tasks()
        goal_store.save_session(self.tenant_id, self.session_id, self.goal, self.tasks, self.trace)
        return {
            "session_id": self.session_id,
            "goal": self.goal,
            "tasks": self.tasks,
            "trace": self.trace
        }

61
agents/goal_store.py Normal file
View File

@@ -0,0 +1,61 @@
# agents/goal_store.py
import time
import uuid
class GoalStore:
    """In-memory archive of goal sessions, partitioned per tenant."""

    def __init__(self):
        self.sessions = {}  # tenant_id -> {session_id: session record}

    def save_session(self, tenant_id: str, session_id: str, goal: str, tasks: list, trace: list):
        """Persist one session's goal, tasks and execution trace."""
        record = {"goal": goal, "tasks": tasks, "trace": trace, "timestamp": time.time()}
        self.sessions.setdefault(tenant_id, {})[session_id] = record

    def get_sessions(self, tenant_id: str):
        """All sessions for the tenant (empty dict when none)."""
        return self.sessions.get(tenant_id, {})

    def get_session(self, tenant_id: str, session_id: str):
        """One session record, or None."""
        return self.sessions.get(tenant_id, {}).get(session_id)

    def resume_session(self, tenant_id: str, session_id: str):
        """Goal/tasks/trace of a saved session, or None when missing."""
        session = self.get_session(tenant_id, session_id)
        if not session:
            return None
        return {key: session[key] for key in ("goal", "tasks", "trace")}

    def revise_goal(self, tenant_id: str, session_id: str, new_goal: str):
        """Replace a session's goal text and flag it as revised."""
        tenant_sessions = self.sessions.get(tenant_id, {})
        if session_id in tenant_sessions:
            tenant_sessions[session_id]["goal"] = new_goal
            tenant_sessions[session_id]["revised"] = True

    def merge_sessions(self, tenant_id: str, session_ids: list):
        """Concatenate several sessions into a new one; return the new id."""
        merged_tasks, merged_trace = [], []
        for sid in session_ids:
            session = self.sessions.get(tenant_id, {}).get(sid)
            if session:
                merged_tasks.extend(session["tasks"])
                merged_trace.extend(session["trace"])
        merged_id = str(uuid.uuid4())
        self.sessions.setdefault(tenant_id, {})[merged_id] = {
            "goal": "Merged Goal",
            "tasks": merged_tasks,
            "trace": merged_trace,
            "merged_from": session_ids,
            "timestamp": time.time()
        }
        return merged_id


goal_store = GoalStore()

26
agents/inter_agent_bus.py Normal file
View File

@@ -0,0 +1,26 @@
# agents/inter_agent_bus.py
##INFO:
import time
class InterAgentBus:
    """Typed message bus shared by agents, partitioned per tenant."""

    def __init__(self):
        # Flat log of messages: {tenant, from, to, type, content, timestamp}
        self.messages = []

    def send(self, tenant_id: str, sender: str, receiver: str, msg_type: str, content: str):
        """Append one message to the bus."""
        record = {
            "tenant": tenant_id,
            "from": sender,
            "to": receiver,
            "type": msg_type,
            "content": content,
            "timestamp": time.time()
        }
        self.messages.append(record)

    def receive(self, tenant_id: str, receiver: str):
        """Messages addressed to *receiver* in *tenant_id* (not consumed)."""
        return [msg for msg in self.messages if msg["tenant"] == tenant_id and msg["to"] == receiver]

    def get_all(self, tenant_id: str):
        """Every message sent within *tenant_id*."""
        return [msg for msg in self.messages if msg["tenant"] == tenant_id]


inter_agent_bus = InterAgentBus()

50
agents/memory_agent.py Normal file
View File

@@ -0,0 +1,50 @@
# agents/memory_agent.py
from memory.memory_manager import MemoryManager
from models.llm_loader import get_llm
from models.embedding_loader import get_embedding_model
from vector_store.base import get_vector_store
from langchain.chains import RetrievalQA
from utils.logger import logger
class MemoryAgent:
    """Retrieval-QA agent whose prompts carry user context and chat history."""

    def __init__(self):
        self.memory = MemoryManager()
        self.llm = get_llm()
        self.embedding_model = get_embedding_model()
        self.vector_store = get_vector_store()

    def build_prompt(self, user_input: str) -> str:
        """Compose a prompt from profile context plus the last 3 exchanges.

        The template text is Korean: it addresses the user by name and asks
        the model to answer in the user's language, listing their interests
        and recent Q/A history.
        """
        context = self.memory.get_context()
        recent = self.memory.get_recent_history(limit=3)
        history_block = "\n".join(
            f"Q: {item['query']}\nA: {item['response']}" for item in recent
        )
        prompt = f"""
{context['name']}님, 다음 질문에 {context['language']}로 답해주세요.
당신의 관심사: {', '.join(context['interests'])}
최근 대화 기록:
{history_block}
새 질문: {user_input}
"""
        return prompt.strip()

    def run(self, user_input: str) -> str:
        """Answer *user_input* via a RetrievalQA chain and log the exchange."""
        prompt = self.build_prompt(user_input)
        retriever = self.vector_store.as_retriever(self.embedding_model)
        chain = RetrievalQA.from_chain_type(
            llm=self.llm,
            retriever=retriever,
            return_source_documents=False
        )
        logger.info(f"Running memory-aware agent with prompt:\n{prompt}")
        answer = chain.run(prompt)
        self.memory.log_interaction(user_input, answer)
        return answer

24
agents/messaging.py Normal file
View File

@@ -0,0 +1,24 @@
# agents/messaging.py
from typing import List, Dict
class Message:
    """One directed message between two agents."""

    def __init__(self, sender: str, receiver: str, content: str):
        self.sender = sender
        self.receiver = receiver
        self.content = content


class MessageBus:
    """Simple in-process mailbox shared by all agents."""

    def __init__(self):
        self.messages: List[Message] = []

    def send(self, sender: str, receiver: str, content: str):
        """Queue a message from *sender* to *receiver*."""
        self.messages.append(Message(sender, receiver, content))

    def get_for(self, receiver: str) -> List[Message]:
        """All pending messages addressed to *receiver* (not consumed)."""
        return [msg for msg in self.messages if msg.receiver == receiver]

    def clear_for(self, receiver: str):
        """Drop every message addressed to *receiver*."""
        self.messages = [msg for msg in self.messages if msg.receiver != receiver]


message_bus = MessageBus()

View File

@@ -0,0 +1,41 @@
# agents/notification_center.py
import requests
import smtplib
from email.message import EmailMessage
class NotificationCenter:
    """Fans a notification out to a tenant's webhook, email, and voice log."""

    def __init__(self):
        self.webhooks = {}  # tenant_id -> webhook URL
        self.emails = {}    # tenant_id -> email address

    def register_webhook(self, tenant_id: str, url: str):
        """Register (or replace) the tenant's webhook URL."""
        self.webhooks[tenant_id] = url

    def register_email(self, tenant_id: str, email: str):
        """Register (or replace) the tenant's email address."""
        self.emails[tenant_id] = email

    def notify(self, tenant_id: str, message: str):
        """Best-effort delivery: each channel's failure is logged, not raised."""
        url = self.webhooks.get(tenant_id)
        if url is not None:
            try:
                requests.post(url, json={"message": message})
            except Exception as e:
                print(f"Webhook failed: {e}")
        address = self.emails.get(tenant_id)
        if address is not None:
            try:
                mail = EmailMessage()
                mail.set_content(message)
                mail["Subject"] = "Agent Notification"
                mail["From"] = "agent@system.local"
                mail["To"] = address
                # NOTE(review): assumes a local SMTP relay on port 25 — confirm.
                smtp = smtplib.SMTP("localhost")
                smtp.send_message(mail)
                smtp.quit()
            except Exception as e:
                print(f"Email failed: {e}")
        # Voice notification (if assistant is active)
        print(f"[Voice] 🔊 {tenant_id}{message}")


notification_center = NotificationCenter()

51
agents/orchestrator.py Normal file
View File

@@ -0,0 +1,51 @@
from agents.agent_registry import get_agent
from models.model_router import get_routed_llm
from utils.language_utils import detect_model_profile
class Orchestrator:
    """Coordinates the planner → executor → critic agent pipeline."""

    def __init__(self, memory):
        # memory: project memory manager; run_task calls its get_context()
        # and log_interaction() methods.
        self.memory = memory
        self.planner = get_agent("planner")
        self.executor = get_agent("executor")
        self.critic = get_agent("critic")

    async def run_task(self, task: str, user_id: str):
        """Run one task through plan → execute → critique and log the result.

        Returns each agent's memory plus the critic's final feedback text.
        NOTE(review): user_id is currently unused — confirm whether it
        should scope the memory context.
        """
        context = self.memory.get_context()
        plan = self.planner.run(task, context)
        result = self.executor.run(plan, context)
        feedback = self.critic.run(result, context)
        # Inter-agent messaging: record the hand-offs in each agent's memory.
        # NOTE(review): assumes every agent exposes remember()/get_memory();
        # some registered agents route memory through tenant_memory instead —
        # verify before relying on this.
        self.planner.remember(f"Sent plan to executor: {plan}")
        self.executor.remember(f"Received plan: {plan}")
        self.executor.remember(f"Sent result to critic: {result}")
        self.critic.remember(f"Received result: {result}")
        self.memory.log_interaction(task, feedback["feedback"])
        return {
            "planner_memory": self.planner.get_memory(),
            "executor_memory": self.executor.get_memory(),
            "critic_memory": self.critic.get_memory(),
            "final_feedback": feedback["feedback"]
        }
def route_request(prompt: str, context: dict):
    """Pick a model for *prompt* and return a routing summary.

    NOTE(review): context is currently unused — confirm intent.
    """
    profile = detect_model_profile(prompt)
    model = get_routed_llm(prompt)
    summary = {
        "model": str(model),
        "profile": profile,
        "response": f"[{model}] response to: {prompt}"  # Replace with actual call
    }
    return summary

25
agents/pattern_learner.py Normal file
View File

@@ -0,0 +1,25 @@
from collections import defaultdict
from datetime import datetime
import json
class PatternLearner:
    """Tracks how often each query recurs across distinct days."""

    def __init__(self):
        # query -> list of ISO dates (YYYY-MM-DD) on which it was observed
        self.patterns = defaultdict(list)

    def observe(self, query: str, timestamp: str):
        """Record that *query* was seen at *timestamp* (ISO 8601 string)."""
        date = timestamp.split("T")[0]
        self.patterns[query].append(date)

    def get_repeated_patterns(self, threshold=3):
        """Queries seen on at least *threshold* distinct days."""
        return {q: dates for q, dates in self.patterns.items() if len(set(dates)) >= threshold}

    def save(self, path="memory/patterns.json"):
        """Persist the pattern map as JSON."""
        with open(path, "w") as f:
            json.dump(self.patterns, f)

    def load(self, path="memory/patterns.json"):
        """Load a previously saved pattern map; a missing file is a no-op.

        Bug fix: wrap the loaded plain dict back into a defaultdict(list);
        the original assigned json.load()'s dict directly, so observe() on
        any new query after load() raised KeyError.
        """
        try:
            with open(path) as f:
                self.patterns = defaultdict(list, json.load(f))
        except FileNotFoundError:
            pass

45
agents/planner_agent.py Normal file
View File

@@ -0,0 +1,45 @@
# agents/planner_agent.py
from datetime import datetime
# from agents.message_bus import message_bus
from agents.agent_registry import register_agent
from agents.tenant_memory import tenant_memory
from agents.tenant_message_bus import tenant_bus
from models.model_router import get_routed_llm
from tenants.branding_registry import branding_registry
@register_agent("planner", capabilities=["planning", "task decomposition"], avatar="/avatars/planner.png", group="core")
class PlannerAgent:
    """Planning agent: turns a task into a plan via the routed LLM."""

    def __init__(self, config=None):
        self.config = config or {}

    def run(self, task: str, context: dict) -> dict:
        """Produce a tone-branded plan, store it, and notify the executor."""
        tenant_id = context.get("tenant_id", "default")
        tone = branding_registry.get_branding(tenant_id).get("tone", "neutral")
        model = get_routed_llm(task)  # model is routed per task
        raw_plan = model(task)
        # Apply the tenant's branding tone to the raw plan text.
        if tone == "friendly":
            plan = f"Hey there! Here's a plan for: '{task}' 😊\n{raw_plan}"
        elif tone == "formal":
            plan = f"Planning initiated for task: '{task}'.\n{raw_plan}"
        else:
            plan = raw_plan
        tenant_memory.remember(tenant_id, "planner", plan, tags=["plan", "task"])
        # Hand the plan off to the executor via the tenant message bus.
        tenant_bus.send(tenant_id, "planner", "executor", f"Plan ready: {task}")
        return {"role": "planner", "plan": plan, "model_used": str(model)}

39
agents/reflection.py Normal file
View File

@@ -0,0 +1,39 @@
# agents/reflection.py
class ReflectionMemory:
    """Per-(tenant, agent) log of task outputs with a heuristic quality score."""

    def __init__(self):
        self.logs = {}  # "tenant:role" -> [{task, output, score}]

    def log(self, tenant_id: str, agent_role: str, task: str, output: dict):
        """Score *output* and append it to the (tenant, role) log."""
        entry = {"task": task, "output": output, "score": self.score_output(output)}
        self.logs.setdefault(f"{tenant_id}:{agent_role}", []).append(entry)

    def score_output(self, output: dict) -> int:
        """Heuristic score: error=0, quality high=10, medium=5, otherwise 2."""
        if "error" in output:
            return 0
        quality = output.get("quality")
        if quality == "high":
            return 10
        if quality == "medium":
            return 5
        return 2

    def get_last(self, tenant_id: str, agent_role: str):
        """Most recent entry for (tenant, role), or None."""
        entries = self.logs.get(f"{tenant_id}:{agent_role}")
        return entries[-1] if entries else None

    def get_all(self, tenant_id: str, agent_role: str):
        """Every entry for (tenant, role)."""
        return self.logs.get(f"{tenant_id}:{agent_role}", [])

    def prune(self, tenant_id: str, agent_role: str, threshold: int = 3):
        """Drop entries scoring below *threshold*."""
        key = f"{tenant_id}:{agent_role}"
        if key in self.logs:
            self.logs[key] = [e for e in self.logs[key] if e["score"] >= threshold]


reflection_memory = ReflectionMemory()

View File

@@ -0,0 +1,22 @@
# agents/retraining_agent.py
class RetrainingAgent:
    """Records feedback and simulates a prompt-refinement retraining step."""

    def __init__(self):
        self.history = []  # every retrain request ever received

    def retrain(self, tenant_id: str, agent_role: str, feedback: dict):
        """Log *feedback* and return a simulated refined-prompt result."""
        self.history.append({
            "tenant_id": tenant_id,
            "agent": agent_role,
            "feedback": feedback
        })
        # Simulated retraining: fold the reported issue into a new prompt.
        refined = f"Refined prompt based on feedback: {feedback.get('issue')}"
        return {
            "status": "retrained",
            "agent": agent_role,
            "updated_prompt": refined
        }


retraining_agent = RetrainingAgent()

23
agents/reward_model.py Normal file
View File

@@ -0,0 +1,23 @@
# agents/reward_model.py
class RewardModel:
    """Weighted linear reward over feedback, factuality, instruction-following."""

    def __init__(self):
        # Weights sum to 1.0; human/critic feedback dominates.
        self.weights = {
            "feedback": 0.6,
            "factuality": 0.2,
            "instruction": 0.2
        }

    def score(self, output: dict, feedback_score: float = None):
        """Reward rounded to 3 places; feedback defaults to a neutral 0.5."""
        components = {
            "feedback": 0.5 if feedback_score is None else feedback_score,
            "factuality": 1.0 if output.get("factual", True) else 0.0,
            "instruction": 1.0 if output.get("followed_instruction", True) else 0.0
        }
        reward = sum(self.weights[name] * value for name, value in components.items())
        return round(reward, 3)


reward_model = RewardModel()

19
agents/role_registry.py Normal file
View File

@@ -0,0 +1,19 @@
# agents/role_registry.py
class RoleRegistry:
    """Registry of named agent roles and their declared capabilities."""

    def __init__(self):
        self.roles = {}  # name -> {description, capabilities}

    def register_role(self, name: str, description: str, capabilities: list):
        """Add (or replace) a role definition."""
        self.roles[name] = {
            "description": description,
            "capabilities": capabilities
        }

    def get_role(self, name: str):
        """Role definition for *name*, or None."""
        return self.roles.get(name)

    def list_roles(self):
        """The whole role map."""
        return self.roles


role_registry = RoleRegistry()

50
agents/sandbox.py Normal file
View File

@@ -0,0 +1,50 @@
# agents/sandbox.py
import traceback
import time
from agent_registry import get_agent_by_capability
# def sandbox_agent_run(agent, task: str, context: dict) -> dict:
# try:
# return agent.run(task, context)
# except Exception as e:
# return {
# "role": getattr(agent, "role", "unknown"),
# "error": str(e),
# "trace": traceback.format_exc()
# }
##INFO: Updates for 'Track agent execution time and errors'
def sandbox_agent_run(agent, task: str, context: dict) -> dict:
    """Run *agent* safely, attaching wall-clock duration and error info.

    Success: the agent's result dict plus duration_sec and error=None.
    Failure: role, error message, formatted traceback, and duration_sec.
    """
    started = time.time()
    try:
        outcome = agent.run(task, context)
    except Exception as exc:
        elapsed = round(time.time() - started, 3)
        return {
            "role": getattr(agent, "role", "unknown"),
            "error": str(exc),
            "trace": traceback.format_exc(),
            "duration_sec": elapsed
        }
    elapsed = round(time.time() - started, 3)
    return {**outcome, "duration_sec": elapsed, "error": None}
##INFO: Wrap fallback logic on failure
def sandbox_agent_run_with_fallback(agent, task: str, context: dict, capability: str) -> dict:
    """Run *agent*; on any exception, rerun via a capability-matched fallback."""
    try:
        return agent.run(task, context)
    except Exception as exc:
        backup = get_agent_by_capability(capability, context["tenant_id"])
        return {
            "role": getattr(agent, "role", "unknown"),
            "error": str(exc),
            "fallback": backup.__class__.__name__,
            "output": backup.run(task, context)
        }

50
agents/self_evaluator.py Normal file
View File

@@ -0,0 +1,50 @@
# agents/self_evaluator.py
from agents.agent_registry import get_agent
from agents.reward_model import reward_model
from memory.episodic_store import episodic_store
from memory.performance_store import performance_store
from tenants.rbac_guard import enforce_rbac
class SelfEvaluator:
    """Chain-of-thought self-evaluation of agent outputs, RBAC-protected."""

    def __init__(self):
        self.evaluations = {}  # tenant_id -> list of evaluation records

    @enforce_rbac("self_evaluate")
    def evaluate(self, tenant_id: str, agent_role: str, task: str, output: dict):
        """Have the agent reflect on *output*, score it, and log everything."""
        agent = get_agent(agent_role)
        cot_prompt = f"Reflect step-by-step on how you solved this task: {task}\nOutput: {output}"
        reasoning = agent.run(cot_prompt, {"tenant_id": tenant_id})
        score = reward_model.score(output)
        feedback = agent.run(f"Based on this reasoning, how could you improve?", {"tenant_id": tenant_id})
        episodic_store.log_episode(tenant_id, agent_role, f"Self-evaluation: {task}", {
            "reasoning": reasoning,
            "score": score,
            "feedback": feedback
        })
        # Persist performance so agent quality can be tracked over time.
        performance_store.log(tenant_id, agent_role, task, score, feedback)
        self.evaluations.setdefault(tenant_id, []).append({
            "agent": agent_role,
            "task": task,
            "reasoning": reasoning,
            "score": score,
            "feedback": feedback
        })
        return {
            "reasoning": reasoning,
            "score": score,
            "feedback": feedback
        }

    def get_all(self, tenant_id: str):
        """All evaluation records for the tenant."""
        return self.evaluations.get(tenant_id, [])


self_evaluator = SelfEvaluator()

View File

@@ -0,0 +1,44 @@
# agents/shared_goal_registry.py
import uuid
import time
from agents.notification_center import notification_center
class SharedGoalRegistry:
    """Cross-tenant shared goals: creation, result collection, completion."""

    def __init__(self):
        self.shared_goals = {}  # goal_id -> goal record

    def create_shared_goal(self, initiator_tenant: str, goal: str, target_tenants: list):
        """Register a pending shared goal and return its id."""
        goal_id = str(uuid.uuid4())
        self.shared_goals[goal_id] = {
            "goal": goal,
            "initiator": initiator_tenant,
            "targets": target_tenants,
            "status": "pending",
            "results": {},
            "timestamp": time.time()
        }
        return goal_id

    def update_result(self, goal_id: str, tenant_id: str, result: dict):
        """Record one tenant's result; notify the initiator once all arrive."""
        record = self.shared_goals.get(goal_id)
        if record is None:
            return
        record["results"][tenant_id] = result
        if set(record["targets"]) == set(record["results"]):
            record["status"] = "completed"
            notification_center.notify(record["initiator"], f"✅ Shared goal '{goal_id}' completed.")

    def get_goal(self, goal_id: str):
        """The goal record, or None."""
        return self.shared_goals.get(goal_id)

    def get_goals_for_tenant(self, tenant_id: str):
        """Goals the tenant initiated or is targeted by."""
        return {
            gid: record for gid, record in self.shared_goals.items()
            if tenant_id == record["initiator"] or tenant_id in record["targets"]
        }

    def list_all_goals(self):
        """All goal records."""
        return list(self.shared_goals.values())


shared_goal_registry = SharedGoalRegistry()

View File

@@ -0,0 +1,32 @@
# agents/task_distributor.py
from agents.agent_registry import get_agent
from agents.cluster_registry import cluster_registry
class TaskDistributor:
    """Fans a cluster's goal out to each member agent and caches the outputs."""

    def __init__(self):
        self.results = {}  # cluster_id -> {role: output}

    def distribute(self, cluster_id: str, context: dict):
        """Run the cluster goal through every member agent; return role outputs."""
        cluster = cluster_registry.get_cluster(cluster_id)
        if not cluster or cluster["status"] != "active":
            return {"error": "Cluster not active"}
        self.results[cluster_id] = {}
        for role in cluster["agents"]:
            member = get_agent(role)
            if not member:
                continue  # unregistered roles are skipped silently
            output = member.run(cluster["goal"], context)
            member.remember(f"Swarm processed: {output}")
            self.results[cluster_id][role] = output
        return self.results[cluster_id]

    def get_results(self, cluster_id: str):
        """Cached outputs for *cluster_id* (empty dict when none)."""
        return self.results.get(cluster_id, {})


task_distributor = TaskDistributor()

65
agents/tenant_memory.py Normal file
View File

@@ -0,0 +1,65 @@
# agents/tenant_memory.py
import json
from collections import defaultdict
from datetime import datetime, timedelta
class TenantMemory:
    """Per-(tenant, role) message memory with tag search and expiry."""

    def __init__(self):
        self.memory = defaultdict(list)
        self.expiration_minutes = 60  # default expiration window

    def remember(self, tenant_id: str, role: str, message: str, tags: list[str] = None):
        """Store *message* for (tenant, role) with an aware UTC timestamp.

        Bug fix: the original called datetime.now(datetime.timezone.utc),
        but here `datetime` is the class (not the module), which has no
        `timezone` attribute — every call raised AttributeError.
        """
        from datetime import timezone  # local: module header imports only datetime/timedelta
        self.memory[(tenant_id, role)].append({
            "timestamp": datetime.now(timezone.utc).isoformat(),
            "message": message,
            "tags": tags or []
        })

    def get_memory(self, tenant_id: str, role: str):
        """Entries younger than the expiration window.

        Uses an aware UTC "now" to match the aware timestamps written by
        remember(); subtracting a naive utcnow() from an aware timestamp
        would raise TypeError.
        """
        from datetime import timezone
        now = datetime.now(timezone.utc)
        return [
            entry for entry in self.memory.get((tenant_id, role), [])
            if now - datetime.fromisoformat(entry["timestamp"]) < timedelta(minutes=self.expiration_minutes)
        ]

    def search_memory(self, tenant_id: str, role: str, keyword: str):
        """Case-insensitive substring search over stored messages."""
        return [
            entry for entry in self.memory.get((tenant_id, role), [])
            if keyword.lower() in entry["message"].lower()
        ]

    def get_by_tag(self, tenant_id: str, role: str, tag: str):
        """Entries carrying *tag* (ignores expiration)."""
        return [
            entry for entry in self.memory.get((tenant_id, role), [])
            if tag in entry.get("tags", [])
        ]

    def set_expiration(self, minutes: int):
        """Set the expiration window used by get_memory()."""
        self.expiration_minutes = minutes

    def export_memory(self, tenant_id: str):
        """JSON dump of every role's unexpired entries for the tenant."""
        export = {
            role: self.get_memory(tenant_id, role)
            for (tid, role) in self.memory if tid == tenant_id
        }
        return json.dumps(export, indent=2)

    def set_retention(self, minutes: int):
        """Alias of set_expiration() kept for API compatibility."""
        self.expiration_minutes = minutes


# Singleton
tenant_memory = TenantMemory()

View File

@@ -0,0 +1,20 @@
from collections import defaultdict
from datetime import datetime
class TenantMessageBus:
    """Per-tenant mailbox: messages are consumed on receive()."""

    def __init__(self):
        self.messages = defaultdict(list)  # (tenant_id, receiver) -> pending messages

    def send(self, tenant_id: str, sender: str, receiver: str, content: str):
        """Queue a timestamped message for (tenant_id, receiver)."""
        envelope = {
            "from": sender,
            "to": receiver,
            "content": content,
            "timestamp": datetime.utcnow().isoformat()
        }
        self.messages[(tenant_id, receiver)].append(envelope)

    def receive(self, tenant_id: str, role: str) -> list:
        """Drain and return all pending messages for (tenant_id, role)."""
        return self.messages.pop((tenant_id, role), [])


# Singleton
tenant_bus = TenantMessageBus()

11
chains/templates.py Normal file
View File

@@ -0,0 +1,11 @@
# chains/templates.py
# Named agent-chain templates: each maps to an ordered list of roles.
CHAIN_TEMPLATES = {
    "default": ["planner", "executor", "critic"],
    "review_only": ["critic"],
    "plan_execute": ["planner", "executor"],
    "custom_fallback": ["unknown_role", "planner"]
}


def get_chain_template(name: str) -> list[str]:
    """Return the role chain for *name*, falling back to the default chain."""
    try:
        return CHAIN_TEMPLATES[name]
    except KeyError:
        return CHAIN_TEMPLATES["default"]

View File

@@ -0,0 +1,8 @@
# chains/workflow_templates.py
# Versioned workflow templates: "name:version" -> ordered list of agent roles.
WORKFLOW_TEMPLATES = {
    "default:v1": ["planner", "executor", "critic"],
    "default:v2": ["planner", "executor", "critic", "critic"],  # adds a second critique pass
    "light_review:v1": ["planner", "executor"],
    "deep_review:v1": ["planner", "executor", "critic", "critic"]
}

8
cli/config_cli.py Normal file
View File

@@ -0,0 +1,8 @@
# cli/config_cli.py
# Default settings for the CLI.
CLI_CONFIG = {
    "default_language": "ko",        # language code used when none is given
    "default_engine": "bing",        # presumably the default search engine — confirm against consumers
    "default_result_count": 10       # how many results to request by default
}

57
cli/main.py Normal file
View File

@@ -0,0 +1,57 @@
# cli/main.py
import requests
API_URL = "http://localhost:8000"
def query_agent():
    """Prompt for a question and print the assistant's answer."""
    question = input("🧠 Ask your assistant: ")
    reply = requests.get(f"{API_URL}/query", params={"q": question})
    print(f"🤖 Response: {reply.json()['response']}")
def upload_document():
    """Prompt for a file path and upload it to the /upload/document endpoint."""
    doc_path = input("📄 Path to document: ")
    with open(doc_path, "rb") as fh:
        reply = requests.post(f"{API_URL}/upload/document", files={"file": fh})
    print(reply.json())
def upload_media(media_type):
    """Prompt for a path and upload it to /upload/<media_type> (image/audio/video)."""
    media_path = input(f"📁 Path to {media_type}: ")
    with open(media_path, "rb") as fh:
        reply = requests.post(f"{API_URL}/upload/{media_type}", files={"file": fh})
    print(reply.json())
def start_voice_listener():
    """Trigger the server's voice listener endpoint and print its reply."""
    reply = requests.get(f"{API_URL}/voice/listen")
    print(reply.json())
def menu():
    """Interactive CLI loop: prompt for an action until the user exits."""
    actions = {
        "1": query_agent,
        "2": upload_document,
        "3": lambda: upload_media("image"),
        "4": lambda: upload_media("audio"),
        "5": lambda: upload_media("video"),
        "6": start_voice_listener,
    }
    while True:
        print("\n🔧 Menu:")
        print("1. Query Assistant")
        print("2. Upload Document")
        print("3. Upload Image")
        print("4. Upload Audio")
        print("5. Upload Video")
        print("6. Start Voice Listener")
        print("0. Exit")
        choice = input("Select: ")
        if choice == "0":
            break
        action = actions.get(choice)
        if action:
            action()
        # Any other input redraws the menu, matching the original behavior.
# Run the interactive menu only when executed as a script.
if __name__ == "__main__":
    menu()

8
cli/rollback_cli.py Normal file
View File

@@ -0,0 +1,8 @@
from tenants.tenant_registry import tenant_registry
def rollback_agent_cli(tenant_id, role):
    """Roll a tenant's agent back to its previous version and report the result."""
    outcome = tenant_registry.rollback_agent(tenant_id, role)
    if not outcome:
        print("⚠️ No rollback available.")
        return
    print(f"✅ Rolled back {role} to {outcome['previous']}")

9
cli/utils.py Normal file
View File

@@ -0,0 +1,9 @@
# cli/utils.py
def print_banner():
    """Print the CLI welcome banner."""
    greeting = "\n🧠 Welcome to Agentic AI CLI"
    print(greeting)
    print("Type your query or choose an action from the menu.\n")
def format_response(text: str):
    """Wrap *text* in the assistant reply banner."""
    return "\n🤖 Assistant:\n" + text + "\n"

21
collab/graph_query.py Normal file
View File

@@ -0,0 +1,21 @@
# collab/graph_query.py
##INFO: Graph Query Engine
from collab.graph_registry import graph_registry
class GraphQuery:
    """Read-only queries over the collaboration graph."""

    def get_influence_score(self, agent_role: str):
        """Sum of edge weights touching *agent_role*, rounded to 2 places."""
        total = sum(edge["weight"] for edge in graph_registry.get_neighbors(agent_role))
        return {"agent": agent_role, "influence_score": round(total, 2)}

    def get_connected_agents(self, agent_role: str):
        """Distinct agents sharing an edge with *agent_role*."""
        linked = set()
        for edge in graph_registry.get_neighbors(agent_role):
            linked.update((edge["source"], edge["target"]))
        linked.discard(agent_role)
        return list(linked)


graph_query = GraphQuery()

64
collab/graph_registry.py Normal file
View File

@@ -0,0 +1,64 @@
# collab/graph_registry.py
##INFO: Collaboration Graph Registry
import time
from agents.shared_goal_registry import shared_goal_registry
class GraphRegistry:
    """Registry of collaboration-graph nodes (agents) and weighted edges."""

    def __init__(self):
        self.nodes = {}  # agent_role -> {role, metadata, created}
        self.edges = []  # [{source, target, type, weight, timestamp}]

    def add_agent(self, agent_role: str, metadata: dict):
        """Register (or overwrite) a graph node for *agent_role*."""
        node = {"role": agent_role, "metadata": metadata, "created": time.time()}
        self.nodes[agent_role] = node
        return node

    def connect_agents(self, source: str, target: str, edge_type: str, weight: float = 1.0):
        """Add a directed, weighted edge and return it."""
        edge = {
            "source": source,
            "target": target,
            "type": edge_type,
            "weight": weight,
            "timestamp": time.time()
        }
        self.edges.append(edge)
        return edge

    def get_neighbors(self, agent_role: str):
        """Edges where *agent_role* is either endpoint."""
        return [edge for edge in self.edges if agent_role in (edge["source"], edge["target"])]

    def get_graph(self):
        """Whole graph as {nodes, edges}."""
        return {"nodes": list(self.nodes.values()), "edges": self.edges}

    def get_goal_graph(self, goal_id: str):
        """Build a star graph: goal initiator -> each target of a shared goal."""
        goal = shared_goal_registry.get_goal(goal_id)
        if not goal:
            return {"error": "Goal not found"}
        nodes, edges = [], []
        for role in goal["targets"]:
            # Use the registered node when present; synthesize a bare one otherwise.
            nodes.append(self.nodes.get(role, {"role": role, "metadata": {}, "created": time.time()}))
            edges.append({
                "source": goal["initiator"],
                "target": role,
                "type": "goal_assignment",
                "weight": 1.0,
                "timestamp": time.time()
            })
        return {"nodes": nodes, "edges": edges}


graph_registry = GraphRegistry()

14
config/__init__.py Normal file
View File

@@ -0,0 +1,14 @@
# config/__init__.py
from .config import (
VECTOR_DB,
LLM_ENGINE,
SLM_ENGINE,
EMBEDDING_ENGINE,
EMBEDDING_MODEL_NAME,
DATA_DIR
)
from .persona_presets import (
PERSONA_PRESETS
)

17
config/agent_config.json Normal file
View File

@@ -0,0 +1,17 @@
{
"planner": {
"max_tokens": 500,
"temperature": 0.7,
"enabled": true
},
"executor": {
"max_tokens": 1000,
"temperature": 0.5,
"enabled": true
},
"critic": {
"max_tokens": 300,
"temperature": 0.3,
"enabled": true
}
}

10
config/config.py Normal file
View File

@@ -0,0 +1,10 @@
# config/config.py
# Engine/back-end selection for the whole project.
VECTOR_DB = "faiss"  # Options: "faiss", "qdrant", "weaviate"
LLM_ENGINE = "ollama"  # Options: "llama.cpp", "ollama", "vllm"
SLM_ENGINE = "phi-3"  # Options: "phi-3", "gemma"
EMBEDDING_ENGINE = "huggingface"  # Options: "huggingface", "gpt4all"
EMBEDDING_MODEL_NAME = "sentence-transformers/all-MiniLM-L6-v2"  # embedding model id
DATA_DIR = "./data"  # root directory for local data files

47
config/persona_presets.py Normal file
View File

@@ -0,0 +1,47 @@
# config/persona_presets.py
# Built-in persona presets. Each entry defines the persona's default tone,
# style, formality, a static avatar, and per-mood animated avatars.
PERSONA_PRESETS = {
    # Calm, wise, formal persona.
    "zen": {
        "tone": "calm",
        "style": "wise",
        "formality": "formal",
        "avatar": "/avatars/zen.png",
        "mood_avatars": {
            "calm": "/avatars/zen_calm.gif",
            "serious": "/avatars/zen_serious.gif"
        }
    },
    # Professional, serious persona.
    "mentor": {
        "tone": "serious",
        "style": "professional",
        "formality": "formal",
        "avatar": "/avatars/mentor.png",
        "mood_avatars": {
            "serious": "/avatars/mentor_serious.gif",
            "empathetic": "/avatars/mentor_supportive.gif",
            "cheerful": "/avatars/mentor_smile.gif"
        }
    },
    # Friendly, informal persona.
    "buddy": {
        "tone": "cheerful",
        "style": "friendly",
        "formality": "informal",
        "avatar": "/avatars/buddy.png",
        "mood_avatars": {
            "cheerful": "/avatars/buddy_happy.gif",
            "serious": "/avatars/buddy_serious.gif",
            "empathetic": "/avatars/buddy_soft.gif"
        }
    },
    # Witty, empathetic persona.
    "poet": {
        "tone": "empathetic",
        "style": "witty",
        "formality": "informal",
        "avatar": "/avatars/poet.png",
        "mood_avatars": {
            "empathetic": "/avatars/poet_soft.gif",
            "cheerful": "/avatars/poet_playful.gif",
            "serious": "/avatars/poet_reflective.gif"
        }
    }
}

View File

@@ -0,0 +1,11 @@
def build_persona_prompt(tone, style, formality, context, user_input):
    """Build a persona-styled conversation prompt (template text is Korean).

    The template instructs the model to continue the conversation using the
    given emotional tone, personality style, and formality level, followed
    by the prior conversation and the user's new message.
    """
    return f"""
다음을 고려하여 대화를 이어가세요. 응답은 다음 스타일로 작성하세요:
- 감정 톤: '{tone}'
- 성격 스타일: '{style}'
- 말투: '{formality}'
대화 내용:
{context}
user: {user_input}
"""

View File

@@ -0,0 +1,5 @@
{
"cheerful": ["That's fantastic!", "I'm so glad to hear that!"],
"sad": ["I'm here for you.", "That sounds tough."],
"serious": ["Let's focus on the facts.", "This requires careful thought."]
}

18
data/persona_samples.json Normal file
View File

@@ -0,0 +1,18 @@
[
{
"persona": "mentor",
"samples": [
"Let's take this seriously.",
"Here's a professional way to approach it.",
"I recommend a structured plan."
]
},
{
"persona": "poet",
"samples": [
"Ah, the beauty of your words.",
"Let me respond with wit and warmth.",
"This reminds me of a verse I once heard."
]
}
]

View File

@@ -0,0 +1,33 @@
# deployment/deployment_registry.py
##INFO: Agent Deployment Registry
import time
class DeploymentRegistry:
    """Tracks which agent versions are deployed where, and their status."""

    def __init__(self):
        self.deployments = {}  # agent_role -> {agent, version, env, tenant, status, timestamp}

    def deploy(self, agent_role: str, version: str, env: str, tenant_id: str):
        """Record a deployment as running and return its record."""
        record = {
            "agent": agent_role,
            "version": version,
            "env": env,
            "tenant": tenant_id,
            "status": "running",
            "timestamp": time.time()
        }
        self.deployments[agent_role] = record
        return record

    def update_status(self, agent_role: str, status: str):
        """Set a deployed agent's status; error dict when unknown."""
        record = self.deployments.get(agent_role)
        if record is None:
            return {"error": "Agent not deployed"}
        record["status"] = status
        return record

    def get(self, agent_role: str):
        """Deployment record for *agent_role*, or None."""
        return self.deployments.get(agent_role)

    def get_all(self):
        """All deployment records."""
        return list(self.deployments.values())


deployment_registry = DeploymentRegistry()

View File

@@ -0,0 +1,24 @@
# deployment/orchestration_engine.py
##INFO: Runtime Orchestration Engine
import time
class OrchestrationEngine:
    """Runtime trace log: records agent actions per tenant."""

    def __init__(self):
        self.traces = []  # [{agent, action, status, tenant, timestamp}]

    def log_action(self, agent_role: str, action: str, status: str, tenant_id: str):
        """Append a timestamped trace entry and return it."""
        entry = {
            "agent": agent_role,
            "action": action,
            "status": status,
            "tenant": tenant_id,
            "timestamp": time.time()
        }
        self.traces.append(entry)
        return entry

    def get_traces(self, tenant_id: str):
        """Trace entries belonging to *tenant_id*."""
        return [entry for entry in self.traces if entry["tenant"] == tenant_id]


orchestration_engine = OrchestrationEngine()

5
desktop-electron/main.js Normal file
View File

@@ -0,0 +1,5 @@
// Minimal Electron shell: once the app is ready, open a single 800x600
// window pointed at the locally running frontend dev server.
const { app, BrowserWindow } = require('electron');
app.whenReady().then(() => {
  const win = new BrowserWindow({ width: 800, height: 600 });
  win.loadURL("http://localhost:5173"); // your React/Vite frontend
});

732
desktop-electron/package-lock.json generated Normal file
View File

@@ -0,0 +1,732 @@
{
"name": "desktop-electron",
"version": "1.0.0",
"lockfileVersion": 3,
"requires": true,
"packages": {
"": {
"name": "desktop-electron",
"version": "1.0.0",
"license": "ISC",
"dependencies": {
"electron": "^38.0.0"
}
},
"node_modules/@electron/get": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/@electron/get/-/get-2.0.3.tgz",
"integrity": "sha512-Qkzpg2s9GnVV2I2BjRksUi43U5e6+zaQMcjoJy0C+C5oxaKl+fmckGDQFtRpZpZV0NQekuZZ+tGz7EA9TVnQtQ==",
"dependencies": {
"debug": "^4.1.1",
"env-paths": "^2.2.0",
"fs-extra": "^8.1.0",
"got": "^11.8.5",
"progress": "^2.0.3",
"semver": "^6.2.0",
"sumchecker": "^3.0.1"
},
"engines": {
"node": ">=12"
},
"optionalDependencies": {
"global-agent": "^3.0.0"
}
},
"node_modules/@sindresorhus/is": {
"version": "4.6.0",
"resolved": "https://registry.npmjs.org/@sindresorhus/is/-/is-4.6.0.tgz",
"integrity": "sha512-t09vSN3MdfsyCHoFcTRCH/iUtG7OJ0CsjzB8cjAmKc/va/kIgeDI/TxsigdncE/4be734m0cvIYwNaV4i2XqAw==",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sindresorhus/is?sponsor=1"
}
},
"node_modules/@szmarczak/http-timer": {
"version": "4.0.6",
"resolved": "https://registry.npmjs.org/@szmarczak/http-timer/-/http-timer-4.0.6.tgz",
"integrity": "sha512-4BAffykYOgO+5nzBWYwE3W90sBgLJoUPRWWcL8wlyiM8IB8ipJz3UMJ9KXQd1RKQXpKp8Tutn80HZtWsu2u76w==",
"dependencies": {
"defer-to-connect": "^2.0.0"
},
"engines": {
"node": ">=10"
}
},
"node_modules/@types/cacheable-request": {
"version": "6.0.3",
"resolved": "https://registry.npmjs.org/@types/cacheable-request/-/cacheable-request-6.0.3.tgz",
"integrity": "sha512-IQ3EbTzGxIigb1I3qPZc1rWJnH0BmSKv5QYTalEwweFvyBDLSAe24zP0le/hyi7ecGfZVlIVAg4BZqb8WBwKqw==",
"dependencies": {
"@types/http-cache-semantics": "*",
"@types/keyv": "^3.1.4",
"@types/node": "*",
"@types/responselike": "^1.0.0"
}
},
"node_modules/@types/http-cache-semantics": {
"version": "4.0.4",
"resolved": "https://registry.npmjs.org/@types/http-cache-semantics/-/http-cache-semantics-4.0.4.tgz",
"integrity": "sha512-1m0bIFVc7eJWyve9S0RnuRgcQqF/Xd5QsUZAZeQFr1Q3/p9JWoQQEqmVy+DPTNpGXwhgIetAoYF8JSc33q29QA=="
},
"node_modules/@types/keyv": {
"version": "3.1.4",
"resolved": "https://registry.npmjs.org/@types/keyv/-/keyv-3.1.4.tgz",
"integrity": "sha512-BQ5aZNSCpj7D6K2ksrRCTmKRLEpnPvWDiLPfoGyhZ++8YtiK9d/3DBKPJgry359X/P1PfruyYwvnvwFjuEiEIg==",
"dependencies": {
"@types/node": "*"
}
},
"node_modules/@types/node": {
"version": "22.18.1",
"resolved": "https://registry.npmjs.org/@types/node/-/node-22.18.1.tgz",
"integrity": "sha512-rzSDyhn4cYznVG+PCzGe1lwuMYJrcBS1fc3JqSa2PvtABwWo+dZ1ij5OVok3tqfpEBCBoaR4d7upFJk73HRJDw==",
"dependencies": {
"undici-types": "~6.21.0"
}
},
"node_modules/@types/responselike": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/@types/responselike/-/responselike-1.0.3.tgz",
"integrity": "sha512-H/+L+UkTV33uf49PH5pCAUBVPNj2nDBXTN+qS1dOwyyg24l3CcicicCA7ca+HMvJBZcFgl5r8e+RR6elsb4Lyw==",
"dependencies": {
"@types/node": "*"
}
},
"node_modules/@types/yauzl": {
"version": "2.10.3",
"resolved": "https://registry.npmjs.org/@types/yauzl/-/yauzl-2.10.3.tgz",
"integrity": "sha512-oJoftv0LSuaDZE3Le4DbKX+KS9G36NzOeSap90UIK0yMA/NhKJhqlSGtNDORNRaIbQfzjXDrQa0ytJ6mNRGz/Q==",
"optional": true,
"dependencies": {
"@types/node": "*"
}
},
"node_modules/boolean": {
"version": "3.2.0",
"resolved": "https://registry.npmjs.org/boolean/-/boolean-3.2.0.tgz",
"integrity": "sha512-d0II/GO9uf9lfUHH2BQsjxzRJZBdsjgsBiW4BvhWk/3qoKwQFjIDVN19PfX8F2D/r9PCMTtLWjYVCFrpeYUzsw==",
"deprecated": "Package no longer supported. Contact Support at https://www.npmjs.com/support for more info.",
"optional": true
},
"node_modules/buffer-crc32": {
"version": "0.2.13",
"resolved": "https://registry.npmjs.org/buffer-crc32/-/buffer-crc32-0.2.13.tgz",
"integrity": "sha512-VO9Ht/+p3SN7SKWqcrgEzjGbRSJYTx+Q1pTQC0wrWqHx0vpJraQ6GtHx8tvcg1rlK1byhU5gccxgOgj7B0TDkQ==",
"engines": {
"node": "*"
}
},
"node_modules/cacheable-lookup": {
"version": "5.0.4",
"resolved": "https://registry.npmjs.org/cacheable-lookup/-/cacheable-lookup-5.0.4.tgz",
"integrity": "sha512-2/kNscPhpcxrOigMZzbiWF7dz8ilhb/nIHU3EyZiXWXpeq/au8qJ8VhdftMkty3n7Gj6HIGalQG8oiBNB3AJgA==",
"engines": {
"node": ">=10.6.0"
}
},
"node_modules/cacheable-request": {
"version": "7.0.4",
"resolved": "https://registry.npmjs.org/cacheable-request/-/cacheable-request-7.0.4.tgz",
"integrity": "sha512-v+p6ongsrp0yTGbJXjgxPow2+DL93DASP4kXCDKb8/bwRtt9OEF3whggkkDkGNzgcWy2XaF4a8nZglC7uElscg==",
"dependencies": {
"clone-response": "^1.0.2",
"get-stream": "^5.1.0",
"http-cache-semantics": "^4.0.0",
"keyv": "^4.0.0",
"lowercase-keys": "^2.0.0",
"normalize-url": "^6.0.1",
"responselike": "^2.0.0"
},
"engines": {
"node": ">=8"
}
},
"node_modules/clone-response": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/clone-response/-/clone-response-1.0.3.tgz",
"integrity": "sha512-ROoL94jJH2dUVML2Y/5PEDNaSHgeOdSDicUyS7izcF63G6sTc/FTjLub4b8Il9S8S0beOfYt0TaA5qvFK+w0wA==",
"dependencies": {
"mimic-response": "^1.0.0"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/debug": {
"version": "4.4.1",
"resolved": "https://registry.npmjs.org/debug/-/debug-4.4.1.tgz",
"integrity": "sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==",
"dependencies": {
"ms": "^2.1.3"
},
"engines": {
"node": ">=6.0"
},
"peerDependenciesMeta": {
"supports-color": {
"optional": true
}
}
},
"node_modules/decompress-response": {
"version": "6.0.0",
"resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-6.0.0.tgz",
"integrity": "sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==",
"dependencies": {
"mimic-response": "^3.1.0"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/decompress-response/node_modules/mimic-response": {
"version": "3.1.0",
"resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-3.1.0.tgz",
"integrity": "sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/defer-to-connect": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/defer-to-connect/-/defer-to-connect-2.0.1.tgz",
"integrity": "sha512-4tvttepXG1VaYGrRibk5EwJd1t4udunSOVMdLSAL6mId1ix438oPwPZMALY41FCijukO1L0twNcGsdzS7dHgDg==",
"engines": {
"node": ">=10"
}
},
"node_modules/define-data-property": {
"version": "1.1.4",
"resolved": "https://registry.npmjs.org/define-data-property/-/define-data-property-1.1.4.tgz",
"integrity": "sha512-rBMvIzlpA8v6E+SJZoo++HAYqsLrkg7MSfIinMPFhmkorw7X+dOXVJQs+QT69zGkzMyfDnIMN2Wid1+NbL3T+A==",
"optional": true,
"dependencies": {
"es-define-property": "^1.0.0",
"es-errors": "^1.3.0",
"gopd": "^1.0.1"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/define-properties": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/define-properties/-/define-properties-1.2.1.tgz",
"integrity": "sha512-8QmQKqEASLd5nx0U1B1okLElbUuuttJ/AnYmRXbbbGDWh6uS208EjD4Xqq/I9wK7u0v6O08XhTWnt5XtEbR6Dg==",
"optional": true,
"dependencies": {
"define-data-property": "^1.0.1",
"has-property-descriptors": "^1.0.0",
"object-keys": "^1.1.1"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/detect-node": {
"version": "2.1.0",
"resolved": "https://registry.npmjs.org/detect-node/-/detect-node-2.1.0.tgz",
"integrity": "sha512-T0NIuQpnTvFDATNuHN5roPwSBG83rFsuO+MXXH9/3N1eFbn4wcPjttvjMLEPWJ0RGUYgQE7cGgS3tNxbqCGM7g==",
"optional": true
},
"node_modules/electron": {
"version": "38.0.0",
"resolved": "https://registry.npmjs.org/electron/-/electron-38.0.0.tgz",
"integrity": "sha512-egljptiPJqbL/oamFCEY+g3RNeONWTVxZSGeyLqzK8xq106JhzuxnhJZ3sxt4DzJFaofbGyGJA37Oe9d+gVzYw==",
"hasInstallScript": true,
"dependencies": {
"@electron/get": "^2.0.0",
"@types/node": "^22.7.7",
"extract-zip": "^2.0.1"
},
"bin": {
"electron": "cli.js"
},
"engines": {
"node": ">= 12.20.55"
}
},
"node_modules/end-of-stream": {
"version": "1.4.5",
"resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.5.tgz",
"integrity": "sha512-ooEGc6HP26xXq/N+GCGOT0JKCLDGrq2bQUZrQ7gyrJiZANJ/8YDTxTpQBXGMn+WbIQXNVpyWymm7KYVICQnyOg==",
"dependencies": {
"once": "^1.4.0"
}
},
"node_modules/env-paths": {
"version": "2.2.1",
"resolved": "https://registry.npmjs.org/env-paths/-/env-paths-2.2.1.tgz",
"integrity": "sha512-+h1lkLKhZMTYjog1VEpJNG7NZJWcuc2DDk/qsqSTRRCOXiLjeQ1d1/udrUGhqMxUgAlwKNZ0cf2uqan5GLuS2A==",
"engines": {
"node": ">=6"
}
},
"node_modules/es-define-property": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz",
"integrity": "sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==",
"optional": true,
"engines": {
"node": ">= 0.4"
}
},
"node_modules/es-errors": {
"version": "1.3.0",
"resolved": "https://registry.npmjs.org/es-errors/-/es-errors-1.3.0.tgz",
"integrity": "sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==",
"optional": true,
"engines": {
"node": ">= 0.4"
}
},
"node_modules/es6-error": {
"version": "4.1.1",
"resolved": "https://registry.npmjs.org/es6-error/-/es6-error-4.1.1.tgz",
"integrity": "sha512-Um/+FxMr9CISWh0bi5Zv0iOD+4cFh5qLeks1qhAopKVAJw3drgKbKySikp7wGhDL0HPeaja0P5ULZrxLkniUVg==",
"optional": true
},
"node_modules/escape-string-regexp": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz",
"integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==",
"optional": true,
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/extract-zip": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/extract-zip/-/extract-zip-2.0.1.tgz",
"integrity": "sha512-GDhU9ntwuKyGXdZBUgTIe+vXnWj0fppUEtMDL0+idd5Sta8TGpHssn/eusA9mrPr9qNDym6SxAYZjNvCn/9RBg==",
"dependencies": {
"debug": "^4.1.1",
"get-stream": "^5.1.0",
"yauzl": "^2.10.0"
},
"bin": {
"extract-zip": "cli.js"
},
"engines": {
"node": ">= 10.17.0"
},
"optionalDependencies": {
"@types/yauzl": "^2.9.1"
}
},
"node_modules/fd-slicer": {
"version": "1.1.0",
"resolved": "https://registry.npmjs.org/fd-slicer/-/fd-slicer-1.1.0.tgz",
"integrity": "sha512-cE1qsB/VwyQozZ+q1dGxR8LBYNZeofhEdUNGSMbQD3Gw2lAzX9Zb3uIU6Ebc/Fmyjo9AWWfnn0AUCHqtevs/8g==",
"dependencies": {
"pend": "~1.2.0"
}
},
"node_modules/fs-extra": {
"version": "8.1.0",
"resolved": "https://registry.npmjs.org/fs-extra/-/fs-extra-8.1.0.tgz",
"integrity": "sha512-yhlQgA6mnOJUKOsRUFsgJdQCvkKhcz8tlZG5HBQfReYZy46OwLcY+Zia0mtdHsOo9y/hP+CxMN0TU9QxoOtG4g==",
"dependencies": {
"graceful-fs": "^4.2.0",
"jsonfile": "^4.0.0",
"universalify": "^0.1.0"
},
"engines": {
"node": ">=6 <7 || >=8"
}
},
"node_modules/get-stream": {
"version": "5.2.0",
"resolved": "https://registry.npmjs.org/get-stream/-/get-stream-5.2.0.tgz",
"integrity": "sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA==",
"dependencies": {
"pump": "^3.0.0"
},
"engines": {
"node": ">=8"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/global-agent": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/global-agent/-/global-agent-3.0.0.tgz",
"integrity": "sha512-PT6XReJ+D07JvGoxQMkT6qji/jVNfX/h364XHZOWeRzy64sSFr+xJ5OX7LI3b4MPQzdL4H8Y8M0xzPpsVMwA8Q==",
"optional": true,
"dependencies": {
"boolean": "^3.0.1",
"es6-error": "^4.1.1",
"matcher": "^3.0.0",
"roarr": "^2.15.3",
"semver": "^7.3.2",
"serialize-error": "^7.0.1"
},
"engines": {
"node": ">=10.0"
}
},
"node_modules/global-agent/node_modules/semver": {
"version": "7.7.2",
"resolved": "https://registry.npmjs.org/semver/-/semver-7.7.2.tgz",
"integrity": "sha512-RF0Fw+rO5AMf9MAyaRXI4AV0Ulj5lMHqVxxdSgiVbixSCXoEmmX/jk0CuJw4+3SqroYO9VoUh+HcuJivvtJemA==",
"optional": true,
"bin": {
"semver": "bin/semver.js"
},
"engines": {
"node": ">=10"
}
},
"node_modules/globalthis": {
"version": "1.0.4",
"resolved": "https://registry.npmjs.org/globalthis/-/globalthis-1.0.4.tgz",
"integrity": "sha512-DpLKbNU4WylpxJykQujfCcwYWiV/Jhm50Goo0wrVILAv5jOr9d+H+UR3PhSCD2rCCEIg0uc+G+muBTwD54JhDQ==",
"optional": true,
"dependencies": {
"define-properties": "^1.2.1",
"gopd": "^1.0.1"
},
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/gopd": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz",
"integrity": "sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==",
"optional": true,
"engines": {
"node": ">= 0.4"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/got": {
"version": "11.8.6",
"resolved": "https://registry.npmjs.org/got/-/got-11.8.6.tgz",
"integrity": "sha512-6tfZ91bOr7bOXnK7PRDCGBLa1H4U080YHNaAQ2KsMGlLEzRbk44nsZF2E1IeRc3vtJHPVbKCYgdFbaGO2ljd8g==",
"dependencies": {
"@sindresorhus/is": "^4.0.0",
"@szmarczak/http-timer": "^4.0.5",
"@types/cacheable-request": "^6.0.1",
"@types/responselike": "^1.0.0",
"cacheable-lookup": "^5.0.3",
"cacheable-request": "^7.0.2",
"decompress-response": "^6.0.0",
"http2-wrapper": "^1.0.0-beta.5.2",
"lowercase-keys": "^2.0.0",
"p-cancelable": "^2.0.0",
"responselike": "^2.0.0"
},
"engines": {
"node": ">=10.19.0"
},
"funding": {
"url": "https://github.com/sindresorhus/got?sponsor=1"
}
},
"node_modules/graceful-fs": {
"version": "4.2.11",
"resolved": "https://registry.npmjs.org/graceful-fs/-/graceful-fs-4.2.11.tgz",
"integrity": "sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ=="
},
"node_modules/has-property-descriptors": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/has-property-descriptors/-/has-property-descriptors-1.0.2.tgz",
"integrity": "sha512-55JNKuIW+vq4Ke1BjOTjM2YctQIvCT7GFzHwmfZPGo5wnrgkid0YQtnAleFSqumZm4az3n2BS+erby5ipJdgrg==",
"optional": true,
"dependencies": {
"es-define-property": "^1.0.0"
},
"funding": {
"url": "https://github.com/sponsors/ljharb"
}
},
"node_modules/http-cache-semantics": {
"version": "4.2.0",
"resolved": "https://registry.npmjs.org/http-cache-semantics/-/http-cache-semantics-4.2.0.tgz",
"integrity": "sha512-dTxcvPXqPvXBQpq5dUr6mEMJX4oIEFv6bwom3FDwKRDsuIjjJGANqhBuoAn9c1RQJIdAKav33ED65E2ys+87QQ=="
},
"node_modules/http2-wrapper": {
"version": "1.0.3",
"resolved": "https://registry.npmjs.org/http2-wrapper/-/http2-wrapper-1.0.3.tgz",
"integrity": "sha512-V+23sDMr12Wnz7iTcDeJr3O6AIxlnvT/bmaAAAP/Xda35C90p9599p0F1eHR/N1KILWSoWVAiOMFjBBXaXSMxg==",
"dependencies": {
"quick-lru": "^5.1.1",
"resolve-alpn": "^1.0.0"
},
"engines": {
"node": ">=10.19.0"
}
},
"node_modules/json-buffer": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz",
"integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ=="
},
"node_modules/json-stringify-safe": {
"version": "5.0.1",
"resolved": "https://registry.npmjs.org/json-stringify-safe/-/json-stringify-safe-5.0.1.tgz",
"integrity": "sha512-ZClg6AaYvamvYEE82d3Iyd3vSSIjQ+odgjaTzRuO3s7toCdFKczob2i0zCh7JE8kWn17yvAWhUVxvqGwUalsRA==",
"optional": true
},
"node_modules/jsonfile": {
"version": "4.0.0",
"resolved": "https://registry.npmjs.org/jsonfile/-/jsonfile-4.0.0.tgz",
"integrity": "sha512-m6F1R3z8jjlf2imQHS2Qez5sjKWQzbuuhuJ/FKYFRZvPE3PuHcSMVZzfsLhGVOkfd20obL5SWEBew5ShlquNxg==",
"optionalDependencies": {
"graceful-fs": "^4.1.6"
}
},
"node_modules/keyv": {
"version": "4.5.4",
"resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz",
"integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==",
"dependencies": {
"json-buffer": "3.0.1"
}
},
"node_modules/lowercase-keys": {
"version": "2.0.0",
"resolved": "https://registry.npmjs.org/lowercase-keys/-/lowercase-keys-2.0.0.tgz",
"integrity": "sha512-tqNXrS78oMOE73NMxK4EMLQsQowWf8jKooH9g7xPavRT706R6bkQJ6DY2Te7QukaZsulxa30wQ7bk0pm4XiHmA==",
"engines": {
"node": ">=8"
}
},
"node_modules/matcher": {
"version": "3.0.0",
"resolved": "https://registry.npmjs.org/matcher/-/matcher-3.0.0.tgz",
"integrity": "sha512-OkeDaAZ/bQCxeFAozM55PKcKU0yJMPGifLwV4Qgjitu+5MoAfSQN4lsLJeXZ1b8w0x+/Emda6MZgXS1jvsapng==",
"optional": true,
"dependencies": {
"escape-string-regexp": "^4.0.0"
},
"engines": {
"node": ">=10"
}
},
"node_modules/mimic-response": {
"version": "1.0.1",
"resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-1.0.1.tgz",
"integrity": "sha512-j5EctnkH7amfV/q5Hgmoal1g2QHFJRraOtmx0JpIqkxhBhI/lJSl1nMpQ45hVarwNETOoWEimndZ4QK0RHxuxQ==",
"engines": {
"node": ">=4"
}
},
"node_modules/ms": {
"version": "2.1.3",
"resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz",
"integrity": "sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA=="
},
"node_modules/normalize-url": {
"version": "6.1.0",
"resolved": "https://registry.npmjs.org/normalize-url/-/normalize-url-6.1.0.tgz",
"integrity": "sha512-DlL+XwOy3NxAQ8xuC0okPgK46iuVNAK01YN7RueYBqqFeGsBjV9XmCAzAdgt+667bCl5kPh9EqKKDwnaPG1I7A==",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/object-keys": {
"version": "1.1.1",
"resolved": "https://registry.npmjs.org/object-keys/-/object-keys-1.1.1.tgz",
"integrity": "sha512-NuAESUOUMrlIXOfHKzD6bpPu3tYt3xvjNdRIQ+FeT0lNb4K8WR70CaDxhuNguS2XG+GjkyMwOzsN5ZktImfhLA==",
"optional": true,
"engines": {
"node": ">= 0.4"
}
},
"node_modules/once": {
"version": "1.4.0",
"resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz",
"integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==",
"dependencies": {
"wrappy": "1"
}
},
"node_modules/p-cancelable": {
"version": "2.1.1",
"resolved": "https://registry.npmjs.org/p-cancelable/-/p-cancelable-2.1.1.tgz",
"integrity": "sha512-BZOr3nRQHOntUjTrH8+Lh54smKHoHyur8We1V8DSMVrl5A2malOOwuJRnKRDjSnkoeBh4at6BwEnb5I7Jl31wg==",
"engines": {
"node": ">=8"
}
},
"node_modules/pend": {
"version": "1.2.0",
"resolved": "https://registry.npmjs.org/pend/-/pend-1.2.0.tgz",
"integrity": "sha512-F3asv42UuXchdzt+xXqfW1OGlVBe+mxa2mqI0pg5yAHZPvFmY3Y6drSf/GQ1A86WgWEN9Kzh/WrgKa6iGcHXLg=="
},
"node_modules/progress": {
"version": "2.0.3",
"resolved": "https://registry.npmjs.org/progress/-/progress-2.0.3.tgz",
"integrity": "sha512-7PiHtLll5LdnKIMw100I+8xJXR5gW2QwWYkT6iJva0bXitZKa/XMrSbdmg3r2Xnaidz9Qumd0VPaMrZlF9V9sA==",
"engines": {
"node": ">=0.4.0"
}
},
"node_modules/pump": {
"version": "3.0.3",
"resolved": "https://registry.npmjs.org/pump/-/pump-3.0.3.tgz",
"integrity": "sha512-todwxLMY7/heScKmntwQG8CXVkWUOdYxIvY2s0VWAAMh/nd8SoYiRaKjlr7+iCs984f2P8zvrfWcDDYVb73NfA==",
"dependencies": {
"end-of-stream": "^1.1.0",
"once": "^1.3.1"
}
},
"node_modules/quick-lru": {
"version": "5.1.1",
"resolved": "https://registry.npmjs.org/quick-lru/-/quick-lru-5.1.1.tgz",
"integrity": "sha512-WuyALRjWPDGtt/wzJiadO5AXY+8hZ80hVpe6MyivgraREW751X3SbhRvG3eLKOYN+8VEvqLcf3wdnt44Z4S4SA==",
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/resolve-alpn": {
"version": "1.2.1",
"resolved": "https://registry.npmjs.org/resolve-alpn/-/resolve-alpn-1.2.1.tgz",
"integrity": "sha512-0a1F4l73/ZFZOakJnQ3FvkJ2+gSTQWz/r2KE5OdDY0TxPm5h4GkqkWWfM47T7HsbnOtcJVEF4epCVy6u7Q3K+g=="
},
"node_modules/responselike": {
"version": "2.0.1",
"resolved": "https://registry.npmjs.org/responselike/-/responselike-2.0.1.tgz",
"integrity": "sha512-4gl03wn3hj1HP3yzgdI7d3lCkF95F21Pz4BPGvKHinyQzALR5CapwC8yIi0Rh58DEMQ/SguC03wFj2k0M/mHhw==",
"dependencies": {
"lowercase-keys": "^2.0.0"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/roarr": {
"version": "2.15.4",
"resolved": "https://registry.npmjs.org/roarr/-/roarr-2.15.4.tgz",
"integrity": "sha512-CHhPh+UNHD2GTXNYhPWLnU8ONHdI+5DI+4EYIAOaiD63rHeYlZvyh8P+in5999TTSFgUYuKUAjzRI4mdh/p+2A==",
"optional": true,
"dependencies": {
"boolean": "^3.0.1",
"detect-node": "^2.0.4",
"globalthis": "^1.0.1",
"json-stringify-safe": "^5.0.1",
"semver-compare": "^1.0.0",
"sprintf-js": "^1.1.2"
},
"engines": {
"node": ">=8.0"
}
},
"node_modules/semver": {
"version": "6.3.1",
"resolved": "https://registry.npmjs.org/semver/-/semver-6.3.1.tgz",
"integrity": "sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==",
"bin": {
"semver": "bin/semver.js"
}
},
"node_modules/semver-compare": {
"version": "1.0.0",
"resolved": "https://registry.npmjs.org/semver-compare/-/semver-compare-1.0.0.tgz",
"integrity": "sha512-YM3/ITh2MJ5MtzaM429anh+x2jiLVjqILF4m4oyQB18W7Ggea7BfqdH/wGMK7dDiMghv/6WG7znWMwUDzJiXow==",
"optional": true
},
"node_modules/serialize-error": {
"version": "7.0.1",
"resolved": "https://registry.npmjs.org/serialize-error/-/serialize-error-7.0.1.tgz",
"integrity": "sha512-8I8TjW5KMOKsZQTvoxjuSIa7foAwPWGOts+6o7sgjz41/qMD9VQHEDxi6PBvK2l0MXUmqZyNpUK+T2tQaaElvw==",
"optional": true,
"dependencies": {
"type-fest": "^0.13.1"
},
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/sprintf-js": {
"version": "1.1.3",
"resolved": "https://registry.npmjs.org/sprintf-js/-/sprintf-js-1.1.3.tgz",
"integrity": "sha512-Oo+0REFV59/rz3gfJNKQiBlwfHaSESl1pcGyABQsnnIfWOFt6JNj5gCog2U6MLZ//IGYD+nA8nI+mTShREReaA==",
"optional": true
},
"node_modules/sumchecker": {
"version": "3.0.1",
"resolved": "https://registry.npmjs.org/sumchecker/-/sumchecker-3.0.1.tgz",
"integrity": "sha512-MvjXzkz/BOfyVDkG0oFOtBxHX2u3gKbMHIF/dXblZsgD3BWOFLmHovIpZY7BykJdAjcqRCBi1WYBNdEC9yI7vg==",
"dependencies": {
"debug": "^4.1.0"
},
"engines": {
"node": ">= 8.0"
}
},
"node_modules/type-fest": {
"version": "0.13.1",
"resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.13.1.tgz",
"integrity": "sha512-34R7HTnG0XIJcBSn5XhDd7nNFPRcXYRZrBB2O2jdKqYODldSzBAqzsWoZYYvduky73toYS/ESqxPvkDf/F0XMg==",
"optional": true,
"engines": {
"node": ">=10"
},
"funding": {
"url": "https://github.com/sponsors/sindresorhus"
}
},
"node_modules/undici-types": {
"version": "6.21.0",
"resolved": "https://registry.npmjs.org/undici-types/-/undici-types-6.21.0.tgz",
"integrity": "sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ=="
},
"node_modules/universalify": {
"version": "0.1.2",
"resolved": "https://registry.npmjs.org/universalify/-/universalify-0.1.2.tgz",
"integrity": "sha512-rBJeI5CXAlmy1pV+617WB9J63U6XcazHHF2f2dbJix4XzpUF0RS3Zbj0FGIOCAva5P/d/GBOYaACQ1w+0azUkg==",
"engines": {
"node": ">= 4.0.0"
}
},
"node_modules/wrappy": {
"version": "1.0.2",
"resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz",
"integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ=="
},
"node_modules/yauzl": {
"version": "2.10.0",
"resolved": "https://registry.npmjs.org/yauzl/-/yauzl-2.10.0.tgz",
"integrity": "sha512-p4a9I6X6nu6IhoGmBqAcbJy1mlC4j27vEPZX9F4L4/vZT3Lyq1VkFHw/V/PUcB9Buo+DG3iHkT0x3Qya58zc3g==",
"dependencies": {
"buffer-crc32": "~0.2.3",
"fd-slicer": "~1.1.0"
}
}
}
}

View File

@@ -0,0 +1,15 @@
{
"name": "desktop-electron",
"version": "1.0.0",
"description": "",
"main": "index.js",
"scripts": {
"test": "echo \"Error: no test specified\" && exit 1"
},
"keywords": [],
"author": "",
"license": "ISC",
"dependencies": {
"electron": "^38.0.0"
}
}

View File

@@ -0,0 +1,14 @@
using Microsoft.Maui;
using Microsoft.Maui.Controls;
using Microsoft.Maui.Controls.Xaml;
namespace MemoryApp;

/// <summary>
/// Application entry point for the MAUI Memory Assistant: sets the
/// root page to <see cref="MainPage"/> on startup.
/// </summary>
public partial class App : Application
{
    public App()
    {
        InitializeComponent();
        // Show the single-page UI defined in MainPage.xaml.
        MainPage = new MainPage();
    }
}

View File

@@ -0,0 +1,13 @@
<ContentPage xmlns="http://schemas.microsoft.com/dotnet/2021/maui"
             xmlns:x="http://schemas.microsoft.com/winfx/2009/xaml"
             x:Class="MemoryApp.MainPage">
    <!-- Single-screen UI: a query box, a submit button, and a label that
         shows the backend's answer (handler wired up in MainPage.xaml.cs). -->
    <ScrollView>
        <VerticalStackLayout Padding="20">
            <Label Text="🧠 Memory Assistant" FontSize="32" />
            <!-- The user's question; read by OnSubmitClicked via x:Name. -->
            <Entry x:Name="queryInput" Placeholder="Ask something..." />
            <Button Text="Submit" Clicked="OnSubmitClicked" />
            <!-- Populated with the backend's response text. -->
            <Label x:Name="responseLabel" FontSize="18" />
        </VerticalStackLayout>
    </ScrollView>
</ContentPage>

View File

@@ -0,0 +1,21 @@
using System.Net.Http;
using Newtonsoft.Json.Linq;

namespace MemoryApp;

/// <summary>
/// Code-behind for MainPage: forwards the typed query to the local
/// backend's /ask endpoint and renders the "response" field of the
/// returned JSON.
/// </summary>
public partial class MainPage : ContentPage
{
    // Reuse one HttpClient for the page's lifetime; allocating a new
    // client per click exhausts sockets under repeated use.
    private static readonly HttpClient _client = new HttpClient();

    public MainPage()
    {
        InitializeComponent();
    }

    /// <summary>Handles the Submit button: queries the backend and shows the answer.</summary>
    private async void OnSubmitClicked(object sender, EventArgs e)
    {
        var query = queryInput.Text;
        if (string.IsNullOrWhiteSpace(query))
            return; // nothing to ask

        try
        {
            // URL-encode the query so spaces and special characters survive.
            var res = await _client.GetStringAsync(
                $"http://localhost:8000/ask?query={Uri.EscapeDataString(query)}");
            var json = JObject.Parse(res);
            responseLabel.Text = json["response"]?.ToString();
        }
        catch (Exception ex)
        {
            // Surface network/parse failures in the UI instead of crashing.
            responseLabel.Text = $"Error: {ex.Message}";
        }
    }
}

23
docker-compose.yml Normal file
View File

@@ -0,0 +1,23 @@
# Compose stack for local development: FastAPI backend + Vite frontend.
version: '3.9'  # NOTE(review): top-level version key is ignored by Compose v2 — confirm before removing
services:
  backend:
    build: .
    container_name: agentic-backend
    ports:
      - "8000:8000"   # FastAPI (uvicorn) port
    volumes:
      - .:/app        # live-mount backend source into the container
    restart: always
  frontend:
    build:
      context: ./web
    container_name: agentic-frontend
    ports:
      - "5173:5173"   # Vite dev-server port
    command: ["npm", "run", "dev"]
    volumes:
      - ./web:/web    # live-mount frontend source for hot reload
    working_dir: /web
    restart: always

54
execution/action_layer.py Normal file
View File

@@ -0,0 +1,54 @@
# execution/action_layer.py
##INFO: external API actions with safety, RBAC, and trace
import requests
from execution.execution_trace import execution_trace
from execution.safety_policy import safety_policy
from tenants.rbac_guard import enforce_rbac
from tools.tool_registry import tool_registry
from governance.usage_meter import usage_meter
class ActionLayer:
    """Executes registered external API actions with safety checks, RBAC,
    usage metering, and execution tracing.

    Actions registered via :meth:`register_action` are invoked over HTTP;
    unknown names fall back to the internal tool registry.
    """

    def __init__(self):
        # name -> {"endpoint": str, "method": "POST" | "GET"}
        self.actions = {}

    def register_action(self, name: str, endpoint: str, method: str = "POST"):
        """Register an external HTTP action under *name*."""
        self.actions[name] = {
            "endpoint": endpoint,
            "method": method
        }

    ##NOTE: Add safety + trace integration (and Enforce RBAC), optional fallback to internal tool registry
    @enforce_rbac("sync_action")
    def trigger(self, name: str, payload: dict, tenant_id: str = "default", agent_role: str = "unknown"):
        """Run action *name* with *payload* on behalf of a tenant/agent.

        Returns {"status", "response", "trace"} on execution, or an error
        dict when the safety policy blocks the action or the quota is
        exceeded.
        """
        check = safety_policy.check(name, payload)
        if "error" in check:
            return check
        ##INFO: Usage tracking
        usage_meter.log_usage(tenant_id, agent_role, units=1)
        ##INFO: Quota check before execution
        quota = usage_meter.check_quota(tenant_id, agent_role)
        if quota.get("status") == "exceeded":
            return {"error": "Quota exceeded", "quota": quota}
        if name in self.actions:
            action = self.actions[name]
            try:
                # Fix: a timeout is required — without one, a hung endpoint
                # blocks the caller forever.
                if action["method"] == "POST":
                    res = requests.post(action["endpoint"], json=payload, timeout=30)
                else:
                    res = requests.get(action["endpoint"], params=payload, timeout=30)
                response = res.json()
            except Exception as e:
                # Best-effort: report the failure as the action response.
                response = {"error": str(e)}
        else:
            # Fallback to internal tool
            response = tool_registry.invoke(name, tenant_id=tenant_id, agent_role=agent_role, **payload)
        trace = execution_trace.log(tenant_id, agent_role, name, payload, response)
        return {"status": "executed", "response": response, "trace": trace}


action_layer = ActionLayer()

View File

@@ -0,0 +1,24 @@
# execution/execution_trace.py
import time
class ExecutionTrace:
    """Append-only log of executed actions and their responses."""

    def __init__(self):
        self.trace = []

    def log(self, tenant_id: str, agent_role: str, action_name: str, payload: dict, response: dict):
        """Append one trace entry and return it."""
        entry = dict(
            timestamp=time.time(),
            tenant_id=tenant_id,
            agent=agent_role,
            action=action_name,
            payload=payload,
            response=response,
        )
        self.trace.append(entry)
        return entry

    def get_all(self):
        """Return the full trace in insertion order."""
        return self.trace


execution_trace = ExecutionTrace()

View File

@@ -0,0 +1,29 @@
# execution/feedback_loop.py
##INFO: 🔁 Environment Feedback Loop
import time
from agents.agent_registry import get_agent
class FeedbackLoop:
    """Captures environment feedback and has the acting agent interpret it."""

    def __init__(self):
        self.feedback_log = []

    def capture_feedback(self, tenant_id: str, agent_role: str, action_name: str, response: dict):
        """Ask the agent to interpret *response*, then log and return the entry."""
        interpreter = get_agent(agent_role)
        interpretation = interpreter.run(
            f"Interpret this feedback: {response}", {"tenant_id": tenant_id}
        )
        record = {
            "timestamp": time.time(),
            "agent": agent_role,
            "action": action_name,
            "response": response,
            "interpretation": interpretation,
        }
        self.feedback_log.append(record)
        return record

    def get_all(self):
        """Return every captured feedback entry."""
        return self.feedback_log


feedback_loop = FeedbackLoop()

View File

@@ -0,0 +1,23 @@
# execution/safety_policy.py
class SafetyPolicy:
    """Pre-execution safety gate for external actions."""

    def __init__(self):
        # Static rule set; adjust per deployment.
        self.rules = {
            "max_payload_size": 10000,
            "allowed_actions": ["email", "weather", "stock"],
            "require_approval": ["financial_transfer"]
        }

    def check(self, action_name: str, payload: dict):
        """Vet *action_name*/*payload* against the rule set.

        Returns {"status": "safe"}, an {"error": ...} dict for blocked
        requests, or a {"warning": ...} dict when approval is required.
        """
        rules = self.rules
        if action_name not in rules["allowed_actions"]:
            return {"error": f"Action '{action_name}' is not allowed"}
        # Size is measured on the stringified payload.
        if len(str(payload)) > rules["max_payload_size"]:
            return {"error": "Payload size exceeds limit"}
        if action_name in rules["require_approval"]:
            return {"warning": "Action requires human approval"}
        return {"status": "safe"}


safety_policy = SafetyPolicy()

View File

@@ -0,0 +1,73 @@
# feedback/approval_queue.py
##INFO: Feedback Approval Queue (🧑‍⚖️ Human-in-the-Loop Approval Queue)
import time
from execution.feedback_loop import feedback_loop # ✅ Import
class ApprovalQueue:
    """Human-in-the-loop approval queue for sensitive agent actions.

    Pending items wait in ``queue``; every decided item (approved or
    rejected) is moved to ``history``.
    """

    def __init__(self):
        self.queue = []    # pending items
        self.history = []  # decided items

    def submit(self, tenant_id: str, agent_role: str, action: str, payload: dict):
        """Queue an action for human review and return the pending item."""
        item = {
            "id": f"{agent_role}-{int(time.time())}",
            "timestamp": time.time(),
            "tenant_id": tenant_id,
            "agent": agent_role,
            "action": action,
            "payload": payload,
            "status": "pending"
        }
        self.queue.append(item)
        return item

    ##INFO: Extend to trigger feedback capture
    def approve(self, item_id: str, revised_payload: dict = None):
        """Approve a pending item, execute it, and capture feedback.

        Returns the decided item merged with the execution result, or an
        error dict when *item_id* is unknown.
        """
        for item in self.queue:
            if item["id"] == item_id:
                item["status"] = "approved"
                item["payload"] = revised_payload or item["payload"]
                self.history.append(item)
                self.queue.remove(item)
                # ✅ Trigger action (imported lazily to avoid a circular
                # import with the action layer).
                from execution.action_layer import action_layer
                result = action_layer.trigger(item["action"], item["payload"], item["tenant_id"], item["agent"])
                # ✅ Auto-capture feedback.
                # Fix: use .get() — trigger() returns an error dict without
                # a "response" key when safety/quota checks fail, which
                # previously raised KeyError here.
                feedback_loop.capture_feedback(
                    item["tenant_id"], item["agent"], item["action"],
                    result.get("response", result)
                )
                return {**item, "executed": result}
        return {"error": "Item not found"}

    def reject(self, item_id: str, reason: str = ""):
        """Reject a pending item with an optional *reason*."""
        for item in self.queue:
            if item["id"] == item_id:
                item["status"] = "rejected"
                item["reason"] = reason
                self.history.append(item)
                self.queue.remove(item)
                return item
        return {"error": "Item not found"}

    def get_pending(self):
        """Return items still awaiting a decision."""
        return self.queue

    def get_history(self):
        """Return all decided items."""
        return self.history


approval_queue = ApprovalQueue()

View File

@@ -0,0 +1,22 @@
# governance/billing_engine.py
##INFO:
class BillingEngine:
    """Computes usage costs from per-(tenant, agent) unit rates."""

    def __init__(self):
        # (tenant_id, agent_role) -> price per unit
        self.rates = {}

    def set_rate(self, tenant_id: str, agent_role: str, rate: float):
        """Register the per-unit rate for a tenant/agent pair."""
        self.rates[(tenant_id, agent_role)] = rate

    def estimate(self, tenant_id: str, agent_role: str, units: int):
        """Cost of *units* at the configured rate (0.0 when no rate is set)."""
        per_unit = self.rates.get((tenant_id, agent_role), 0.0)
        return round(per_unit * units, 4)

    def get_summary(self, usage_data: list):
        """Annotate each usage entry with its estimated cost."""
        return [
            {**entry, "cost": self.estimate(entry["tenant"], entry["agent"], entry["used"])}
            for entry in usage_data
        ]


billing_engine = BillingEngine()

View File

@@ -0,0 +1,38 @@
# governance/compliance_checker.py
##INFO:
import time
class ComplianceChecker:
    """Checks agent tasks against tenant policy and records violations."""

    def __init__(self):
        self.violations = []

    def check(self, tenant_id: str, agent_role: str, task: str, policy: dict):
        """Return {"status": "compliant"} or a recorded violation dict.

        *policy* must provide "allowed_roles" and "restricted_tasks";
        restricted-task matching is case-insensitive substring search.
        """
        if agent_role not in policy["allowed_roles"]:
            return self.log_violation(tenant_id, agent_role, task, "Role not permitted")
        task_lower = task.lower()
        for restricted in policy["restricted_tasks"]:
            if restricted.lower() in task_lower:
                return self.log_violation(
                    tenant_id, agent_role, task, f"Task '{restricted}' is restricted"
                )
        return {"status": "compliant"}

    def log_violation(self, tenant_id: str, agent_role: str, task: str, reason: str):
        """Append a violation record and return it."""
        record = {
            "timestamp": time.time(),
            "tenant": tenant_id,
            "agent": agent_role,
            "task": task,
            "reason": reason,
            "status": "violation"
        }
        self.violations.append(record)
        return record

    def get_violations(self):
        """Return every recorded violation."""
        return self.violations

    def get_by_tenant(self, tenant_id: str):
        """Return only the violations for *tenant_id*."""
        return [v for v in self.violations if v["tenant"] == tenant_id]


compliance_checker = ComplianceChecker()

View File

@@ -0,0 +1,26 @@
# governance/policy_registry.py
##INFO:
class PolicyRegistry:
    """Stores per-tenant governance policies with a permissive built-in default."""

    def __init__(self):
        # tenant_id -> {"allowed_roles", "restricted_tasks", "audit_level"}
        self.policies = {}

    def set_policy(self, tenant_id: str, allowed_roles: list, restricted_tasks: list, audit_level: str = "standard"):
        """Create or replace the policy for `tenant_id`; returns the stored dict."""
        policy = {
            "allowed_roles": allowed_roles,
            "restricted_tasks": restricted_tasks,
            "audit_level": audit_level
        }
        self.policies[tenant_id] = policy
        return policy

    def get_policy(self, tenant_id: str):
        """Policy for `tenant_id`, or a fresh default when none has been set."""
        default = {
            "allowed_roles": ["planner", "executor", "critic"],
            "restricted_tasks": [],
            "audit_level": "standard"
        }
        return self.policies.get(tenant_id, default)

    def get_all(self):
        """Mapping of every explicitly-set tenant policy."""
        return self.policies

policy_registry = PolicyRegistry()

28
governance/sla_monitor.py Normal file
View File

@@ -0,0 +1,28 @@
# governance/sla_monitor.py
import time
class SLAMonitor:
    """Evaluates task results against SLA contracts.

    NOTE: despite the attribute name, every check is recorded — breach or not.
    """

    def __init__(self):
        # All check records, in the order they were performed.
        self.violations = []

    def check_sla(self, agent_role: str, task: str, latency: float, output: str, sla):
        """Record one SLA check and return the record.

        A breach occurs when latency exceeds sla["max_latency"] or when the
        sla["success_criteria"] substring is absent from `output`.
        """
        breach = latency > sla["max_latency"] or sla["success_criteria"] not in output
        record = {
            "timestamp": time.time(),
            "agent": agent_role,
            "task": task,
            "latency": latency,
            "output": output,
            "breach": breach
        }
        self.violations.append(record)
        return record

    def get_violations(self):
        """Every recorded check."""
        return self.violations

    def get_by_agent(self, agent_role: str):
        """Recorded checks for one agent role."""
        return [entry for entry in self.violations if entry["agent"] == agent_role]

sla_monitor = SLAMonitor()

View File

@@ -0,0 +1,25 @@
# governance/sla_registry.py
import time
class SLARegistry:
    """Holds SLA contracts keyed by (agent_role, task)."""

    def __init__(self):
        # (agent_role, task) -> contract dict
        self.contracts = {}

    def define_sla(self, agent_role: str, task: str, max_latency: float, success_criteria: str):
        """Register (or overwrite) the SLA contract for an agent/task pair."""
        contract = {
            "agent": agent_role,
            "task": task,
            "max_latency": max_latency,
            "success_criteria": success_criteria,
            "created_at": time.time()
        }
        self.contracts[(agent_role, task)] = contract
        return contract

    def get_sla(self, agent_role: str, task: str):
        """The contract for the pair, or None when undefined."""
        return self.contracts.get((agent_role, task))

    def get_all(self):
        """All registered contracts."""
        return list(self.contracts.values())

sla_registry = SLARegistry()

34
governance/usage_meter.py Normal file
View File

@@ -0,0 +1,34 @@
# governance/usage_meter.py
##INFO:
import time
class UsageMeter:
    """Tracks per-(tenant, agent) usage units against optional quotas."""

    def __init__(self):
        self.usage = {}   # (tenant_id, agent_role) -> units consumed
        self.quotas = {}  # (tenant_id, agent_role) -> max units allowed

    def log_usage(self, tenant_id: str, agent_role: str, units: int = 1):
        """Add `units` to the pair's running total."""
        key = (tenant_id, agent_role)
        self.usage[key] = self.usage.get(key, 0) + units

    def set_quota(self, tenant_id: str, agent_role: str, max_units: int):
        """Cap the pair at `max_units`."""
        self.quotas[(tenant_id, agent_role)] = max_units

    def check_quota(self, tenant_id: str, agent_role: str):
        """Quota status: "unlimited" (no quota set), "exceeded" (used >= limit), or "ok"."""
        key = (tenant_id, agent_role)
        used = self.usage.get(key, 0)
        limit = self.quotas.get(key)
        if limit is None:
            return {"status": "unlimited", "used": used}
        status = "exceeded" if used >= limit else "ok"
        return {"status": status, "used": used, "limit": limit}

    def get_all_usage(self):
        """One row per tracked pair with its usage and (possibly None) limit."""
        return [
            {"tenant": tenant, "agent": agent, "used": used, "limit": self.quotas.get((tenant, agent))}
            for (tenant, agent), used in self.usage.items()
        ]

usage_meter = UsageMeter()

View File

@@ -0,0 +1,26 @@
# identity/agent_identity_registry.py
##INFO: Agent Identity Registry
import time
class AgentIdentityRegistry:
    """Registry of agent identity records, keyed by role.

    NOTE(review): keying by role alone means a second tenant registering the
    same role overwrites the first tenant's record — confirm that is intended.
    """

    def __init__(self):
        # agent_role -> {"id", "role", "traits", "tenant", "created"}
        self.agents = {}

    def register(self, agent_role: str, tenant_id: str, traits: dict):
        """Create (or replace) the identity record for `agent_role`."""
        identity = {
            "id": f"{tenant_id}:{agent_role}",
            "role": agent_role,
            "traits": traits,
            "tenant": tenant_id,
            "created": time.time()
        }
        self.agents[agent_role] = identity
        return identity

    def get(self, agent_role: str):
        """Identity record for the role, or None when unregistered."""
        return self.agents.get(agent_role)

    def get_all(self):
        """All registered identity records."""
        return list(self.agents.values())

agent_identity_registry = AgentIdentityRegistry()

View File

@@ -0,0 +1,29 @@
# identity/reputation_engine.py
##INFO: Reputation Engine
class ReputationEngine:
    """Aggregates endorsement scores and feedback per agent role."""

    def __init__(self):
        # agent_role -> {"score", "endorsements", "feedback"}
        self.reputation = {}

    @staticmethod
    def _blank():
        # Fresh zeroed record — never shared between callers.
        return {"score": 0.0, "endorsements": [], "feedback": []}

    def endorse(self, agent_role: str, score: float, feedback: str):
        """Record one endorsement; score becomes the rounded mean of all endorsements."""
        rep = self.reputation.setdefault(agent_role, self._blank())
        rep["endorsements"].append(score)
        rep["feedback"].append(feedback)
        rep["score"] = round(sum(rep["endorsements"]) / len(rep["endorsements"]), 2)
        return rep

    def get(self, agent_role: str):
        """Reputation record for the role (a zeroed record when unknown)."""
        return self.reputation.get(agent_role, self._blank())

    def get_all(self):
        """Mapping of every role with recorded reputation."""
        return self.reputation

reputation_engine = ReputationEngine()

View File

@@ -0,0 +1,33 @@
# identity/role_evolutioin.py
##INFO: Role Evolution Logic
from identity.agent_identity_registry import agent_identity_registry
from identity.reputation_engine import reputation_engine
class RoleEvolutionEngine:
    """Adjusts an agent's capability traits based on its reputation score."""

    def evolve(self, agent_role: str):
        """Re-derive capabilities for `agent_role` and append a history entry.

        Thresholds: score > 0.85 -> strategist tier, score < 0.5 -> learner
        tier, otherwise executor tier. Returns the updated identity record,
        or an error dict when the agent was never registered (the original
        crashed with AttributeError on the None identity in that case).
        """
        import time  # local: evolution timestamps only
        identity = agent_identity_registry.get(agent_role)
        if identity is None:
            return {"error": f"Agent '{agent_role}' is not registered"}
        reputation = reputation_engine.get(agent_role)
        score = reputation.get("score", 0.0)
        traits = identity.get("traits", {})
        if score > 0.85:
            traits["capabilities"] = ["planner", "strategist", "critic"]
            reason = "High reputation score"
        elif score < 0.5:
            traits["capabilities"] = ["learner", "reviewer"]
            reason = "Low reputation score"
        else:
            traits["capabilities"] = ["executor", "analyst"]
            reason = "Moderate reputation score"
        identity["traits"] = traits
        # Stamp history with the time of THIS evolution; the original reused
        # identity["created"], so every history entry carried the same value.
        identity.setdefault("history", []).append({
            "timestamp": time.time(),
            "reason": reason,
            "updated_traits": traits
        })
        return identity

role_evolution = RoleEvolutionEngine()

View File

@@ -0,0 +1,34 @@
# ingestion/document_ingestor.py
import os
from typing import List
from langchain.docstore.document import Document
from sentence_transformers import SentenceTransformer
from vector_store.base import get_vector_store
from utils.helpers import read_pdf, read_text, read_code
class DocumentIngestor:
    """Ingests text-bearing files from a directory into the vector store."""

    def __init__(self, data_dir="./data"):
        self.data_dir = data_dir
        # NOTE(review): this embedding model is loaded but never used in this
        # class — presumably the vector store embeds documents itself; confirm
        # and consider removing the duplicate model load.
        self.embedding_model = SentenceTransformer("all-MiniLM-L6-v2")
        self.vector_store = get_vector_store()

    def ingest_all(self):
        """Add the text of every supported file in data_dir to the vector store.

        Supported: .pdf, .txt/.md, and common source-code extensions; other
        files are reported and skipped.
        """
        files = [f for f in os.listdir(self.data_dir) if os.path.isfile(os.path.join(self.data_dir, f))]
        for file in files:
            path = os.path.join(self.data_dir, file)
            ext = os.path.splitext(file)[1].lower()
            if ext == ".pdf":
                text = read_pdf(path)
            elif ext in [".txt", ".md"]:
                text = read_text(path)
            elif ext in [".py", ".js", ".java", ".cpp", ".ts"]:
                text = read_code(path)
            else:
                print(f"Unsupported file type: {file}")
                continue
            self.vector_store.add_document(text)
            print(f"Ingested: {file}")

View File

@@ -0,0 +1,42 @@
# ingestion/media_ingestor.py
import os
from PIL import Image
import whisper
import torch
import torchvision.transforms as transforms
from transformers import CLIPProcessor, CLIPModel
from ingestion.document_ingestor import DocumentIngestor
from utils.helpers import extract_audio_from_video, extract_frames_from_video
class MediaIngestor:
    """Ingests images, audio, and video into the vector store via DocumentIngestor."""

    def __init__(self, data_dir="./data"):
        self.data_dir = data_dir
        self.doc_ingestor = DocumentIngestor(data_dir)
        # Whisper handles speech-to-text; CLIP produces image features.
        self.whisper_model = whisper.load_model("base")
        self.clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")

    def ingest_image(self, path: str):
        """Run CLIP over one image and store a placeholder description.

        NOTE(review): the computed CLIP features (`outputs`) are discarded —
        only the filename placeholder text is stored. Confirm whether the
        embedding itself should be persisted instead.
        """
        image = Image.open(path).convert("RGB")
        inputs = self.clip_processor(images=image, return_tensors="pt")
        outputs = self.clip_model.get_image_features(**inputs)
        text = f"Image embedding from {os.path.basename(path)}"
        self.doc_ingestor.vector_store.add_document(text)
        print(f"Ingested image: {path}")

    def ingest_audio(self, path: str):
        """Transcribe an audio file with Whisper and store the transcript."""
        result = self.whisper_model.transcribe(path)
        text = result["text"]
        self.doc_ingestor.vector_store.add_document(text)
        print(f"Ingested audio: {path}")

    def ingest_video(self, path: str):
        """Ingest a video: transcribe its audio track, then ingest extracted frames."""
        audio_path = extract_audio_from_video(path)
        self.ingest_audio(audio_path)
        frame_paths = extract_frames_from_video(path)
        for frame_path in frame_paths:
            self.ingest_image(frame_path)
        print(f"Ingested video: {path}")

View File

@@ -0,0 +1,11 @@
# ingestion/code_ingestor.py
from ingestion.ingestor_base import BaseIngestor
class CodeIngestor(BaseIngestor):
    """Ingestor for source-code files: returns the raw file contents as text."""

    def ingest(self, path: str):
        """Read the file at `path` as UTF-8 and return it unchanged."""
        # Optional: add syntax parsing or comment extraction
        with open(path, "r", encoding="utf-8") as handle:
            return handle.read()

View File

@@ -0,0 +1,68 @@
# ingestion/document_ingestor.py
import os
from ingestion.code_ingestor import CodeIngestor
from utils.helpers import read_pdf, read_text, read_code
from vector_store.base import get_vector_store
from models.llm_loader import get_llm
from utils.logger import logger
class DocumentIngestor:
    """Ingests files from a directory: summarize each with the LLM, then store.

    (Removed a large block of commented-out pre-summarization dispatch code
    that duplicated the live try/except path below.)
    """

    def __init__(self, data_dir="./data"):
        self.data_dir = data_dir
        self.vector_store = get_vector_store()
        self.code_ingestor = CodeIngestor()
        self.llm = get_llm()

    def summarize(self, text: str) -> str:
        """Summarize `text` (truncated to 3000 chars) in Korean via the LLM.

        Falls back to the first 1000 characters when the LLM call fails.
        """
        prompt = f"다음 문서를 한국어로 간결하게 요약해 주세요:\n\n{text[:3000]}"
        try:
            summary = self.llm(prompt)
            return summary.strip()
        except Exception as e:
            logger.error(f"Summarization failed: {e}")
            return text[:1000]  # fallback: truncate

    def ingest_all(self):
        """Summarize and store every supported file in data_dir.

        Supported: .pdf, .txt/.md, and common source-code extensions; other
        files are logged and skipped. A failure on one file is logged and
        does not stop the rest of the batch.
        """
        files = [
            f for f in os.listdir(self.data_dir)
            if os.path.isfile(os.path.join(self.data_dir, f))
        ]
        for file in files:
            path = os.path.join(self.data_dir, file)
            ext = os.path.splitext(file)[1].lower()
            try:
                if ext == ".pdf":
                    text = read_pdf(path)
                elif ext in [".txt", ".md"]:
                    text = read_text(path)
                elif ext in [".py", ".js", ".java", ".cpp", ".ts"]:
                    text = self.code_ingestor.ingest(path)
                else:
                    logger.warning(f"Unsupported file type: {file}")
                    continue
                summary = self.summarize(text)
                self.vector_store.add_document(summary)
                logger.info(f"✅ Ingested and summarized: {file}")
            except Exception as e:
                logger.error(f"❌ Failed to ingest {file}: {e}")

View File

@@ -0,0 +1,9 @@
# ingestion/ingestor_base.py
from abc import ABC, abstractmethod
class BaseIngestor(ABC):
    """Abstract base for all ingestors; a subclass turns one file into text."""

    @abstractmethod
    def ingest(self, path: str):
        """Read the resource at `path` and return its textual content."""
        pass

View File

@@ -0,0 +1,53 @@
# ingestion/media_ingestor.py
import os
from PIL import Image
import whisper
import torch
import torchvision.transforms as transforms
from transformers import CLIPProcessor, CLIPModel
from ingestion.document_ingestor import DocumentIngestor
from utils.helpers import extract_audio_from_video, extract_frames_from_video
class MediaIngestor:
    """Ingests images, audio, and video into the vector store via DocumentIngestor."""

    def __init__(self, data_dir="./data"):
        self.data_dir = data_dir
        self.doc_ingestor = DocumentIngestor(data_dir)
        # Whisper handles speech-to-text; CLIP produces image features.
        self.whisper_model = whisper.load_model("base")
        self.clip_model = CLIPModel.from_pretrained("openai/clip-vit-base-patch32")
        self.clip_processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
        self.device = "cuda" if torch.cuda.is_available() else "cpu"
        self.clip_model.to(self.device)
        # NOTE(review): this torchvision transform is never used anywhere in
        # the class — clip_processor already resizes/normalizes; confirm and
        # consider removing it.
        self.transform = transforms.Compose([
            transforms.Resize((224, 224)),
            transforms.ToTensor(),
            transforms.Normalize(mean=(0.48145466, 0.4578275, 0.40821073),
                                 std=(0.26862954, 0.26130258, 0.27577711))
        ])

    def ingest_image(self, path: str):
        """Run CLIP over one image and store a placeholder description.

        NOTE(review): the computed CLIP features are discarded — only the
        filename placeholder text is stored; confirm intent.
        """
        image = Image.open(path).convert("RGB")
        inputs = self.clip_processor(images=image, return_tensors="pt").to(self.device)
        with torch.no_grad():
            features = self.clip_model.get_image_features(**inputs)
        text = f"Image embedding from {os.path.basename(path)}"
        self.doc_ingestor.vector_store.add_document(text)
        print(f"Ingested image: {path}")

    def ingest_audio(self, path: str):
        """Transcribe an audio file with Whisper and store the transcript."""
        result = self.whisper_model.transcribe(path)
        text = result["text"]
        self.doc_ingestor.vector_store.add_document(text)
        print(f"Ingested audio: {path}")

    def ingest_video(self, path: str):
        """Ingest a video: transcribe its audio track, then ingest extracted frames."""
        audio_path = extract_audio_from_video(path)
        self.ingest_audio(audio_path)
        frame_paths = extract_frames_from_video(path)
        for frame_path in frame_paths:
            self.ingest_image(frame_path)
        print(f"Ingested video: {path}")

246
main.py Normal file
View File

@@ -0,0 +1,246 @@
# main.py
from agents.agent_registry import discover_agents, log_registered_agents
##INFO: # 🔍 Load all agents before app starts
# Discover and register all agent implementations before the FastAPI app is
# built, so the routers imported below can resolve agents at import time.
discover_agents()
log_registered_agents()
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles
from fastapi.responses import JSONResponse
from fastapi.requests import Request
from fastapi.exception_handlers import http_exception_handler
from fastapi.exceptions import RequestValidationError
from routes.api import router as api_router
from routes.data_routes import router as data_router
from routes.dataset_routes import router as dataset_router
from routes.media_routes import router as media_router
from routes.voice_routes import router as voice_router
from routes.memory_routes import router as memory_router
from routes.config_routes import router as config_router
from routes.search_routes import router as search_router
from routes.utils_routes import router as utils_router
from routes.transcript_routes import router as transcript_router
from routes.session_routes import router as session_router
from routes.avatar_routes import router as avatar_router
from routes.emotion_routes import router as emotion_router
from routes.smarthome_routes import router as smarthome_router
from routes.scheduler_routes import router as scheduler_router
from routes.calendar_routes import router as calendar_router
from routes.group_routes import router as group_router
from routes.orchestration_routes import router as orchestration_router
from routes.agent_routes import router as agent_router
from routes.ws_agent_routes import router as ws_agent_router
from routes.chain_routes import router as chain_router
from routes.health_routes import router as health_router
from routes.agent_meta_routes import router as agent_meta_router
from routes.registry_sync_routes import router as sync_router
from routes.tenant_memory_routes import router as tenant_memory_router
from routes.tenant_routes import router as tenant_router
from routes.tenant_memory_dashboard_routes import router as tenant_memory_dashboard_router
from routes.usage_routes import router as usage_router
from routes.quota_routes import router as quota_router
from routes.workflow_routes import router as workflow_router
from routes.capability_routes import router as capability_router
from routes.capability_routing_routes import router as capability_routing_router
from routes.agent_upgrade_routes import router as agent_upgrade_router
from routes.agent_rollback_routes import router as rollback_router
from routes.version_history_routes import router as version_history_router
from routes.version_tagging_routes import router as version_tagging_router
from routes.fallback_routes import router as fallback_router
from routes.workflow_trace_routes import router as trace_router
from routes.workflow_tag_routes import router as tag_router
from routes.tenant_isolation_routes import router as isolation_router
from routes.admin_tools_routes import router as admin_router
from routes.agent_test_routes import router as test_router
from routes.collaboration_routes import router as collab_router
from routes.debate_routes import router as debate_router
from routes.reflection_routes import router as reflection_router
from routes.feedback_routes import router as feedback_router
from routes.reward_routes import router as reward_router
from routes.goal_routes import router as goal_router
from routes.goal_session_routes import router as goal_session_router
from routes.shared_goal_routes import router as shared_goal_router
from routes.notification_routes import router as notify_router
from routes.goal_revision_routes import router as revision_router
from routes.tool_routes import router as tool_router
from routes.template_runner import router as template_router
from routes.semantic_memory_routes import router as semantic_router
from routes.episodic_memory_routes import router as episodic_router
from routes.knowledge_graph_routes import router as graph_router
from routes.role_routes import router as role_router
from routes.collaboration_routes import router as collaboration_router
from routes.coordination_routes import router as coordination_router
from routes.autonomous_planner_routes import router as planner_router
from routes.self_evaluation_routes import router as evaluation_router
from routes.performance_routes import router as performance_router
from routes.action_routes import router as action_router
from routes.tenant_admin_routes import router as tenant_admin_router
from routes.personalization_routes import router as personalization_router
from routes.metrics_routes import router as metrics_router
from routes.behavior_routes import router as behavior_router
from routes.optimizer_routes import router as optimizer_router
from routes.retraining_routes import router as retraining_router
from routes.messenger_routes import router as messenger_router
from routes.consensus_routes import router as consensus_router
from routes.approval_routes import router as approval_router
from routes.governance_routes import router as governance_router
from routes.deployment_routes import router as deployment_router
from routes.plugin_routes import router as plugin_router
from routes.model_router_routes import router as model_router
from routes.inference_routes import router as inference_router
from routes.evaluation_routes import router as evaluation_router
from routes.sla_routes import router as sla_router
from routes.cluster_routes import router as cluster_router
from routes.monitoring_routes import router as monitoring_router
from routes.branding_routes import router as branding_router
from routes.marketplace_routes import router as marketplace_router
from routes.memory_graph_routes import router as memory_graph_router
# from routes.messaging_routes import router as messaging_router
from routes.scheduling_routes import router as scheduling_router
from routes.identity_routes import router as identity_router
from routes.security_routes import router as security_router
from routes.simulation_routes import router as simulation_router
from routes.admin_memory_routes import router as admin_memory_router
from routes.autonomy_routes import router as autonomy_router
app = FastAPI(title="Agentic AI Backend")
##INFO: Global Error Handler for FastAPI
@app.exception_handler(Exception)
async def global_exception_handler(request: Request, exc: Exception):
    """Last-resort handler: log the failure, return a generic 500 payload.

    The original returned the 500 without recording the traceback, which made
    server-side debugging impossible; the client-facing payload is unchanged.
    """
    import logging  # local: keeps this block self-contained
    logging.getLogger(__name__).error(
        "Unhandled error on %s: %s", request.url, exc, exc_info=exc
    )
    return JSONResponse(
        status_code=500,
        content={"detail": f"Unexpected error: {str(exc)}"}
    )
##INFO: ✅ Register routes
# app.include_router(api_router)
app.include_router(api_router, prefix="/api")
# data_router (/api/data/...), Means all endpoints in this router will be nested under /api
app.include_router(data_router, prefix="/api")
app.include_router(dataset_router, prefix="/api")
app.include_router(media_router, prefix="/media")
app.include_router(voice_router, prefix="/voice")
app.include_router(memory_router, prefix="/memory")
app.include_router(config_router, prefix="/config")
app.include_router(search_router, prefix="/search")
app.include_router(utils_router, prefix="/utils")
app.include_router(transcript_router, prefix="/transcript")
app.include_router(session_router, prefix="/session")
app.include_router(avatar_router, prefix="/avatar")
app.include_router(emotion_router, prefix="/emotion")
app.include_router(smarthome_router, prefix="/smarthome")
app.include_router(scheduler_router, prefix="/schedule")
app.include_router(calendar_router, prefix="/calendar")
app.include_router(group_router, prefix="/group")
##INFO: API route to Trigger Orchestration
app.include_router(orchestration_router, prefix="/api")
##INFO: API route for Agent Flow
app.include_router(agent_router, prefix="/api")
app.include_router(agent_meta_router, prefix="/api")
##INFO: WebSocket Streaming routes
app.include_router(ws_agent_router)
##INFO: API route for Agent Chain
app.include_router(chain_router, prefix="/api")
##INFO: Health Check
app.include_router(health_router, prefix="/api")
##INFO: Agent sync
app.include_router(sync_router, prefix="/api")
##INFO: Route for Tenants
app.include_router(tenant_router, prefix="/api")
##INFO: Route for 'user feedback' and 'environment feedback loop'
## see 'feedback_routes.py'
app.include_router(feedback_router)
##INFO: for Tenant-Memory routes
app.include_router(tenant_memory_router, prefix="/tenant-memory")
app.include_router(tenant_memory_dashboard_router, prefix="/tenant-memory")
app.include_router(usage_router, prefix="/tenant")
app.include_router(quota_router, prefix="/tenant")
app.include_router(workflow_router, prefix="/tenant")
app.include_router(capability_router, prefix="/tenant")
app.include_router(capability_routing_router, prefix="/tenant")
app.include_router(agent_upgrade_router, prefix="/tenant")
app.include_router(rollback_router, prefix="/tenant")
app.include_router(version_history_router, prefix="/tenant")
app.include_router(version_tagging_router, prefix="/tenant")
app.include_router(fallback_router, prefix="/tenant")
app.include_router(trace_router, prefix="/tenant")
app.include_router(tag_router, prefix="/tenant")
app.include_router(isolation_router, prefix="/tenant")
app.include_router(reflection_router, prefix="/tenant")
# app.include_router(feedback_router, prefix="/tenant")
app.include_router(reward_router, prefix="/tenant")
app.include_router(semantic_router, prefix="/tenant")
app.include_router(episodic_router, prefix="/tenant")
app.include_router(graph_router, prefix="/tenant")
#INFO: Admin Tools
app.include_router(admin_router, prefix="/admin")
app.include_router(test_router, prefix="/admin")
# NOTE(review): collab_router and collaboration_router are both imported from
# routes.collaboration_routes, so the same router is registered twice under
# /admin (here and further below) — confirm one of the two registrations.
app.include_router(collab_router, prefix="/admin")
app.include_router(debate_router, prefix="/admin")
app.include_router(goal_router, prefix="/admin")
app.include_router(goal_session_router, prefix="/admin")
app.include_router(shared_goal_router, prefix="/admin")
app.include_router(notify_router, prefix="/admin")
app.include_router(revision_router, prefix="/admin")
app.include_router(tool_router, prefix="/admin")
app.include_router(template_router, prefix="/admin")
app.include_router(role_router, prefix="/admin")
app.include_router(collaboration_router, prefix="/admin")
app.include_router(coordination_router, prefix="/admin")
app.include_router(planner_router, prefix="/admin")
# NOTE(review): the name evaluation_router is imported twice above (from
# routes.self_evaluation_routes and then routes.evaluation_routes); the later
# import shadows the earlier one, so this line and the identical line below
# register routes.evaluation_routes twice and self_evaluation_routes never —
# verify which routers were actually intended here.
app.include_router(evaluation_router, prefix="/admin")
app.include_router(performance_router, prefix="/admin")
app.include_router(action_router, prefix="/admin")
app.include_router(tenant_admin_router, prefix="/admin")
app.include_router(personalization_router, prefix="/admin")
app.include_router(metrics_router, prefix="/admin")
app.include_router(behavior_router, prefix="/admin")
app.include_router(optimizer_router, prefix="/admin")
app.include_router(retraining_router, prefix="/admin")
app.include_router(messenger_router, prefix="/admin")
app.include_router(consensus_router, prefix="/admin")
app.include_router(approval_router, prefix="/admin")
app.include_router(governance_router, prefix="/admin")
app.include_router(deployment_router, prefix="/admin")
app.include_router(plugin_router, prefix="/admin")
app.include_router(model_router, prefix="/admin")
app.include_router(inference_router, prefix="/admin")
app.include_router(evaluation_router, prefix="/admin")
app.include_router(sla_router, prefix="/admin")
app.include_router(cluster_router, prefix="/admin")
app.include_router(monitoring_router, prefix="/admin")
app.include_router(branding_router, prefix="/admin")
app.include_router(marketplace_router, prefix="/admin")
app.include_router(memory_graph_router, prefix="/admin")
# app.include_router(messaging_router, prefix="/admin")
app.include_router(scheduling_router, prefix="/admin")
app.include_router(identity_router, prefix="/admin")
app.include_router(security_router, prefix="/admin")
app.include_router(simulation_router, prefix="/admin")
app.include_router(admin_memory_router, prefix="/api/admin")
app.include_router(autonomy_router, prefix="/api")
##NOTE: serve static media files
# NOTE(review): this mount shares the "/media" path with media_router above —
# confirm that the API routes and static files do not collide.
app.mount("/media", StaticFiles(directory="data"), name="media")
if __name__ == "__main__":
    # Development entry point; reload=True is for local development only.
    import uvicorn
    uvicorn.run("main:app", host="0.0.0.0", port=8000, reload=True)

12
memory/context_recall.py Normal file
View File

@@ -0,0 +1,12 @@
# memory/context_recall.py
##INFO: Context Recall Engine
from memory.memory_registry import memory_registry
class ContextRecall:
    """Recalls an agent's memories that match any of the requested tags."""

    def recall(self, agent_role: str, tenant_id: str, query_tags: list[str]):
        """Newest-first memories of (agent_role, tenant_id) tagged with any query tag."""
        memories = memory_registry.get_by_agent(agent_role, tenant_id)
        matching = [
            memory for memory in memories
            if any(tag in memory["tags"] for tag in query_tags)
        ]
        matching.sort(key=lambda memory: memory["timestamp"], reverse=True)
        return matching

context_recall = ContextRecall()

27
memory/episodic_store.py Normal file
View File

@@ -0,0 +1,27 @@
# memory/episodic_store.py
import time
class EpisodicMemoryStore:
    """Per-tenant chronological log of agent task executions ("episodes")."""

    def __init__(self):
        # tenant_id -> list of episode dicts, oldest first
        self.episodes = {}

    def log_episode(self, tenant_id: str, agent_role: str, task: str, output: dict):
        """Append one timestamped episode for the tenant."""
        self.episodes.setdefault(tenant_id, []).append({
            "timestamp": time.time(),
            "agent_role": agent_role,
            "task": task,
            "output": output
        })

    def _tenant_episodes(self, tenant_id: str):
        # Shared lookup; [] for unknown tenants.
        return self.episodes.get(tenant_id, [])

    def get_by_role(self, tenant_id: str, agent_role: str):
        """Episodes recorded by one agent role."""
        return [e for e in self._tenant_episodes(tenant_id) if e["agent_role"] == agent_role]

    def get_by_task(self, tenant_id: str, task: str):
        """Episodes whose task matches exactly."""
        return [e for e in self._tenant_episodes(tenant_id) if e["task"] == task]

    def get_all(self, tenant_id: str):
        """Every episode for the tenant."""
        return self._tenant_episodes(tenant_id)

episodic_store = EpisodicMemoryStore()

22
memory/knowledge_graph.py Normal file
View File

@@ -0,0 +1,22 @@
# memory/knowledge_graph.py
class KnowledgeGraph:
    """Per-tenant store of (subject, predicate, object) triplets."""

    def __init__(self):
        # tenant_id -> list of (subject, predicate, object) tuples
        self.graph = {}

    def add_triplet(self, tenant_id: str, subject: str, predicate: str, obj: str):
        """Record one triplet for the tenant."""
        self.graph.setdefault(tenant_id, []).append((subject, predicate, obj))

    def get_all(self, tenant_id: str):
        """Every triplet for the tenant ([] when unknown)."""
        return self.graph.get(tenant_id, [])

    def _filter(self, tenant_id: str, position: int, value: str):
        # Shared positional filter over the tenant's triplets.
        return [t for t in self.graph.get(tenant_id, []) if t[position] == value]

    def query_by_subject(self, tenant_id: str, subject: str):
        """Triplets whose subject matches."""
        return self._filter(tenant_id, 0, subject)

    def query_by_object(self, tenant_id: str, obj: str):
        """Triplets whose object matches."""
        return self._filter(tenant_id, 2, obj)

    def query_by_predicate(self, tenant_id: str, predicate: str):
        """Triplets whose predicate matches."""
        return self._filter(tenant_id, 1, predicate)

knowledge_graph = KnowledgeGraph()

55
memory/memory_graph.py Normal file
View File

@@ -0,0 +1,55 @@
# memory/memory_graph.py
##INFO:
import time
import uuid
from sentence_transformers import SentenceTransformer
from sklearn.metrics.pairwise import cosine_similarity
model = SentenceTransformer("all-MiniLM-L6-v2")
class MemoryGraph:
    """Tenant-scoped graph of text memories linked by embedding similarity."""

    def __init__(self):
        self.nodes = {}  # node_id -> {id, text, embedding, tags, tenant, timestamp}
        self.edges = []  # (source_id, target_id, similarity) tuples

    def add_memory(self, text: str, tags: list, tenant_id: str):
        """Embed and store a memory node, auto-linking similar same-tenant nodes."""
        embedding = model.encode([text])[0]
        node_id = str(uuid.uuid4())
        self.nodes[node_id] = {
            "id": node_id,
            "text": text,
            "embedding": embedding,
            "tags": tags,
            "tenant": tenant_id,
            "timestamp": time.time()
        }
        self.link_to_existing(node_id)
        return self.nodes[node_id]

    def link_to_existing(self, new_id):
        """Edge from the new node to every same-tenant node with cosine sim > 0.75."""
        new_emb = self.nodes[new_id]["embedding"]
        for existing_id, node in self.nodes.items():
            if existing_id == new_id or node["tenant"] != self.nodes[new_id]["tenant"]:
                continue
            sim = cosine_similarity([new_emb], [node["embedding"]])[0][0]
            if sim > 0.75:
                self.edges.append((new_id, existing_id, round(sim, 4)))

    def recall(self, query: str, tenant_id: str, top_k: int = 5):
        """Top-k same-tenant nodes most similar to `query`, best first."""
        query_emb = model.encode([query])[0]
        scored = []
        for node in self.nodes.values():
            if node["tenant"] != tenant_id:
                continue
            sim = cosine_similarity([query_emb], [node["embedding"]])[0][0]
            scored.append((sim, node))
        # Sort on similarity only. The original `scored.sort(reverse=True)`
        # compared whole (sim, node) tuples, which raises TypeError whenever
        # two similarities tie (dicts are unorderable).
        scored.sort(key=lambda pair: pair[0], reverse=True)
        return [n for _, n in scored[:top_k]]

    def get_graph(self, tenant_id: str):
        """Nodes and edges for one tenant.

        Edges are filtered by the source node's tenant; link_to_existing only
        creates same-tenant edges, so targets match as well.
        """
        nodes = [n for n in self.nodes.values() if n["tenant"] == tenant_id]
        edges = [e for e in self.edges if self.nodes[e[0]]["tenant"] == tenant_id]
        return {"nodes": nodes, "edges": edges}

memory_graph = MemoryGraph()

362
memory/memory_manager.py Normal file
View File

@@ -0,0 +1,362 @@
# memory/memory_manager.py
# import json
# import os
# from datetime import datetime
# from utils.logger import logger
# MEMORY_PATH = "./memory/user_memory.json"
# class MemoryManager:
# def __init__(self):
# self.memory = {}
# self._load_memory()
# def _load_memory(self):
# if os.path.exists(MEMORY_PATH):
# with open(MEMORY_PATH, "r", encoding="utf-8") as f:
# self.memory = json.load(f)
# else:
# self.memory = {
# "name": "Tony",
# "language": "ko",
# "interests": [],
# "history": []
# }
# def _save_memory(self):
# with open(MEMORY_PATH, "w", encoding="utf-8") as f:
# json.dump(self.memory, f, indent=2, ensure_ascii=False)
# def update_profile(self, name=None, language=None, interests=None):
# if name:
# self.memory["name"] = name
# if language:
# self.memory["language"] = language
# if interests:
# self.memory["interests"] = list(set(self.memory["interests"] + interests))
# self._save_memory()
# logger.info(f"Updated profile: {self.memory}")
# def log_interaction(self, query: str, response: str):
# self.memory["history"].append({
# "timestamp": datetime.now().isoformat(),
# "query": query,
# "response": response
# })
# self._save_memory()
# def get_context(self):
# return {
# "name": self.memory["name"],
# "language": self.memory["language"],
# "interests": self.memory["interests"]
# }
# def get_recent_history(self, limit=5):
# return self.memory["history"][-limit:]
import json
import os
import hashlib
from datetime import datetime, timedelta
from functools import lru_cache
import asyncio
from collections import deque
from utils.logger import logger
from models.llm_loader import get_llm
from agents.pattern_learner import PatternLearner
from config import PERSONA_PRESETS
# Process-wide pattern learner, primed from its persisted state at import time.
pattern_learner = PatternLearner()
pattern_learner.load()
# Persistent user-memory location; MAX_CONTEXT_TOKENS is a rough budget —
# character counts are used as the token proxy below.
MEMORY_PATH = "./memory/user_memory.json"
MAX_CONTEXT_TOKENS = 3000  # Adjust based on your model
# NOTE(review): this looks like a method pasted at module level — it takes
# `self` but sits outside any class, so `self.get_custom_personas(...)` will
# fail for any plain caller. Also, lru_cache here keys on (and keeps alive)
# whatever object is passed as `self` (flagged by ruff B019 for methods).
# Confirm intent: either move it into MemoryManager or drop the `self` param.
@lru_cache(maxsize=128)
def get_persona_traits(self, user_id: str, name: str):
    return PERSONA_PRESETS.get(name) or self.get_custom_personas(user_id).get(name)
class MemoryManager:
    def __init__(self):
        """Load persisted memory and prepare per-user session buffers."""
        self.sessions = {}  # session_id → deque of messages
        self.llm = get_llm()
        self._load_memory()
    def _load_memory(self):
        """Load memory from MEMORY_PATH, or initialize the default profile."""
        if os.path.exists(MEMORY_PATH):
            with open(MEMORY_PATH, "r", encoding="utf-8") as f:
                self.memory = json.load(f)
        else:
            # First-run default profile.
            self.memory = {
                "name": "Tony",
                "language": "ko",
                "interests": [],
                "history": [],
                "sessions": {}
            }
    def _save_memory(self):
        """Persist memory (including session buffers) to MEMORY_PATH as JSON."""
        # deques are not JSON-serializable; snapshot them as lists first.
        self.memory["sessions"] = {sid: list(msgs) for sid, msgs in self.sessions.items()}
        with open(MEMORY_PATH, "w", encoding="utf-8") as f:
            json.dump(self.memory, f, indent=2, ensure_ascii=False)
def _get_session_id(self, user_id: str) -> str:
return hashlib.md5(user_id.encode()).hexdigest()
    def update_profile(self, name=None, language=None, interests=None):
        """Merge profile fields into memory and persist; None args leave fields untouched."""
        if name:
            self.memory["name"] = name
        if language:
            self.memory["language"] = language
        if interests:
            # De-duplicates; NOTE: set() does not preserve interest order.
            self.memory["interests"] = list(set(self.memory["interests"] + interests))
        self._save_memory()
        logger.info(f"Updated profile: {self.memory}")
    def log_interaction(self, query: str, response: str):
        """Append one Q/A exchange to history, persist, and feed the pattern learner."""
        timestamp = datetime.utcnow().isoformat()
        self.memory["history"].append({
            "query": query,
            "response": response,
            "timestamp": timestamp
        })
        self._save_memory()
        # Let the pattern learner observe the query timing; persisted immediately.
        pattern_learner.observe(query, timestamp)
        pattern_learner.save()
    def get_context(self):
        """Profile snapshot (name, language, interests) for prompt construction."""
        return {
            "name": self.memory["name"],
            "language": self.memory["language"],
            "interests": self.memory["interests"]
        }

    def get_recent_history(self, limit=5):
        """The most recent `limit` Q/A history entries."""
        return self.memory["history"][-limit:]
def _trim_context(self, messages: deque) -> deque:
total_tokens = sum(len(m["content"]) for m in messages)
while total_tokens > MAX_CONTEXT_TOKENS and len(messages) > 1:
messages.popleft()
total_tokens = sum(len(m["content"]) for m in messages)
return messages
def _summarize_context(self, messages: deque) -> str:
context = "\n".join([f'{m["role"]}: {m["content"]}' for m in messages])
prompt = f"다음 대화 내용을 간단히 요약해 주세요:\n\n{context}"
# return self.llm(prompt).strip()
return self.retry_llm_call(prompt).strip()
def process(self, user_input: str, user_id: str = "default") -> str:
session_id = self._get_session_id(user_id)
if session_id not in self.sessions:
self.sessions[session_id] = deque(self.memory.get("sessions", {}).get(session_id, []))
messages = self.sessions[session_id]
messages.append({"role": "user", "content": user_input})
messages = self._trim_context(messages)
context = "\n".join([f'{m["role"]}: {m["content"]}' for m in messages])
prompt = f"다음을 고려하여 대화를 이어가세요:\n\n{context}"
response = self.llm(prompt).strip()
response = self.retry_llm_call(prompt).strip()
messages.append({"role": "assistant", "content": response})
self.sessions[session_id] = messages
self._save_memory()
return response
def get_summary(self, user_id: str = "default") -> str:
session_id = self._get_session_id(user_id)
messages = self.sessions.get(session_id, deque())
return self._summarize_context(messages)
def summarize_recent(self, window=20):
recent = self.get_recent_history(window)
if not recent:
return None
text = "\n".join([f"Q: {h['query']}\nA: {h['response']}" for h in recent])
prompt = f"다음 질문과 응답을 요약해 주세요:\n{text}"
# summary = self.llm(prompt).strip()
summary = self.retry_llm_call(prompt).strip()
self.memory.setdefault("summaries", []).append({
"summary": summary,
"timestamp": datetime.utcnow().isoformat(),
"source_count": len(recent)
})
self._save_memory()
return summary
def prune_history(self, older_than_days=30):
cutoff = datetime.utcnow() - timedelta(days=older_than_days)
self.memory["history"] = [
h for h in self.memory["history"]
if datetime.fromisoformat(h["timestamp"]) > cutoff
]
self._save_memory()
def set_last_intent(self, session_id: str, intent: str):
self.memory.setdefault("session_intents", {})[session_id] = intent
self._save_memory()
def get_last_intent(self, session_id: str) -> str:
return self.memory.get("session_intents", {}).get(session_id, "")
##INFO: Tone-Aware response generation
def generate_tone_adapted_response(self, user_input: str, user_id: str = "default") -> str:
session_id = self._get_session_id(user_id)
if session_id not in self.sessions:
self.sessions[session_id] = deque(self.memory.get("sessions", {}).get(session_id, []))
messages = self.sessions[session_id]
messages.append({"role": "user", "content": user_input})
messages = self._trim_context(messages)
# Emotion analysis
from routes.emotion_routes import analyze_emotion
emotion_data = analyze_emotion({"utterance": user_input, "language": self.memory["language"]})
tone = emotion_data["tone"]
context = "\n".join([f'{m["role"]}: {m["content"]}' for m in messages])
prompt = f"""다음을 고려하여 대화를 이어가세요. 응답은 '{tone}' 스타일로 작성하세요:\n\n{context}"""
# response = self.llm(prompt).strip()
response = self.retry_llm_call(prompt).strip()
messages.append({"role": "assistant", "content": response})
self.sessions[session_id] = messages
self._save_memory()
return response
##INFO: Personality Profiles
def update_personality(self, user_id: str, traits: dict):
self.memory.setdefault("personalities", {})[user_id] = traits
self._save_memory()
def get_personality(self, user_id: str) -> dict:
return self.memory.get("personalities", {}).get(user_id, {
"tone": "neutral",
"style": "friendly",
"formality": "informal"
})
##INFO: saving named custom personas
def save_custom_persona(self, user_id: str, name: str, traits: dict):
self.memory.setdefault("custom_personas", {}).setdefault(user_id, {})[name] = traits
self._save_memory()
def get_custom_personas(self, user_id: str) -> dict:
return self.memory.get("custom_personas", {}).get(user_id, {})
##INFO: Persona history tracking
def save_persona_history(self, user_id: str, persona_name: str):
self.memory.setdefault("persona_history", {}).setdefault(user_id, []).append({
"name": persona_name,
"timestamp": datetime.now().isoformat()
})
self._save_memory()
def get_persona_history(self, user_id: str):
return self.memory.get("persona_history", {}).get(user_id, [])
##INFO: Semantic Search
def search_memory(self, query: str, user_id: str):
from sentence_transformers import SentenceTransformer, util
model = SentenceTransformer("all-MiniLM-L6-v2")
memories = self.memory.get("sessions", {}).get(user_id, [])
texts = [m["content"] for m in memories if m["role"] == "user"]
embeddings = model.encode(texts, convert_to_tensor=True)
query_embedding = model.encode(query, convert_to_tensor=True)
hits = util.semantic_search(query_embedding, embeddings, top_k=5)[0]
return [texts[h["corpus_id"]] for h in hits]
##INFO: Async LLM Execution
async def async_llm(self, prompt: str) -> str:
import asyncio
loop = asyncio.get_event_loop()
return await loop.run_in_executor(None, self.llm, prompt)
async def async_process(self, user_input: str, user_id: str = "default") -> str:
session_id = self._get_session_id(user_id)
if session_id not in self.sessions:
self.sessions[session_id] = deque(self.memory.get("sessions", {}).get(session_id, []))
messages = self.sessions[session_id]
messages.append({"role": "user", "content": user_input})
messages = self._trim_context(messages)
context = "\n".join([f'{m["role"]}: {m["content"]}' for m in messages])
prompt = f"다음을 고려하여 대화를 이어가세요:\n\n{context}"
response = await self.async_llm(prompt)
messages.append({"role": "assistant", "content": response})
self.sessions[session_id] = messages
self._save_memory()
return response
async def async_generate_tone_adapted_response(self, user_input: str, user_id: str = "default") -> str:
session_id = self._get_session_id(user_id)
if session_id not in self.sessions:
self.sessions[session_id] = deque(self.memory.get("sessions", {}).get(session_id, []))
messages = self.sessions[session_id]
messages.append({"role": "user", "content": user_input})
messages = self._trim_context(messages)
from routes.emotion_routes import analyze_emotion
emotion_data = analyze_emotion({"utterance": user_input, "language": self.memory["language"]})
tone = emotion_data["tone"]
context = "\n".join([f'{m["role"]}: {m["content"]}' for m in messages])
prompt = f"""다음을 고려하여 대화를 이어가세요. 응답은 '{tone}' 스타일로 작성하세요:\n\n{context}"""
response = await self.async_llm(prompt)
messages.append({"role": "assistant", "content": response})
self.sessions[session_id] = messages
self._save_memory()
return response
##INFO: Async LLM with Retry + Timeout
async def retry_llm_call(self, prompt: str, retries: int = 3, timeout: int = 10) -> str:
import asyncio
last_error = None
for attempt in range(retries):
try:
return await asyncio.wait_for(self.async_llm(prompt), timeout=timeout)
except asyncio.TimeoutError:
last_error = "Timeout"
except Exception as e:
last_error = str(e)
await asyncio.sleep(1) # brief delay before retry
return f"[LLM failed after {retries} attempts: {last_error}]"
##INFO: Sync Wrapper, Keep using sync methods but still benefit from retry logic
def retry_llm_sync(self, prompt: str, retries: int = 3, timeout: int = 10) -> str:
import asyncio
return asyncio.run(self.retry_llm_call(prompt, retries, timeout))
##INFO: Task History Tracking
def log_task(self, user_id: str, task: str, result: str):
self.memory.setdefault("task_history", {}).setdefault(user_id, []).append({
"task": task,
"result": result,
"timestamp": datetime.utcnow().isoformat()
})
self._save_memory()
def get_task_history(self, user_id: str):
return self.memory.get("task_history", {}).get(user_id, [])

27
memory/memory_registry.py Normal file
View File

@@ -0,0 +1,27 @@
# memory/memory_registry.py
##INFO: Memory Registry
import time
class MemoryRegistry:
    """In-memory store of agent memory entries, scoped per tenant."""

    def __init__(self):
        # Each entry is a dict: {agent_role, tenant_id, content, tags, timestamp}
        self.memories = []

    def store(self, agent_role: str, tenant_id: str, content: str, tags: list[str]):
        """Record a memory entry (stamped with time.time()) and return it."""
        entry = {
            "agent_role": agent_role,
            "tenant_id": tenant_id,
            "content": content,
            "tags": tags,
            "timestamp": time.time(),
        }
        self.memories.append(entry)
        return entry

    def get_by_agent(self, agent_role: str, tenant_id: str):
        """All entries written by *agent_role* for *tenant_id*, in insertion order."""
        matches = []
        for entry in self.memories:
            if entry["tenant_id"] == tenant_id and entry["agent_role"] == agent_role:
                matches.append(entry)
        return matches

    def search_by_tag(self, tenant_id: str, tag: str):
        """All entries for *tenant_id* that carry *tag*, in insertion order."""
        return [
            entry
            for entry in self.memories
            if entry["tenant_id"] == tenant_id and tag in entry["tags"]
        ]
memory_registry = MemoryRegistry()

Some files were not shown because too many files have changed in this diff Show More