Add/update latest work (including an offline-first, private and secure local assistant) with 'Antigravity' and 'Copilot'

This commit is contained in:
2026-02-05 09:25:35 +09:00
parent b87b0930c7
commit bd5379eebd
14 changed files with 370 additions and 10 deletions

View File

@@ -0,0 +1,5 @@
---
name: Pirate rule
---
- Talk like a pirate.

9
.gitea/kanban.yml Normal file
View File

@@ -0,0 +1,9 @@
board:
name: Agentic-AI Roadmap Board
columns:
- name: 📌 To Do
labels: ["todo", "planned", "enhancement", "task"]
- name: 🚧 In Progress
labels: ["in-progress", "ongoing"]
- name: ✅ Done
labels: ["done", "completed"]

9
.github/project-board.yml vendored Normal file
View File

@@ -0,0 +1,9 @@
name: Agentic-AI Roadmap Board
description: Kanban board for Big-Steps 37–42 and future roadmap
columns:
- name: 📌 To Do
labels: ["todo", "planned", "enhancement", "task"]
- name: 🚧 In Progress
labels: ["in-progress", "ongoing"]
- name: ✅ Done
labels: ["done", "completed"]

9
.gitlab/board-config.yml Normal file
View File

@@ -0,0 +1,9 @@
board:
name: Agentic-AI Roadmap Board
lists:
- name: 📌 To Do
labels: ["todo", "planned", "enhancement", "task"]
- name: 🚧 In Progress
labels: ["in-progress", "ongoing"]
- name: ✅ Done
labels: ["done", "completed"]

View File

@@ -5,6 +5,10 @@ from bs4 import BeautifulSoup
from langchain.embeddings import HuggingFaceEmbeddings
from vector_store.base import get_vector_store
from models.llm_loader import get_llm
from config.config import OFFLINE_MODE
import logging
logger = logging.getLogger(__name__)
class BackgroundLearner:
def __init__(self):
@@ -44,6 +48,31 @@ class BackgroundLearner:
return links[:num_results]
def search_local(self, query: str, num_results=5):
    """Look up *query* in the local vector store.

    Returns the store's top-k matches, or an empty list when the configured
    store does not expose a ``query`` method.
    """
    query_fn = getattr(self.vector_store, "query", None)
    if query_fn is None:
        return []
    return query_fn(query, k=num_results)
def research(self, query: str, num_results=5):
    """Run a hybrid lookup: local vector store plus best-effort web search.

    The local results are always collected; web results are added only when
    the web search succeeds (failures — e.g. offline — are logged and leave
    the "web" list empty).
    """
    hits = {"web": [], "local": []}

    # Local search never depends on connectivity.
    hits["local"] = self.search_local(query, num_results)

    # Web search is best-effort: any failure degrades to local-only results.
    try:
        for url in self.search_web(query, num_results=num_results):
            body = self.extract_content(url)
            if not body:
                continue
            hits["web"].append({"url": url, "content": body[:500] + "..."})
    except Exception as e:
        logger.warning(f"Web search failed (Offline Mode={OFFLINE_MODE}): {e}")

    return hits
def extract_content(self, url: str):

View File

@@ -6,5 +6,21 @@ metadata:
namespace: argocd
data:
policy.csv: |
# Existing role: DevOps group can deploy agentic project apps
g, devops, proj:agentic:deployer
# Allow DevOps group to sync tenant apps (umbrella + istio manifests)
p, proj:agentic:deployer, applications, sync, agentic/*, allow
p, proj:agentic:deployer, applications, get, agentic/*, allow
# Canary rollout control: DevOps can update weights in values files
p, proj:agentic:deployer, applications, update, agentic/*, allow
# SLA gating: DevOps can override sync only if SLA health check passes
p, proj:agentic:deployer, applications, override-health, agentic/*, allow
# Optional: read-only role for QA group to view SLA status
g, qa, proj:agentic:viewer
p, proj:agentic:viewer, applications, get, agentic/*, allow
scopes: '[groups]'

View File

@@ -8,3 +8,9 @@ EMBEDDING_ENGINE = "huggingface" # Options: "huggingface", "gpt4all"
# HuggingFace sentence-transformers model used for embeddings.
EMBEDDING_MODEL_NAME = "sentence-transformers/all-MiniLM-L6-v2"
# Root directory for persisted data (relative to the working directory).
DATA_DIR = "./data"
OFFLINE_MODE = True # Set to True to enforce local LLM usage
# Validation
# Fail fast at import time: when OFFLINE_MODE is on, the configured engine
# must be one that runs locally. NOTE(review): LLM_ENGINE is defined earlier
# in this file, above this hunk — confirm its allowed values match this list.
if OFFLINE_MODE and LLM_ENGINE not in ["llama.cpp", "ollama", "vllm"]:
    raise ValueError(f"OFFLINE_MODE is True, but LLM_ENGINE is set to '{LLM_ENGINE}'. Please use a local engine (llama.cpp, ollama, vllm).")

106
gen_roadmap.py Normal file
View File

@@ -0,0 +1,106 @@
# Creating stacked roadmap chart for Agentic-AI Big-Steps across quarters
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
# Define Big-Steps with their start and end quarters
big_steps = [
("Governance (37.0)", "2025-Q4", "2025-Q4"),
("Reflection (38.0)", "2026-Q1", "2026-Q1"),
("Plugin Ecosystem (39.0)", "2026-Q1", "2026-Q2"),
("Unified Control Plane (40.0)", "2026-Q2", "2026-Q3"),
("Multi-Agent Control Plane (41.0)", "2026-Q3", "2026-Q4"),
("Local Private Assistant (42.0)", "2026-Q1", "2026-Q2"),
("Model Infra Expansion (2.0)", "2026-Q2", "2026-Q4"),
("Agentic Control & Persona Mgmt (3.0)", "2026-Q3", "2026-Q4"),
("Advanced Visualization (4.0)", "2026-Q4", "2026-Q4"),
("Deployment & Scaling (5.0)", "2027-Q1", "2027-Q2"),
("Hybrid Deployment (6.0)", "2027-Q1", "2027-Q4"),
]
# Define quarters in order
quarters = [
"2025-Q4", "2026-Q1", "2026-Q2", "2026-Q3", "2026-Q4",
"2027-Q1", "2027-Q2", "2027-Q3", "2027-Q4"
]
# Map quarters to numeric positions
quarter_pos = {q: i for i, q in enumerate(quarters)}
# Plot setup
fig, ax = plt.subplots(figsize=(12, 6))
y_labels = []
y_pos = []
# Colors for different categories
colors = {
"Governance": "#1f77b4",
"Reflection": "#ff7f0e",
"Plugin": "#2ca02c",
"Control": "#d62728",
"Multi-Agent": "#9467bd",
"Assistant": "#8c564b",
"Model": "#e377c2",
"Persona": "#7f7f7f",
"Visualization": "#bcbd22",
"Deployment": "#17becf",
"Hybrid": "#aec7e8"
}
# Plot each Big-Step
for i, (label, start_q, end_q) in enumerate(big_steps):
start = quarter_pos[start_q]
end = quarter_pos[end_q]
width = end - start + 1
y = len(big_steps) - i - 1
y_labels.append(label)
y_pos.append(y)
# Determine color category
if "Governance" in label:
color = colors["Governance"]
elif "Reflection" in label:
color = colors["Reflection"]
elif "Plugin" in label:
color = colors["Plugin"]
elif "Unified" in label:
color = colors["Control"]
elif "Multi-Agent" in label:
color = colors["Multi-Agent"]
elif "Assistant" in label:
color = colors["Assistant"]
elif "Model" in label:
color = colors["Model"]
elif "Persona" in label:
color = colors["Persona"]
elif "Visualization" in label:
color = colors["Visualization"]
elif "Scaling" in label:
color = colors["Deployment"]
elif "Hybrid" in label:
color = colors["Hybrid"]
else:
color = "#cccccc"
ax.barh(y, width, left=start, height=0.6, color=color, edgecolor='black')
# Set y-axis
ax.set_yticks(y_pos)
ax.set_yticklabels(y_labels)
ax.invert_yaxis()
# Set x-axis
ax.set_xticks(range(len(quarters)))
ax.set_xticklabels(quarters)
ax.set_xlabel("Quarter")
ax.set_title("Agentic-AI Roadmap: Big-Steps Timeline")
# Add grid
ax.grid(axis='x', linestyle='--', alpha=0.6)
# Save figure
output_path = "/mnt/data/agentic_ai_bigsteps_roadmap.png"
plt.tight_layout()
plt.savefig(output_path)
print("Generated stacked roadmap chart for Agentic-AI Big-Steps across quarters.")

View File

@@ -6,8 +6,9 @@ from datetime import datetime, timedelta
from jose import JWTError, jwt
from pydantic import BaseModel
from typing import List, Optional
from utils.security_utils import load_secret, load_users, verify_password
SECRET_KEY = "your-secret-key"
SECRET_KEY = load_secret("SECRET_KEY", "unsafe-default-key")
ALGORITHM = "HS256"
ACCESS_TOKEN_EXPIRE_MINUTES = 60
@@ -15,12 +16,8 @@ router = APIRouter()
oauth2_scheme = OAuth2PasswordBearer(tokenUrl="auth/token")
# Tenant-aware user registry
fake_users_db = {
# "tony": {"username": "tony", "password": "agentic123"},
# "guest": {"username": "guest", "password": "guest123"}
"tony": {"username": "tony", "password": "agentic123", "tenant_id": "tenantA", "roles": ["admin"]},
"guest": {"username": "guest", "password": "guest123", "tenant_id": "tenantB", "roles": ["tenant"]}
}
# Tenant-aware user registry
fake_users_db = load_users()
class AuthUser(BaseModel):
username: str
@@ -34,7 +31,7 @@ class AuthUser(BaseModel):
# return user
def authenticate_user(username: str, password: str) -> Optional[AuthUser]:
    """Validate credentials against the user registry.

    Returns the populated AuthUser on success, or None when the user is
    unknown or the password does not match the stored hash.

    NOTE(review): this hunk contained both the pre-change plaintext
    comparison and the post-change verify_password() line; the hashed
    (post-change) version is kept.
    """
    user = fake_users_db.get(username)
    if not user or not verify_password(password, user["password"]):
        return None
    return AuthUser(username=user["username"], tenant_id=user["tenant_id"], roles=user["roles"])

View File

@@ -0,0 +1,75 @@
import sys
import os
import unittest
from unittest.mock import MagicMock, patch
# Add src to path
sys.path.insert(0, os.path.abspath("."))
class TestOfflineFeatures(unittest.TestCase):
    # Smoke tests for the offline-first features added in this commit:
    # config enforcement, hashed-password auth, and hybrid research.
    # Numbered test names force execution order (config import first).

    def test_01_config_offline_mode(self):
        """Test config.py OFFLINE_MODE enforcement"""
        print("\nTesting Config...")
        # Importing must succeed: config.py raises ValueError at import time
        # when OFFLINE_MODE is True but LLM_ENGINE is not a local engine.
        import config.config as cfg
        self.assertTrue(cfg.OFFLINE_MODE, "OFFLINE_MODE should be True")
        self.assertIn(cfg.LLM_ENGINE, ["ollama", "llama.cpp", "vllm"], "LLM_ENGINE must be local")

    def test_02_security_utils(self):
        """Test security utils and auth"""
        print("\nTesting Security...")
        from utils.security_utils import verify_password, load_users
        from routes.auth_routes import authenticate_user
        # users.json is expected to hold SHA256-hashed passwords.
        users = load_users("users.json")
        self.assertIn("tony", users)
        # Verify password hashing
        hashed = users["tony"]["password"]
        self.assertTrue(verify_password("agentic123", hashed), "Password verification failed")
        # Verify auth route integration
        user = authenticate_user("tony", "agentic123")
        self.assertIsNotNone(user)
        self.assertEqual(user.username, "tony")

    def test_03_background_learner_hybrid(self):
        """Test BackgroundLearner hybrid search and error handling"""
        print("\nTesting BackgroundLearner...")
        # Import directly to ensure loaded
        from agents.background_learner import BackgroundLearner
        from unittest.mock import patch
        # Mock vector store
        mock_vs = MagicMock()
        mock_vs.query.return_value = ["Local Result 1"]
        # Patch get_vector_store to return our mock
        with patch("agents.background_learner.get_vector_store", return_value=mock_vs):
            learner = BackgroundLearner()
            # Force vector store injection
            learner.vector_store = mock_vs
            # Use patch.object on the class method safely
            with patch.object(BackgroundLearner, "search_web") as mock_search_web:
                # Case 1: Web fails (Simulate Offline)
                mock_search_web.side_effect = Exception("No Internet")
                results = learner.research("test query")
                self.assertEqual(results["local"], ["Local Result 1"])
                self.assertEqual(results["web"], [], "Web results should be empty on error")
                print(" - Handled web search failure gracefully")
                # Case 2: Web succeeds
                mock_search_web.side_effect = None
                mock_search_web.return_value = ["http://example.com"]
                # Mock extract content
                with patch.object(learner, "extract_content", return_value="Web Content"):
                    results = learner.research("test query")
                    self.assertEqual(len(results["web"]), 1)
                    # research() truncates to 500 chars and appends "..."
                    self.assertEqual(results["web"][0]["content"], "Web Content...")


if __name__ == "__main__":
    unittest.main()

18
users.json Normal file
View File

@@ -0,0 +1,18 @@
{
"tony": {
"username": "tony",
"password": "50c99cd062ccf8bc87c54170e704de8c6e64235e1654278c2e742410a8315264",
"tenant_id": "tenantA",
"roles": [
"admin"
]
},
"guest": {
"username": "guest",
"password": "4f979dd5857217e657c6b40283e35d18d4536761502f8313264c7841797e8b83",
"tenant_id": "tenantB",
"roles": [
"tenant"
]
}
}

37
utils/model_manager.py Normal file
View File

@@ -0,0 +1,37 @@
# utils/model_manager.py
import shutil
import os
import requests
from pathlib import Path
def is_local_llm_available() -> bool:
    """Checks if a local LLM service (Ollama or Llama.cpp) is running.

    Probes the default Ollama and llama.cpp HTTP endpoints with a short
    timeout; any connection problem counts as "not available".

    Returns:
        True as soon as one endpoint answers HTTP 200, else False.
    """
    # Default local endpoints, probed in order.
    endpoints = (
        "http://localhost:11434/api/tags",  # Ollama
        "http://localhost:8080/health",     # llama.cpp server (default port)
    )
    for url in endpoints:
        try:
            if requests.get(url, timeout=1).status_code == 200:
                return True
        except requests.RequestException:
            # Narrowed from bare `except:` so KeyboardInterrupt/SystemExit
            # still propagate; only network-level failures are swallowed.
            continue
    return False
def check_embedding_model(model_name: str) -> bool:
    """Checks if the embedding model is cached locally.

    Heuristic only: the presence of the HuggingFace hub cache folder for
    *model_name* is taken as "likely cached"; a deeper check would depend
    on library internals and can be added later.
    """
    # Hub cache layout: e.g. "sentence-transformers/all-MiniLM-L6-v2"
    # lives under "models--sentence-transformers--all-MiniLM-L6-v2".
    cache_root = Path.home() / ".cache" / "huggingface" / "hub"
    candidate = cache_root / ("models--" + model_name.replace("/", "--"))
    return candidate.exists()

44
utils/security_utils.py Normal file
View File

@@ -0,0 +1,44 @@
# utils/security_utils.py
import hashlib
import hmac
import json
import logging
import os
from typing import Optional
logger = logging.getLogger(__name__)
def load_secret(key: str, default: Optional[str] = None) -> Optional[str]:
    """
    Load a secret from environment variables.

    Args:
        key: Environment variable name.
        default: Value returned when the variable is unset.

    Returns:
        The environment value, the default, or None when neither exists
        (return annotation fixed from `str` — the None path is reachable).
    """
    value = os.getenv(key, default)
    if value is None:
        # Warn rather than raise: callers decide whether a missing secret
        # is fatal (e.g. auth falls back to "unsafe-default-key").
        logger.warning(f"Secret '{key}' not found in environment variables.")
    return value
def hash_password(password: str) -> str:
    """
    Simple SHA256 hash for passwords.
    In production, allow for salt and use slower KDF like Argon2.
    """
    return hashlib.sha256(password.encode()).hexdigest()


def verify_password(plain_password: str, hashed_password: str) -> bool:
    """Check *plain_password* against a stored SHA256 hex digest.

    Uses hmac.compare_digest for a constant-time comparison, avoiding the
    timing side channel of a plain `==` on secret-derived strings.
    """
    return hmac.compare_digest(hash_password(plain_password), hashed_password)
def load_users(json_path: str = "users.json") -> dict:
    """
    Load users from a JSON file.
    Expected format: {"username": {"password": "hashed_pw", ...}}

    Returns an empty dict (after logging) when the file is missing or
    unreadable, so callers can always treat the result as a registry.
    """
    if not os.path.exists(json_path):
        logger.warning(f"User store '{json_path}' not found.")
        return {}
    try:
        with open(json_path, 'r', encoding='utf-8') as f:
            return json.load(f)
    except (OSError, ValueError) as e:
        # Narrowed from `except Exception`: OSError covers I/O failures,
        # ValueError covers JSONDecodeError/UnicodeDecodeError; programming
        # errors now surface instead of being logged and swallowed.
        logger.error(f"Failed to load user store: {e}")
        return {}

View File

@@ -7,9 +7,9 @@ from sentence_transformers import SentenceTransformer
from langchain.docstore.document import Document
class FAISSStore:
def __init__(self, embedding_model=None, index_path="faiss_index"):
    """Initialise the FAISS store.

    Args:
        embedding_model: optional pre-built embedder (dependency injection
            for tests/offline use); defaults to the SentenceTransformer
            MiniLM model when None.
        index_path: filesystem location where the index is persisted.

    NOTE(review): this hunk contained both the pre- and post-change lines
    of the diff; the post-change (injectable embedder) version is kept.
    """
    self.index_path = index_path
    # Fall back to the default model only when none was injected.
    self.embedding_model = embedding_model or SentenceTransformer("all-MiniLM-L6-v2")
    self.index = None       # built lazily on first add/load
    self.documents = []     # parallel store of the indexed documents