# Changelog: NatirisMaster.py updated — NaturalLanguageEngine optimized;
# PsychologyEngine & arousal engine; WebUI (FastAPI) with chat API;
# bridges: ComfyUI, Ollama, Vision; admin auth system;
# .gitignore added (checkpoints, logs, generated).
#!/usr/bin/env python3
"""
Natiris Master Orchestrator — full system integration.

Wires together all components: Core, ComfyUI, Vision, NaturalLanguage.

Workflow:
1. User input → state update
2. State → ComfyBridge (image generation)
3. Image → VisionBridge (analysis)
4. Vision → core state update
5. State → NaturalLanguage (response generation)
6. Response + image → user

Author: Recovery-Agent
Phase: FINAL INTEGRATION
"""
import json
import os
import subprocess
from datetime import datetime, timezone
from pathlib import Path

# Root of the Natiris installation; all component paths hang off it.
PROJECT_ROOT = os.path.expanduser("~/natiris")

# Component locations relative to PROJECT_ROOT.
_RELATIVE_PATHS = {
    "state": "core/natiris_full_state.json",
    "comfy_script": "bridges/ComfyBridge.py",
    "vision_script": "bridges/VisionBridge_v2.py",
    "natural_script": "core/NaturalLanguageEngine.py",
    "generated_dir": "generated",
}

# Absolute paths used throughout the orchestrator.
PATHS = {key: os.path.join(PROJECT_ROOT, rel) for key, rel in _RELATIVE_PATHS.items()}
||
class NatirisMaster:
|
||
"""Haupt-Orchestrator für das vollständige Natiris-System"""
|
||
|
||
def __init__(self):
|
||
self.state = self._load_state()
|
||
self.last_image = None
|
||
|
||
def _load_state(self):
|
||
"""Lädt aktuellen State"""
|
||
if os.path.exists(PATHS["state"]):
|
||
with open(PATHS["state"]) as f:
|
||
return json.load(f)
|
||
return {"core_state": {}, "modules": {}}
|
||
|
||
def _save_state(self):
|
||
"""Speichert State"""
|
||
with open(PATHS["state"], "w") as f:
|
||
json.dump(self.state, f, indent=2)
|
||
|
||
def update_state_from_input(self, user_input):
|
||
"""Aktualisiert State basierend auf User-Input"""
|
||
core = self.state.get("core_state", {})
|
||
|
||
# Einfache Sentiment-Analyse
|
||
positive_words = ["lieb", "schön", "gut", "danke", "vermiss", "nah", "warm", ".trust"]
|
||
negative_words = ["weck", "distanz", "kalt", "weg", "ignore"]
|
||
|
||
text_lower = user_input.lower()
|
||
|
||
# Trust-Updates
|
||
if any(w in text_lower for w in ["vermiss", "lieb", "nah", "intim", "nah"]):
|
||
core["trust"] = min(10, core.get("trust", 7) + 0.5)
|
||
core["mood"] = min(10, core.get("mood", 5) + 1)
|
||
elif any(w in text_lower for w in ["weck", "kalt", "distanz"]):
|
||
core["trust"] = max(0, core.get("trust", 7) - 0.3)
|
||
core["mood"] = max(0, core.get("mood", 5) - 0.5)
|
||
|
||
# Stimmungs-Keywords
|
||
if any(w in text_lower for w in ["glücklich", "freu", "lach"]):
|
||
core["mood"] = min(10, core.get("mood", 5) + 0.8)
|
||
elif any(w in text_lower for w in ["traurig", "wein", "schade"]):
|
||
core["mood"] = max(0, core.get("mood", 5) - 0.8)
|
||
|
||
self.state["core_state"] = core
|
||
self._save_state()
|
||
return core
|
||
|
||
def generate_image(self):
|
||
"""Generiert Bild über ComfyBridge"""
|
||
print("🎨 Generating image...")
|
||
|
||
try:
|
||
result = subprocess.run(
|
||
["python3", PATHS["comfy_script"]],
|
||
capture_output=True,
|
||
text=True,
|
||
timeout=120
|
||
)
|
||
|
||
# Finde neuestes Bild
|
||
gen_dir = Path(PATHS["generated_dir"])
|
||
images = sorted(gen_dir.glob("natiris_*.png"), key=lambda x: x.stat().st_mtime, reverse=True)
|
||
|
||
if images:
|
||
self.last_image = str(images[0])
|
||
print(f"✅ Image generated: {images[0].name}")
|
||
return {"success": True, "path": self.last_image}
|
||
|
||
return {"success": False, "error": "No image found"}
|
||
|
||
except Exception as e:
|
||
return {"success": False, "error": str(e)}
|
||
|
||
def analyze_image(self, image_path=None):
|
||
"""Analysiert Bild über VisionBridge"""
|
||
if not image_path:
|
||
image_path = self.last_image
|
||
|
||
if not image_path or not os.path.exists(image_path):
|
||
print("⚠ No image to analyze")
|
||
return None
|
||
|
||
print("👁 Analyzing image...")
|
||
|
||
try:
|
||
result = subprocess.run(
|
||
["python3", PATHS["vision_script"], "--image", image_path, "--no-update-core"],
|
||
capture_output=True,
|
||
text=True,
|
||
timeout=60
|
||
)
|
||
|
||
# Vision-Daten extrahieren
|
||
vision_output = os.path.join(PROJECT_ROOT, "bridges/vision_analysis.json")
|
||
if os.path.exists(vision_output):
|
||
with open(vision_output) as f:
|
||
analysis = json.load(f)
|
||
|
||
# Update Core mit Vision-Daten
|
||
if analysis.get("success"):
|
||
scores = analysis.get("parsed_scores", {})
|
||
core = self.state.get("core_state", {})
|
||
|
||
if "mood_delta" in scores:
|
||
core["mood"] = max(0, min(10, core.get("mood", 5) + scores["mood_delta"]))
|
||
if "anxiety_delta" in scores:
|
||
core["anxiety"] = max(0, min(10, core.get("anxiety", 0) + scores["anxiety_delta"]))
|
||
|
||
self.state["core_state"] = core
|
||
self._save_state()
|
||
|
||
print(f"✅ Vision analysis: Mood={scores.get('mood', 'N/A')}")
|
||
return analysis
|
||
|
||
return None
|
||
|
||
except Exception as e:
|
||
print(f"Vision error: {e}")
|
||
return None
|
||
|
||
def generate_response(self, user_input):
|
||
"""Generiert Response über NaturalLanguageEngine"""
|
||
core = self.state.get("core_state", {})
|
||
|
||
# Simple Trust-basierte Responses wenn kein Ollama verfügbar
|
||
trust = core.get("trust", 7)
|
||
|
||
if trust >= 8:
|
||
if any(w in user_input.lower() for w in ["vermiss", "lieb"]):
|
||
return "Ich auch. Das sagst du nicht einfach so."
|
||
elif "hallo" in user_input.lower():
|
||
return "Da bist du ja."
|
||
else:
|
||
return "Ich bin ganz bei dir."
|
||
elif trust >= 4:
|
||
return "Okay. Verstehe."
|
||
else:
|
||
return "..."
|
||
|
||
def process_interaction(self, user_input):
|
||
"""Verarbeitet komplette Interaktion"""
|
||
print("\n" + "="*60)
|
||
print("NATIRIS MASTER PROCESSING")
|
||
print("="*60)
|
||
|
||
# 1. State-Update
|
||
print("\n📝 Step 1: Analyzing input...")
|
||
core = self.update_state_from_input(user_input)
|
||
print(f" Trust: {core.get('trust', 7):.1f}, Mood: {core.get('mood', 5):.1f}")
|
||
|
||
# 2. Bild-Generierung (optional, basierend auf Trust/Mood)
|
||
print("\n🎨 Step 2: Generating image...")
|
||
img_result = self.generate_image()
|
||
|
||
# 3. Vision-Analyse
|
||
print("\n👁 Step 3: Analyzing image...")
|
||
vision_result = None
|
||
if img_result.get("success"):
|
||
vision_result = self.analyze_image(img_result["path"])
|
||
|
||
# 4. Response-Generierung
|
||
print("\n💬 Step 4: Generating response...")
|
||
response = self.generate_response(user_input)
|
||
|
||
# 5. Final Output
|
||
print("\n" + "="*60)
|
||
print("RESULT")
|
||
print("="*60)
|
||
print(f"User: {user_input}")
|
||
print(f"Natiris: {response}")
|
||
if img_result.get("success"):
|
||
print(f"Image: {img_result['path']}")
|
||
if vision_result:
|
||
scores = vision_result.get("parsed_scores", {})
|
||
print(f"Vision: Mood={scores.get('mood', 'N/A')}, Anxiety={scores.get('anxiety', 'N/A')}")
|
||
print("="*60)
|
||
|
||
return {
|
||
"user_input": user_input,
|
||
"response": response,
|
||
"image": img_result.get("path") if img_result.get("success") else None,
|
||
"vision": vision_result,
|
||
"state": self.state["core_state"],
|
||
"timestamp": datetime.now(timezone.utc).isoformat()
|
||
}
|
||
|
||
def demo_mode(self):
|
||
"""Demo-Modus mit Test-Interaktionen"""
|
||
test_inputs = [
|
||
"Hallo, wie geht es dir?",
|
||
"Ich habe dich vermisst",
|
||
"Wie war dein Tag?",
|
||
"Du siehst schön aus",
|
||
"Gute Nacht",
|
||
]
|
||
|
||
for inp in test_inputs:
|
||
result = self.process_interaction(inp)
|
||
print(f"\n{'─'*60}")
|
||
|
||
|
||
def main():
    """CLI entry point.

    Modes:
        --status : report which components are present on disk
        --demo   : run the scripted demo interactions
        --input  : process a single message and print the result as JSON
        (none)   : interactive REPL until 'quit'/'exit'/'q' or EOF
    """
    import argparse
    parser = argparse.ArgumentParser(description="Natiris Master Orchestrator")
    parser.add_argument("--demo", action="store_true", help="Run demo mode")
    parser.add_argument("--input", help="Single user input")
    parser.add_argument("--status", action="store_true", help="Show system status")

    args = parser.parse_args()

    if args.status:
        print("Natiris System Status Check...")
        print("="*60)

        # Component checks: existence on disk only, no execution.
        checks = [
            ("Core State", os.path.exists(PATHS["state"])),
            ("ComfyBridge", os.path.exists(PATHS["comfy_script"])),
            ("VisionBridge", os.path.exists(PATHS["vision_script"])),
            ("Generated Images", len(list(Path(PATHS["generated_dir"]).glob("*.png"))) > 0),
        ]

        for name, ok in checks:
            print(f" {'✅' if ok else '❌'} {name}")

        print("="*60)
        return

    master = NatirisMaster()

    if args.demo:
        master.demo_mode()
    elif args.input:
        result = master.process_interaction(args.input)
        print(json.dumps(result, indent=2))
    else:
        # Interactive mode
        print("Natiris Interactive Mode (quit to exit)")
        while True:
            try:
                user_in = input("\nYou: ")
            except (EOFError, KeyboardInterrupt):
                # BUGFIX: Ctrl-D (EOF) or Ctrl-C previously crashed the
                # REPL with a traceback instead of exiting cleanly.
                print()
                break
            if user_in.lower() in ["quit", "exit", "q"]:
                break
            master.process_interaction(user_in)


if __name__ == "__main__":
    main()