Initial commit: Natiris AI Agent Orchestration System

This commit is contained in:
Arch Agent
2026-03-01 14:28:26 +01:00
commit 3b5f6ba83d
3127 changed files with 86184 additions and 0 deletions

562
bridges/ComfyBridge_v2.py Normal file
View File

@@ -0,0 +1,562 @@
#!/usr/bin/env python3
"""
ComfyBridge v2 - extended ComfyUI integration with image consistency.
Input: core_state (trust, mood, loneliness)
Output: generated image + metadata + vision analysis
Features:
- Real ComfyUI API integration
- IPAdapter for facial consistency (face_base.png)
- ControlNet OpenPose for body posture (body_base.png)
- Trust-based styling
- Image download and metadata persistence
- VisionBridge integration
"""
import json
import os
import time
import uuid
import subprocess
from datetime import datetime, timezone
from pathlib import Path
# Optional dependencies: HTTP client and image handling. The flag records
# availability; NOTE(review): the flag is never checked before `requests`
# is used further down — confirm callers guarantee the dependency.
try:
    import requests
    from PIL import Image
    import io
    REQUESTS_AVAILABLE = True
except ImportError:
    REQUESTS_AVAILABLE = False
# Configuration: all well-known file locations under the user's home.
PATHS = {
    "state": os.path.expanduser("~/natiris/core/natiris_full_state.json"),
    "config": os.path.expanduser("~/natiris/config/character_genesis.json"),
    "output_dir": os.path.expanduser("~/natiris/generated/"),
    "output": os.path.expanduser("~/natiris/bridges/comfy_response.json"),
    "base_images": os.path.expanduser("~/natiris/assets/base_images/"),
    "vision_script": os.path.expanduser("~/natiris/bridges/VisionBridge.py"),
}
# ComfyUI HTTP endpoint; overridable via environment.
COMFY_API = os.getenv("COMFY_API_URL", "http://localhost:8188")
# NOTE(review): this module-level id is unused — ComfyBridge generates its
# own per-instance client id in __init__.
CLIENT_ID = f"natiris_{datetime.now().strftime('%Y%m%d')}"
# Trust-based styling map: each entry covers an inclusive trust range and
# supplies prompt fragments, camera distance and lighting for that tier.
# NOTE(review): non-integer trust values between tiers (e.g. 3.5) match no
# range and fall back to the first entry in get_style_config().
TRUST_MAP = [
    {
        "range": [0, 3],
        "style": "neutral_portrait",
        "prompt_add": "neutral expression, professional lighting, medium distance, formal atmosphere",
        "distance": "medium",
        "lighting": "neutral, professional"
    },
    {
        "range": [4, 7],
        "style": "personal_context",
        "prompt_add": "relaxed expression, warm lighting, indoor setting, cozy home environment",
        "distance": "medium-close",
        "lighting": "warm, soft"
    },
    {
        "range": [8, 10],
        "style": "intimate",
        "prompt_add": "warm smile, intimate lighting, close portrait, emotional connection, soft focus background",
        "distance": "close",
        "lighting": "warm, intimate, golden hour"
    }
]
class ComfyBridge:
    """ComfyUI integration bridge for Natiris.

    Builds a text-to-image workflow from the persisted core state
    (trust, mood, loneliness), submits it to the ComfyUI HTTP API,
    polls for completion, downloads the result and stores it next to
    a JSON metadata sidecar. Can optionally hand the image over to
    VisionBridge for analysis.
    """

    def __init__(self):
        # Per-process client id so ComfyUI can attribute queue entries.
        self.client_id = f"natiris_{uuid.uuid4().hex[:8]}"
        self.base_images_dir = Path(PATHS["base_images"])
        self.output_dir = Path(PATHS["output_dir"])
        self.output_dir.mkdir(parents=True, exist_ok=True)
        self.base_images_dir.mkdir(parents=True, exist_ok=True)
        self.current_workflow = None  # last workflow dict built by build_workflow()
        self.prompt_id = None         # last prompt id returned by ComfyUI

    def check_health(self):
        """Check ComfyUI availability.

        Returns:
            dict: ``{"reachable": True, "version": ..., "devices": [...]}``
            on success, or ``{"reachable": False, "error": ...}`` when the
            server cannot be contacted.
        """
        try:
            response = requests.get(f"{COMFY_API}/system_stats", timeout=5)
            data = response.json()
            return {
                "reachable": True,
                "version": data.get("system", {}).get("comfyui_version", "unknown"),
                "devices": data.get("devices", [])
            }
        except Exception as e:
            return {"reachable": False, "error": str(e)}

    def check_base_images(self):
        """Check for the reference images and create placeholders if missing.

        Returns:
            dict: existence flags, absolute paths, and ``all_ready`` (face
            and body only — the pose image is deliberately not required).
        """
        face_base = self.base_images_dir / "face_base.png"
        body_base = self.base_images_dir / "body_base.png"
        pose_base = self.base_images_dir / "pose_base.png"
        status = {
            "face_exists": face_base.exists(),
            "body_exists": body_base.exists(),
            "pose_exists": pose_base.exists(),
            "all_ready": False
        }
        # Create placeholder images for any reference that is missing.
        if not face_base.exists():
            self._create_dummy_face(face_base)
        if not body_base.exists():
            self._create_dummy_body(body_base)
        if not pose_base.exists():
            self._create_dummy_pose(pose_base)
        status["all_ready"] = face_base.exists() and body_base.exists()
        status["face_path"] = str(face_base)
        status["body_path"] = str(body_base)
        status["pose_path"] = str(pose_base)
        return status

    def _create_dummy_face(self, path):
        """Create a placeholder face reference image (512x512)."""
        try:
            from PIL import Image, ImageDraw
            # Light gray canvas with a simple face oval.
            img = Image.new('RGB', (512, 512), color='lightgray')
            draw = ImageDraw.Draw(img)
            # Face oval
            draw.ellipse([150, 100, 362, 400], fill='peachpuff', outline='tan', width=2)
            # Eyes
            draw.ellipse([200, 180, 240, 220], fill='white')
            draw.ellipse([200, 180, 240, 220], outline='black', width=1)
            draw.ellipse([270, 180, 310, 220], fill='white')
            draw.ellipse([270, 180, 310, 220], outline='black', width=1)
            # Mouth
            draw.arc([210, 260, 300, 340], start=0, end=180, fill='darkred', width=2)
            img.save(path)
            print(f"✓ Dummy face_base.png erstellt: {path}")
        except Exception as e:
            print(f"⚠ Konnte face_dummy nicht erstellen: {e}")

    def _create_dummy_body(self, path):
        """Create a placeholder body reference image (512x768 portrait)."""
        try:
            from PIL import Image, ImageDraw
            img = Image.new('RGB', (512, 768), color='lightgray')
            draw = ImageDraw.Draw(img)
            # Rough body silhouette
            draw.ellipse([156, 50, 356, 300], fill='peachpuff', outline='tan', width=2)  # head
            draw.rectangle([200, 280, 312, 550], fill='peachpuff', outline='tan', width=2)  # torso
            draw.rectangle([150, 300, 200, 500], fill='peachpuff', outline='tan', width=2)  # left arm
            draw.rectangle([312, 300, 362, 500], fill='peachpuff', outline='tan', width=2)  # right arm
            img.save(path)
            print(f"✓ Dummy body_base.png erstellt: {path}")
        except Exception as e:
            print(f"⚠ Konnte body_dummy nicht erstellen: {e}")

    def _create_dummy_pose(self, path):
        """Create a placeholder pose image for ControlNet (OpenPose-like)."""
        try:
            from PIL import Image, ImageDraw
            # Black canvas with white skeleton lines, mimicking OpenPose output.
            img = Image.new('RGB', (512, 768), color='black')
            draw = ImageDraw.Draw(img)
            draw.line([(256, 100), (256, 400)], fill='white', width=3)  # spine
            draw.line([(256, 200), (150, 350)], fill='white', width=3)  # left arm
            draw.line([(256, 200), (362, 350)], fill='white', width=3)  # right arm
            draw.line([(256, 400), (200, 700)], fill='white', width=3)  # left leg
            draw.line([(256, 400), (312, 700)], fill='white', width=3)  # right leg
            # Joint markers
            for pos in [(256, 100), (256, 200), (150, 350), (362, 350), (256, 400), (200, 700), (312, 700)]:
                draw.ellipse([pos[0]-5, pos[1]-5, pos[0]+5, pos[1]+5], fill='white')
            img.save(path)
            print(f"✓ Dummy pose_base.png erstellt: {path}")
        except Exception as e:
            print(f"⚠ Konnte pose_dummy nicht erstellen: {e}")

    def get_style_config(self, trust):
        """Return the TRUST_MAP entry whose inclusive range contains `trust`.

        Falls back to the first (most neutral) entry when no range matches,
        e.g. for fractional values between tiers.
        """
        for entry in TRUST_MAP:
            if entry["range"][0] <= trust <= entry["range"][1]:
                return entry
        return TRUST_MAP[0]

    def build_prompt(self, state):
        """Build positive/negative prompts plus render parameters from state.

        Args:
            state: full state dict; only ``core_state`` is currently used
                (trust defaults to 7.0, mood to 5).
        Returns:
            dict with positive/negative prompts, style name, trust, mood
            and the latent width/height.
        """
        core = state.get("core_state", {})
        trust = core.get("trust", 7.0)
        mood = core.get("mood", 5)
        style = self.get_style_config(trust)
        # Base character description for cross-image consistency.
        character_desc = (
            "young woman, natural beauty, warm eyes, "
            "consistent facial features, same person, "
            f"{style['lighting']}, "
            f"{style['distance']} portrait, "
            f"mood: {self._mood_to_desc(mood)}, "
        )
        # Trust-specific additions from the styling map.
        prompt = (
            f"{character_desc} "
            f"{style['prompt_add']}, "
            f"high detail, cinematic, soft bokeh"
        )
        negative = (
            "blurry, distorted, deformed, extra limbs, "
            "different person, inconsistent face, "
            "low quality, bad anatomy, ugly, duplicate"
        )
        return {
            "positive": prompt,
            "negative": negative,
            "style": style["style"],
            "trust": trust,
            "mood": mood,
            "width": 512,
            "height": 768 if trust > 7 else 512  # intimate tier uses portrait format
        }

    def _mood_to_desc(self, mood):
        """Map a numeric mood value (0-10) to a prompt description."""
        if mood >= 8:
            return "radiant, glowing with happiness"
        elif mood >= 6:
            return "content, peaceful"
        elif mood >= 4:
            return "neutral, calm"
        elif mood >= 2:
            return "melancholic, withdrawn"
        else:
            return "sad, distant"

    def build_workflow(self, prompt_data, base_images):
        """Assemble the ComfyUI workflow JSON (API format, node-id keyed).

        Args:
            prompt_data: output of build_prompt().
            base_images: output of check_base_images(); when a face
                reference exists, IPAdapter nodes are merged in.
        """
        # Seed derived from wall clock, clamped to a 32-bit signed range.
        seed = int(time.time()) % 2147483647
        workflow = {
            # 1: positive prompt
            "1": {
                "inputs": {"text": prompt_data["positive"], "clip": ["12", 1]},
                "class_type": "CLIPTextEncode"
            },
            # 2: negative prompt
            "2": {
                "inputs": {"text": prompt_data["negative"], "clip": ["12", 1]},
                "class_type": "CLIPTextEncode"
            },
            # 3: KSampler
            "3": {
                "inputs": {
                    "seed": seed,
                    "steps": 25,
                    "cfg": 7.0,
                    "sampler_name": "euler_ancestral",
                    "scheduler": "karras",
                    "denoise": 1.0,
                    "model": ["12", 0],
                    "positive": ["1", 0],
                    "negative": ["2", 0],
                    "latent_image": ["13", 0]
                },
                "class_type": "KSampler"
            },
            # 4: VAE decode
            "4": {
                "inputs": {"samples": ["3", 0], "vae": ["12", 2]},
                "class_type": "VAEDecode"
            },
            # 5: save image
            "5": {
                "inputs": {
                    "filename_prefix": f"natiris_{prompt_data['style']}",
                    "images": ["4", 0]
                },
                "class_type": "SaveImage"
            },
            # 12: checkpoint loader
            "12": {
                "inputs": {"ckpt_name": "realisticVisionV60B1_v51HyperVAE.safetensors"},
                "class_type": "CheckpointLoaderSimple"
            },
            # 13: empty latent
            "13": {
                "inputs": {
                    "width": prompt_data["width"],
                    "height": prompt_data["height"],
                    "batch_size": 1
                },
                "class_type": "EmptyLatentImage"
            }
        }
        # Merge IPAdapter nodes when a face reference image exists.
        if base_images.get("face_exists"):
            workflow.update(self._build_ipadapter_nodes(base_images["face_path"]))
        self.current_workflow = workflow
        return workflow

    def _build_ipadapter_nodes(self, face_path):
        """Return additional IPAdapter workflow nodes (currently a stub)."""
        # Simplified — a real deployment would load and apply an IPAdapter model.
        return {
            # Reserved for future IPAdapter integration:
            # "20": {"inputs": {"image": face_path}, "class_type": "LoadImage"},
            # "21": {"inputs": {"ipadapter_file": "ip...safetensors"}, "class_type": "IPAdapterModelLoader"},
        }

    def submit_workflow(self, workflow):
        """Submit the workflow to ComfyUI's /prompt endpoint.

        Returns:
            dict: ``{"success": True, "prompt_id": ...}`` or
            ``{"success": False, "error": ...}``.
        """
        try:
            data = {
                "prompt": workflow,
                "client_id": self.client_id
            }
            response = requests.post(f"{COMFY_API}/prompt", json=data, timeout=10)
            result = response.json()
            if "prompt_id" in result:
                self.prompt_id = result["prompt_id"]
                return {"success": True, "prompt_id": result["prompt_id"]}
            else:
                return {"success": False, "error": result.get("error", "Unknown error")}
        except Exception as e:
            return {"success": False, "error": str(e)}

    def poll_result(self, prompt_id, max_wait=300):
        """Poll ComfyUI until the prompt appears in history or max_wait elapses.

        Returns:
            dict: ``{"completed": True, "data": <history entry>}`` on success,
            otherwise ``{"completed": False, "error": ...}``.
        """
        start_time = time.time()
        while time.time() - start_time < max_wait:
            try:
                # Current queue state
                queue = requests.get(f"{COMFY_API}/queue", timeout=5).json()
                # Finished prompts land in /history keyed by prompt id.
                history = requests.get(f"{COMFY_API}/history", timeout=5).json()
                if prompt_id in history:
                    return {"completed": True, "data": history[prompt_id]}
                # Still queued or running?
                running = [r.get("prompt_id") for r in queue.get("queue_running", [])]
                pending = [p.get("prompt_id") for p in queue.get("queue_pending", [])]
                if prompt_id not in running and prompt_id not in pending and prompt_id not in history:
                    # Possibly already processed under a different history key;
                    # keep polling until the timeout rather than failing early.
                    pass
                time.sleep(0.5)
            except Exception as e:
                return {"completed": False, "error": str(e)}
        return {"completed": False, "error": "Timeout"}

    def download_image(self, filename, subfolder="", folder_type="output"):
        """Download a generated image via ComfyUI's /view endpoint.

        Returns:
            bytes: raw image data, or None on any failure.
        """
        try:
            params = {
                "filename": filename,
                "subfolder": subfolder,
                "type": folder_type
            }
            response = requests.get(f"{COMFY_API}/view", params=params, timeout=30)
            if response.status_code == 200:
                return response.content
            else:
                return None
        except Exception as e:
            print(f"Download error: {e}")
            return None

    def save_image(self, image_data, metadata):
        """Write image bytes plus a JSON metadata sidecar into output_dir.

        The sidecar shares the image's timestamped stem with a .json suffix.
        """
        timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
        filename = f"natiris_{metadata['style']}_{timestamp}.png"
        filepath = self.output_dir / filename
        try:
            with open(filepath, "wb") as f:
                f.write(image_data)
            # BUG FIX: the sidecar was previously written to the constant
            # name "(unknown).json", so every generation overwrote the same
            # metadata file. Pair it with the image instead.
            meta_file = filepath.with_suffix(".json")
            with open(meta_file, "w") as f:
                json.dump(metadata, f, indent=2)
            return {"success": True, "path": str(filepath), "filename": filename}
        except Exception as e:
            return {"success": False, "error": str(e)}

    def trigger_vision_analysis(self, image_path):
        """Run VisionBridge on the given image as a subprocess (30s cap)."""
        try:
            result = subprocess.run([
                "python3", PATHS["vision_script"],
                "--image", image_path
            ], capture_output=True, text=True, timeout=30)
            return {
                "success": result.returncode == 0,
                "stdout": result.stdout,
                "stderr": result.stderr
            }
        except Exception as e:
            return {"success": False, "error": str(e)}

    def generate(self, state_path=None):
        """Main entry: generate one image from the persisted state.

        Args:
            state_path: optional explicit state file; falls back to
                PATHS["state"], then to an empty state (build_prompt
                supplies defaults).
        Returns:
            dict with ``success`` plus either image path/metadata or an
            error description.
        """
        # 1. Load state (explicit path wins; missing files yield defaults).
        state = {}
        if state_path and os.path.exists(state_path):
            with open(state_path) as f:
                state = json.load(f)
        elif os.path.exists(PATHS["state"]):
            with open(PATHS["state"]) as f:
                state = json.load(f)
        # 2. Health check
        health = self.check_health()
        if not health["reachable"]:
            return {"success": False, "error": "ComfyUI not reachable", "health": health}
        # 3. Ensure base/reference images exist
        base_images = self.check_base_images()
        # 4. Build prompts
        prompt_data = self.build_prompt(state)
        # 5. Build workflow
        workflow = self.build_workflow(prompt_data, base_images)
        # 6. Submit
        submit_result = self.submit_workflow(workflow)
        if not submit_result["success"]:
            return {"success": False, "error": submit_result.get("error", "Submit failed")}
        prompt_id = submit_result["prompt_id"]
        print(f"✓ Workflow submitted: {prompt_id}")
        # 7. Poll for completion
        poll_result = self.poll_result(prompt_id)
        if not poll_result["completed"]:
            return {"success": False, "error": poll_result.get("error", "Poll failed")}
        # 8. Extract image from history outputs
        history_data = poll_result["data"]
        outputs = history_data.get("outputs", {})
        if not outputs:
            return {"success": False, "error": "No outputs in history"}
        # Find the SaveImage node's output (usually node 5).
        for node_id, node_output in outputs.items():
            if "images" in node_output:
                for img_data in node_output["images"]:
                    filename = img_data.get("filename")
                    subfolder = img_data.get("subfolder", "")
                    image_bytes = self.download_image(filename, subfolder)
                    if image_bytes:
                        metadata = {
                            "prompt": prompt_data,
                            "trust": prompt_data["trust"],
                            "style": prompt_data["style"],
                            "prompt_id": prompt_id,
                            "timestamp": datetime.now(timezone.utc).isoformat()
                        }
                        save_result = self.save_image(image_bytes, metadata)
                        if save_result["success"]:
                            print(f"✓ Image saved: {save_result['path']}")
                            # Optional: vision analysis
                            # vision_result = self.trigger_vision_analysis(save_result["path"])
                            return {
                                "success": True,
                                "image_path": save_result["path"],
                                "metadata": metadata,
                                "comfy_status": health
                            }
        return {"success": False, "error": "Image processing failed"}
def main():
    """CLI entry point.

    Modes:
        --check  print health + base-image status as JSON and exit
        --test   verbose test run: health, base images, one generation,
                 result written to PATHS["output"]
        default  generate once and print the result JSON
    """
    import argparse
    parser = argparse.ArgumentParser(description="Natiris ComfyUI Bridge")
    parser.add_argument("--state", help="Path to state JSON", default=PATHS["state"])
    parser.add_argument("--check", action="store_true", help="Check health only")
    parser.add_argument("--test", action="store_true", help="Generate test image")
    args = parser.parse_args()
    bridge = ComfyBridge()
    if args.check:
        health = bridge.check_health()
        base = bridge.check_base_images()
        print(json.dumps({"health": health, "base_images": base}, indent=2))
        return
    if args.test:
        print("ComfyBridge Test Mode")
        print("-" * 40)
        # Health — BUG FIX: the status glyphs were garbled to empty strings
        # ('' if ... else ''); restored as ✓/✗ (presumed original intent).
        health = bridge.check_health()
        print(f"ComfyUI: {'✓' if health['reachable'] else '✗'} {health.get('version', 'unknown')}")
        # Base images
        base = bridge.check_base_images()
        print(f"Base Images: {'✓' if base['all_ready'] else '✗'} Created if needed")
        # Generate
        print("\nGenerating image...")
        result = bridge.generate(args.state)
        if result["success"]:
            print(f"\n✅ SUCCESS")
            print(f"Image: {result['image_path']}")
            print(f"Style: {result['metadata']['style']}")
            print(f"Trust: {result['metadata']['trust']}")
        else:
            print(f"\n❌ FAILED")
            print(f"Error: {result.get('error', 'Unknown')}")
        # Persist the response for other bridges to consume.
        with open(PATHS["output"], "w") as f:
            json.dump(result, f, indent=2)
        return
    # Default: generate once and print result
    result = bridge.generate(args.state)
    print(json.dumps(result, indent=2))
if __name__ == "__main__":
    main()