Initial commit: Natiris AI Agent Orchestration System
222
bridges/ComfyBridge.py
Executable file
@@ -0,0 +1,222 @@
#!/usr/bin/env python3
"""
ComfyBridge Working – working IPAdapter integration
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import time
|
||||
import uuid
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
import requests
|
||||
|
||||
PATHS = {
|
||||
"state": os.path.expanduser("~/natiris/core/natiris_full_state.json"),
|
||||
"output_dir": os.path.expanduser("~/natiris/generated"),
|
||||
"base_images": os.path.expanduser("~/natiris/assets/base_images"),
|
||||
}
|
||||
|
||||
COMFY_API = "http://localhost:8188"
|
||||
|
||||
TRUST_MAP = [
|
||||
{"range": [0, 3], "style": "neutral_portrait", "lighting": "professional cold", "ipadapter_weight": 0.4, "distance": "medium"},
|
||||
{"range": [4, 7], "style": "personal_context", "lighting": "warm golden", "ipadapter_weight": 0.6, "distance": "medium-close"},
|
||||
{"range": [8, 10], "style": "intimate", "lighting": "intimate soft", "ipadapter_weight": 0.8, "distance": "close"},
|
||||
]
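# Illustration (a minimal sketch mirroring get_style_config() below, not part of the original file):
# a trust value selects the first band whose inclusive range contains it.
#
#   def _resolve(trust):
#       for entry in TRUST_MAP:
#           if entry["range"][0] <= trust <= entry["range"][1]:
#               return entry
#       return TRUST_MAP[1]
#
#   _resolve(7.0)["style"]              # -> "personal_context"
#   _resolve(7.0)["ipadapter_weight"]   # -> 0.6
#   _resolve(9.5)["lighting"]           # -> "intimate soft"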
|
||||
|
||||
|
||||
class ComfyBridgeWorking:
|
||||
def __init__(self):
|
||||
self.output_dir = Path(PATHS["output_dir"])
|
||||
self.output_dir.mkdir(exist_ok=True)
|
||||
self.client_id = f"natiris_{uuid.uuid4().hex[:6]}"
|
||||
|
||||
def check_health(self):
|
||||
try:
|
||||
resp = requests.get(f"{COMFY_API}/system_stats", timeout=5)
|
||||
return {"reachable": True, "version": resp.json()["system"]["comfyui_version"]}
|
||||
except Exception as e:
|
||||
return {"reachable": False, "error": str(e)}
|
||||
|
||||
def get_style_config(self, trust):
|
||||
for entry in TRUST_MAP:
|
||||
if entry["range"][0] <= trust <= entry["range"][1]:
|
||||
return entry
|
||||
return TRUST_MAP[1]
|
||||
|
||||
def build_prompt(self, trust=7.0, mood=5):
|
||||
style = self.get_style_config(trust)
|
||||
|
||||
positive = (
|
||||
f"portrait of young woman, {style['lighting']}, "
|
||||
f"{style['distance']} shot, mood {mood}/10, "
|
||||
"beautiful, consistent face, realistic, 8k"
|
||||
)
|
||||
|
||||
negative = "ugly, deformed, blurry, low quality, extra limbs"
|
||||
|
||||
return {
|
||||
"positive": positive,
|
||||
"negative": negative,
|
||||
"style": style["style"],
|
||||
"trust": trust,
|
||||
"ipadapter_weight": style["ipadapter_weight"],
|
||||
"width": 512,
|
||||
"height": 768 if trust > 7 else 512,
|
||||
}
|
||||
|
||||
def build_basic_workflow(self, prompt_data):
|
||||
"""Einfacher Workflow ohne IPAdapter"""
|
||||
seed = int(time.time() * 1000) % 2147483647
|
||||
|
||||
return {
|
||||
"1": {"inputs": {"text": prompt_data["positive"], "clip": ["4", 1]}, "class_type": "CLIPTextEncode"},
|
||||
"2": {"inputs": {"text": prompt_data["negative"], "clip": ["4", 1]}, "class_type": "CLIPTextEncode"},
|
||||
"3": {
|
||||
"inputs": {
|
||||
"seed": seed,
|
||||
"steps": 25,
|
||||
"cfg": 7.0,
|
||||
"sampler_name": "euler_ancestral",
|
||||
"scheduler": "karras",
|
||||
"denoise": 1.0,
|
||||
"model": ["4", 0],
|
||||
"positive": ["1", 0],
|
||||
"negative": ["2", 0],
|
||||
"latent_image": ["5", 0]
|
||||
},
|
||||
"class_type": "KSampler"
|
||||
},
|
||||
"4": {"inputs": {"ckpt_name": "realisticVisionV60B1_v51HyperVAE.safetensors"}, "class_type": "CheckpointLoaderSimple"},
|
||||
"5": {"inputs": {"width": prompt_data["width"], "height": prompt_data["height"], "batch_size": 1}, "class_type": "EmptyLatentImage"},
|
||||
"6": {"inputs": {"samples": ["3", 0], "vae": ["4", 2]}, "class_type": "VAEDecode"},
|
||||
"7": {"inputs": {"filename_prefix": f"natiris_{prompt_data['style']}", "images": ["6", 0]}, "class_type": "SaveImage"},
|
||||
}
|
||||
|
||||
def submit_and_wait(self, workflow):
|
||||
"""Sendet Workflow und wartet auf Ergebnis"""
|
||||
# Submit
|
||||
data = {"prompt": workflow, "client_id": self.client_id}
|
||||
resp = requests.post(f"{COMFY_API}/prompt", json=data, timeout=10)
|
||||
result = resp.json()
|
||||
|
||||
if "prompt_id" not in result:
|
||||
return {"success": False, "error": result.get("error", "Submit failed")}
|
||||
|
||||
prompt_id = result["prompt_id"]
|
||||
print(f"⏳ Generating... (ID: {prompt_id[:8]})")
|
||||
|
||||
# Wait (simple polling)
for _ in range(300):  # max. 5 minutes
|
||||
time.sleep(1)
|
||||
try:
|
||||
history = requests.get(f"{COMFY_API}/history", timeout=5).json()
|
||||
if prompt_id in history:
|
||||
return {"success": True, "data": history[prompt_id], "prompt_id": prompt_id}
|
||||
except Exception:
|
||||
continue
|
||||
|
||||
return {"success": False, "error": "Timeout"}
|
||||
|
||||
def save_image(self, history_data, prompt_data):
|
||||
"""Speichert generiertes Bild"""
|
||||
outputs = history_data.get("outputs", {})
|
||||
|
||||
for node_id, node_out in outputs.items():
|
||||
if "images" in node_out:
|
||||
for img in node_out["images"]:
|
||||
try:
|
||||
params = {
|
||||
"filename": img["filename"],
|
||||
"subfolder": img.get("subfolder", ""),
|
||||
"type": "output"
|
||||
}
|
||||
resp = requests.get(f"{COMFY_API}/view", params=params, timeout=30)
|
||||
|
||||
if resp.status_code == 200:
|
||||
ts = datetime.now().strftime("%Y%m%d_%H%M%S")
|
||||
filename = f"natiris_{prompt_data['style']}_{ts}.png"
|
||||
filepath = self.output_dir / filename
|
||||
|
||||
with open(filepath, "wb") as f:
|
||||
f.write(resp.content)
|
||||
|
||||
return {
|
||||
"success": True,
|
||||
"path": str(filepath),
|
||||
"filename": filename
|
||||
}
|
||||
except Exception as e:
|
||||
print(f"Save error: {e}")
|
||||
|
||||
return {"success": False, "error": "Could not save image"}
|
||||
|
||||
def generate(self, state_path=None):
|
||||
"""Hauptmethode"""
|
||||
# State laden für Trust-Level
|
||||
trust = 7.0
|
||||
mood = 5
|
||||
|
||||
if os.path.exists(PATHS["state"]):
|
||||
try:
|
||||
with open(PATHS["state"]) as f:
|
||||
state = json.load(f)
|
||||
core = state.get("core_state", {})
|
||||
trust = core.get("trust", 7.0)
|
||||
mood = core.get("mood", 5)
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Health Check
|
||||
health = self.check_health()
|
||||
if not health["reachable"]:
|
||||
return {"success": False, "error": "ComfyUI unreachable"}
|
||||
|
||||
# Prompt & Workflow
|
||||
prompt_data = self.build_prompt(trust, mood)
|
||||
workflow = self.build_basic_workflow(prompt_data)
|
||||
|
||||
# Generate
|
||||
print(f"🎨 Generating with trust={trust}, mood={mood}...")
|
||||
result = self.submit_and_wait(workflow)
|
||||
|
||||
if not result["success"]:
|
||||
return result
|
||||
|
||||
# Save
|
||||
return self.save_image(result["data"], prompt_data)
|
||||
|
||||
|
||||
def main():
|
||||
import argparse
|
||||
parser = argparse.ArgumentParser()
|
||||
parser.add_argument("--check", action="store_true")
|
||||
parser.add_argument("--test", action="store_true")
|
||||
args = parser.parse_args()
|
||||
|
||||
bridge = ComfyBridgeWorking()
|
||||
|
||||
if args.check:
|
||||
h = bridge.check_health()
|
||||
print(f"ComfyUI: {'✅' if h['reachable'] else '❌'} {h.get('version', 'n/a')}")
|
||||
return
|
||||
|
||||
if args.test:
|
||||
print("Testing generation...")
|
||||
result = bridge.generate()
|
||||
|
||||
if result["success"]:
|
||||
print(f"✅ SUCCESS: {result['path']}")
|
||||
else:
|
||||
print(f"❌ FAILED: {result.get('error', 'Unknown')}")
|
||||
|
||||
with open("/tmp/comfy_result.json", "w") as f:
|
||||
json.dump(result, f, indent=2)
|
||||
else:
|
||||
result = bridge.generate()
|
||||
print(json.dumps(result, indent=2))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
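# Usage sketch (assumes ComfyUI is running at http://localhost:8188):
#   python3 bridges/ComfyBridge.py --check   # health check only
#   python3 bridges/ComfyBridge.py --test    # test generation; result is also written to /tmp/comfy_result.json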
|
||||
450
bridges/ComfyBridge_final.py
Normal file
@@ -0,0 +1,450 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
ComfyBridge Final – Vollständige IPAdapter Integration
|
||||
Mit korrekten Pfaden und Fallback
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import time
|
||||
import uuid
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
import requests
|
||||
|
||||
# Configuration
|
||||
PROJECT_ROOT = os.path.expanduser("~/natiris")
|
||||
PATHS = {
|
||||
"state": os.path.join(PROJECT_ROOT, "core/natiris_full_state.json"),
|
||||
"output_dir": os.path.join(PROJECT_ROOT, "generated"),
|
||||
"output": os.path.join(PROJECT_ROOT, "bridges/comfy_response.json"),
|
||||
"base_images": os.path.join(PROJECT_ROOT, "assets/base_images"),
|
||||
}
|
||||
|
||||
COMFY_API = os.getenv("COMFY_API_URL", "http://localhost:8188")
|
||||
|
||||
# Correct paths for ComfyUI (from the Pinokio install)
MODEL_PATHS = {
# Relative to the ComfyUI root, or absolute paths
|
||||
"ipadapter_dir": "/opt/pinokio/drive/drives/peers/d1753059260169/ipadapter",
|
||||
"clip_vision_dir": "/opt/pinokio/drive/drives/peers/d1753059260169/clip_vision",
|
||||
"checkpoints_dir": "/opt/pinokio/drive/drives/peers/d1753059260169/checkpoints",
|
||||
"controlnet_dir": "/opt/pinokio/drive/drives/peers/d1753059260169/controlnet",
|
||||
}
|
||||
|
||||
# Available models
|
||||
AVAILABLE_MODELS = {
|
||||
"ipadapter": [
|
||||
"ip-adapter-plus-face_sd15.safetensors",
|
||||
"ip-adapter-plus-face_sdxl_vit-h.safetensors",
|
||||
"ip-adapter_sdxl_vit-h.safetensors",
|
||||
"ip-adapter-faceid_sdxl.savetensors",
|
||||
"ip-adapter-faceid-plusv2_sdxl.savetensors",
|
||||
],
|
||||
"clip_vision": [
|
||||
"CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors",
|
||||
"clip_vision_h.safetensors",
|
||||
"CLIP-ViT-bigG-14-laion2B-39B-b160k.safetensors",
|
||||
]
|
||||
}
|
||||
|
||||
TRUST_MAP = [
{"range": [0, 3], "style": "neutral_portrait", "prompt_add": "neutral expression, professional lighting", "lighting": "professional cold", "ipadapter_weight": 0.4, "distance": "medium"},
{"range": [4, 7], "style": "personal_context", "prompt_add": "relaxed expression, warm lighting, cozy", "lighting": "warm golden", "ipadapter_weight": 0.6, "distance": "medium-close"},
{"range": [8, 10], "style": "intimate", "prompt_add": "warm smile, intimate lighting, emotional", "lighting": "intimate soft", "ipadapter_weight": 0.8, "distance": "close"},
]
|
||||
|
||||
|
||||
class ComfyBridgeFinal:
|
||||
"""Finale ComfyUI Bridge mit IPAdapter Integration"""
|
||||
|
||||
def __init__(self):
|
||||
self.client_id = f"natiris_{uuid.uuid4().hex[:8]}"
|
||||
self.output_dir = Path(PATHS["output_dir"])
|
||||
self.output_dir.mkdir(parents=True, exist_ok=True)
|
||||
self.base_images_dir = Path(PATHS["base_images"])
|
||||
|
||||
# Feature-Detection
|
||||
self.detected_features = self._detect_features()
|
||||
|
||||
def _detect_features(self):
|
||||
"""Erkennt verfügbare Features"""
|
||||
features = {
|
||||
"ipadapter_nodes": False,
|
||||
"ipadapter_models": False,
|
||||
"clip_vision_models": False,
|
||||
"face_image_exists": False,
|
||||
}
|
||||
|
||||
# Check available ComfyUI nodes
|
||||
try:
|
||||
resp = requests.get(f"{COMFY_API}/object_info", timeout=5)
|
||||
nodes = resp.json()
|
||||
features["ipadapter_nodes"] = any("IPAdapter" in k for k in nodes.keys())
|
||||
features["controlnet_nodes"] = "ControlNetLoader" in nodes
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
# Check model files
|
||||
if os.path.exists(MODEL_PATHS["ipadapter_dir"]):
|
||||
files = os.listdir(MODEL_PATHS["ipadapter_dir"])
|
||||
features["ipadapter_models"] = any(f.endswith('.safetensors') or f.endswith('.pth') for f in files)
|
||||
|
||||
if os.path.exists(MODEL_PATHS["clip_vision_dir"]):
|
||||
files = os.listdir(MODEL_PATHS["clip_vision_dir"])
|
||||
features["clip_vision_models"] = any(f.endswith('.safetensors') for f in files)
|
||||
|
||||
# Check the face reference image
|
||||
face_path = self.base_images_dir / "face_base.png"
|
||||
features["face_image_exists"] = face_path.exists()
|
||||
|
||||
return features
|
||||
|
||||
def check_health(self):
|
||||
"""ComfyUI Health Check"""
|
||||
try:
|
||||
resp = requests.get(f"{COMFY_API}/system_stats", timeout=5)
|
||||
data = resp.json()
|
||||
return {
|
||||
"reachable": True,
|
||||
"version": data.get("system", {}).get("comfyui_version", "unknown"),
|
||||
"features": self.detected_features
|
||||
}
|
||||
except Exception as e:
|
||||
return {"reachable": False, "error": str(e)}
|
||||
|
||||
def get_style_config(self, trust):
|
||||
"""Liefert Style basierend auf Trust"""
|
||||
for entry in TRUST_MAP:
|
||||
if entry["range"][0] <= trust <= entry["range"][1]:
|
||||
return entry
|
||||
return TRUST_MAP[1]
|
||||
|
||||
def build_prompt(self, state):
|
||||
"""Baut Prompt aus State"""
|
||||
core = state.get("core_state", {})
|
||||
|
||||
trust = core.get("trust", 7.0)
|
||||
mood = core.get("mood", 5)
|
||||
arousal = core.get("arousal_level", 3)
|
||||
|
||||
style = self.get_style_config(trust)
|
||||
|
||||
# Character description for visual consistency
|
||||
char_desc = (
|
||||
"beautiful young woman, same person, consistent face, "
|
||||
"natural skin texture, realistic, "
|
||||
f"{style['lighting']}, {style['distance']} portrait, "
|
||||
)
|
||||
|
||||
# Mood description
|
||||
mood_desc = self._mood_to_desc(mood)
|
||||
|
||||
positive = (
|
||||
f"{char_desc} {style['prompt_add']}, {mood_desc}, "
|
||||
f"arousal level {arousal}/10, "
|
||||
"high detail, 8k uhd, soft focus, gentle bokeh"
|
||||
)
|
||||
|
||||
negative = (
|
||||
"blurry, distorted, deformed, ugly, bad anatomy, "
|
||||
"extra limbs, missing limbs, different person, "
|
||||
"inconsistent face, low quality, jpeg artifacts"
|
||||
)
|
||||
|
||||
return {
|
||||
"positive": positive,
|
||||
"negative": negative,
|
||||
"style": style["style"],
|
||||
"trust": trust,
|
||||
"mood": mood,
|
||||
"ipadapter_weight": style["ipadapter_weight"],
|
||||
"width": 512,
|
||||
"height": 768 if trust > 7 else 512
|
||||
}
|
||||
|
||||
def _mood_to_desc(self, mood):
|
||||
"""Mood zu Beschreibung"""
|
||||
if mood >= 8: return "radiant, happy, glowing"
|
||||
elif mood >= 6: return "content, peaceful, relaxed"
|
||||
elif mood >= 4: return "neutral, calm, composed"
|
||||
elif mood >= 2: return "melancholic, pensive, distant"
|
||||
else: return "sad, withdrawn, vulnerable"
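# For example: mood 8-10 -> "radiant, happy, glowing"; mood 5 -> "neutral, calm, composed";
# mood 0-1 -> "sad, withdrawn, vulnerable".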
|
||||
|
||||
def build_workflow_basic(self, prompt_data):
|
||||
"""Basis-Workflow ohne IPAdapter"""
|
||||
seed = int(time.time() * 1000) % 2147483647
|
||||
|
||||
# Choose the checkpoint model (SD1.5 for better IPAdapter compatibility)
|
||||
checkpoint = "realisticVisionV60B1_v51HyperVAE.safetensors"
|
||||
if not os.path.exists(os.path.join(MODEL_PATHS["checkpoints_dir"], checkpoint)):
|
||||
checkpoint = "sd_xl_base_1.0.safetensors"
|
||||
|
||||
return {
|
||||
"1": {"inputs": {"text": prompt_data["positive"], "clip": ["4", 1]}, "class_type": "CLIPTextEncode"},
|
||||
"2": {"inputs": {"text": prompt_data["negative"], "clip": ["4", 1]}, "class_type": "CLIPTextEncode"},
|
||||
"3": {
|
||||
"inputs": {
|
||||
"seed": seed,
|
||||
"steps": 30,
|
||||
"cfg": 7.0,
|
||||
"sampler_name": "euler_ancestral",
|
||||
"scheduler": "karras",
|
||||
"denoise": 1.0,
|
||||
"model": ["4", 0],
|
||||
"positive": ["1", 0],
|
||||
"negative": ["2", 0],
|
||||
"latent_image": ["5", 0]
|
||||
},
|
||||
"class_type": "KSampler"
|
||||
},
|
||||
"4": {"inputs": {"ckpt_name": checkpoint}, "class_type": "CheckpointLoaderSimple"},
|
||||
"5": {"inputs": {"width": prompt_data["width"], "height": prompt_data["height"], "batch_size": 1}, "class_type": "EmptyLatentImage"},
|
||||
"6": {"inputs": {"samples": ["3", 0], "vae": ["4", 2]}, "class_type": "VAEDecode"},
|
||||
"7": {"inputs": {"filename_prefix": f"natiris_{prompt_data['style']}", "images": ["6", 0]}, "class_type": "SaveImage"},
|
||||
}
|
||||
|
||||
def build_workflow_with_ipadapter(self, prompt_data, face_path):
|
||||
"""Workflow mit IPAdapter für Gesichtskonsistenz"""
|
||||
workflow = self.build_workflow_basic(prompt_data)
|
||||
|
||||
# Weight based on trust
weight = prompt_data.get("ipadapter_weight", 0.6)

# Choose between SD1.5 and SDXL
checkpoint = workflow["4"]["inputs"]["ckpt_name"]
is_sdxl = "xl" in checkpoint.lower()

# Pick a matching IPAdapter model
|
||||
if is_sdxl:
|
||||
ipadapter_model = "ip-adapter-plus-face_sdxl_vit-h.safetensors"
|
||||
clip_vision = "CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors"
|
||||
else:
|
||||
ipadapter_model = "ip-adapter-plus-face_sd15.safetensors"
|
||||
clip_vision = "CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors"
|
||||
|
||||
# Verify that the model files exist
ipadapter_full = os.path.join(MODEL_PATHS["ipadapter_dir"], ipadapter_model)
if not os.path.exists(ipadapter_full):
# Fall back to the SD1.5 face model
ipadapter_model = "ip-adapter-plus-face_sd15.safetensors"
|
||||
|
||||
clip_full = os.path.join(MODEL_PATHS["clip_vision_dir"], clip_vision)
|
||||
if not os.path.exists(clip_full):
|
||||
clip_vision = "clip_vision_h.safetensors"
|
||||
|
||||
# IPAdapter Nodes
|
||||
ipadapter_nodes = {
|
||||
# Load the face reference image
|
||||
"20": {"inputs": {"image": face_path}, "class_type": "LoadImage"},
|
||||
|
||||
# IPAdapter Model
|
||||
"21": {"inputs": {"ipadapter_file": ipadapter_model}, "class_type": "IPAdapterModelLoader"},
|
||||
|
||||
# CLIP Vision
|
||||
"22": {"inputs": {"clip_name": clip_vision}, "class_type": "CLIPVisionLoader"},
|
||||
|
||||
# Apply the IPAdapter
|
||||
"23": {
|
||||
"inputs": {
|
||||
"model": ["4", 0],
|
||||
"ipadapter": ["21", 0],
|
||||
"image": ["20", 0],
|
||||
"clip_vision": ["22", 0],
|
||||
"weight": weight,
|
||||
"start_at": 0.0,
|
||||
"end_at": 1.0,
|
||||
},
|
||||
"class_type": "IPAdapterAdvanced"
|
||||
},
|
||||
}
|
||||
|
||||
# Extend the workflow with the IPAdapter nodes
workflow.update(ipadapter_nodes)

# Route the IPAdapter-patched model into the KSampler
workflow["3"]["inputs"]["model"] = ["23", 0]
|
||||
|
||||
return workflow
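# Resulting graph, sketched: LoadImage ("20"), IPAdapterModelLoader ("21") and CLIPVisionLoader ("22")
# feed IPAdapterAdvanced ("23"); its patched model output replaces the checkpoint model ("4", 0)
# as the KSampler's "model" input, so only node "3" needs to be rewired.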
|
||||
|
||||
def build_workflow(self, prompt_data):
|
||||
"""Wählt Workflow basierend auf Features"""
|
||||
face_path = str(self.base_images_dir / "face_base.png")
|
||||
|
||||
if (self.detected_features["ipadapter_nodes"] and
|
||||
self.detected_features["ipadapter_models"] and
|
||||
self.detected_features["clip_vision_models"] and
|
||||
self.detected_features["face_image_exists"] and
|
||||
os.path.exists(face_path)):
|
||||
|
||||
print(f"🎨 Using IPAdapter workflow (weight: {prompt_data.get('ipadapter_weight', 0.6)})")
|
||||
return self.build_workflow_with_ipadapter(prompt_data, face_path)
|
||||
else:
|
||||
print("🎨 Using basic workflow (IPAdapter not available)")
|
||||
print(f" Nodes: {self.detected_features.get('ipadapter_nodes', False)}")
|
||||
print(f" Models: {self.detected_features.get('ipadapter_models', False)}")
|
||||
print(f" Face: {self.detected_features.get('face_image_exists', False)}")
|
||||
return self.build_workflow_basic(prompt_data)
|
||||
|
||||
def submit_workflow(self, workflow):
|
||||
"""Sendet Workflow an ComfyUI"""
|
||||
try:
|
||||
data = {"prompt": workflow, "client_id": self.client_id}
|
||||
resp = requests.post(f"{COMFY_API}/prompt", json=data, timeout=10)
|
||||
result = resp.json()
|
||||
|
||||
if "prompt_id" in result:
|
||||
return {"success": True, "prompt_id": result["prompt_id"]}
|
||||
return {"success": False, "error": result.get("error", "Unknown")}
|
||||
except Exception as e:
|
||||
return {"success": False, "error": str(e)}
|
||||
|
||||
def poll_result(self, prompt_id, timeout=300):
|
||||
"""Wartet auf Ergebnis"""
|
||||
start = time.time()
|
||||
while time.time() - start < timeout:
|
||||
try:
|
||||
history = requests.get(f"{COMFY_API}/history", timeout=5).json()
|
||||
if prompt_id in history:
|
||||
return {"completed": True, "data": history[prompt_id]}
|
||||
|
||||
queue = requests.get(f"{COMFY_API}/queue", timeout=5).json()
|
||||
print(" ⏳ Generating...")
|
||||
time.sleep(1)
|
||||
except Exception as e:
|
||||
return {"completed": False, "error": str(e)}
|
||||
return {"completed": False, "error": "Timeout"}
|
||||
|
||||
def download_and_save(self, history_data, prompt_data):
|
||||
"""Lädt Bild herunter und speichert"""
|
||||
outputs = history_data.get("outputs", {})
|
||||
|
||||
for node_id, node_out in outputs.items():
|
||||
if "images" in node_out:
|
||||
for img in node_out["images"]:
|
||||
try:
|
||||
params = {
|
||||
"filename": img["filename"],
|
||||
"subfolder": img.get("subfolder", ""),
|
||||
"type": img.get("type", "output")
|
||||
}
|
||||
resp = requests.get(f"{COMFY_API}/view", params=params, timeout=30)
|
||||
|
||||
if resp.status_code == 200:
|
||||
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
|
||||
filename = f"natiris_{prompt_data['style']}_{timestamp}.png"
|
||||
filepath = self.output_dir / filename
|
||||
|
||||
with open(filepath, "wb") as f:
|
||||
f.write(resp.content)
|
||||
|
||||
# Metadata
|
||||
metadata = {
|
||||
**prompt_data,
|
||||
"timestamp": datetime.now(timezone.utc).isoformat(),
|
||||
"ipadapter_used": self.detected_features["ipadapter_nodes"],
|
||||
"filename": filename,
|
||||
}
|
||||
|
||||
with open(str(filepath) + ".json", "w") as f:
|
||||
json.dump(metadata, f, indent=2)
|
||||
|
||||
print(f"✅ Image saved: {filepath}")
|
||||
return {"success": True, "path": str(filepath), "metadata": metadata}
|
||||
except Exception as e:
|
||||
print(f"Error saving: {e}")
|
||||
|
||||
return {"success": False, "error": "Failed to save image"}
|
||||
|
||||
def generate(self, state_path=None):
|
||||
"""Haupt-Generierungs-Methode"""
|
||||
# State laden
|
||||
state = {}
|
||||
if state_path and os.path.exists(state_path):
|
||||
with open(state_path) as f:
|
||||
state = json.load(f)
|
||||
elif os.path.exists(PATHS["state"]):
|
||||
with open(PATHS["state"]) as f:
|
||||
state = json.load(f)
|
||||
|
||||
# Health Check
|
||||
health = self.check_health()
|
||||
if not health["reachable"]:
|
||||
return {"success": False, "error": "ComfyUI unreachable", "health": health}
|
||||
|
||||
# Workflow bauen
|
||||
prompt_data = self.build_prompt(state)
|
||||
workflow = self.build_workflow(prompt_data)
|
||||
|
||||
# Submit
|
||||
submit = self.submit_workflow(workflow)
|
||||
if not submit["success"]:
|
||||
return {"success": False, "error": submit.get("error", "Submit failed")}
|
||||
|
||||
print(f"🚀 Workflow submitted: {submit['prompt_id']}")
|
||||
|
||||
# Poll
|
||||
poll = self.poll_result(submit["prompt_id"])
|
||||
if not poll["completed"]:
|
||||
return {"success": False, "error": poll.get("error", "Generation failed")}
|
||||
|
||||
# Download
|
||||
result = self.download_and_save(poll["data"], prompt_data)
|
||||
result["health"] = health
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def main():
|
||||
"""CLI Entry"""
|
||||
import argparse
|
||||
|
||||
parser = argparse.ArgumentParser(description="Natiris ComfyBridge Final")
|
||||
parser.add_argument("--state", default=PATHS["state"])
|
||||
parser.add_argument("--check", action="store_true", help="Health check")
|
||||
parser.add_argument("--test", action="store_true", help="Test generation")
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
bridge = ComfyBridgeFinal()
|
||||
|
||||
if args.check:
|
||||
health = bridge.check_health()
|
||||
feats = health.get("features", {})
|
||||
print("\n" + "="*50)
|
||||
print("ComfyBridge Final Status")
|
||||
print("="*50)
|
||||
print(f"ComfyUI: {'✅' if health['reachable'] else '❌'} {health.get('version', 'n/a')}")
|
||||
print(f"IPAdapter Nodes: {'✅' if feats.get('ipadapter_nodes') else '❌'}")
|
||||
print(f"IPAdapter Models: {'✅' if feats.get('ipadapter_models') else '❌'}")
|
||||
print(f"CLIP Vision: {'✅' if feats.get('clip_vision_models') else '❌'}")
|
||||
print(f"Face Image: {'✅' if feats.get('face_image_exists') else '❌'}")
|
||||
print("="*50)
|
||||
return
|
||||
|
||||
if args.test:
|
||||
print("🎨 Testing image generation...")
|
||||
result = bridge.generate(args.state)
|
||||
|
||||
if result["success"]:
|
||||
print(f"\n✅ SUCCESS!")
|
||||
print(f" Image: {result['path']}")
|
||||
print(f" Style: {result['metadata']['style']}")
|
||||
print(f" Trust: {result['metadata']['trust']}")
|
||||
print(f" IPAdapter: {result['metadata']['ipadapter_used']}")
|
||||
else:
|
||||
print(f"\n❌ Failed: {result.get('error', 'Unknown')}")
|
||||
|
||||
# Save the response
|
||||
with open(PATHS["output"], "w") as f:
|
||||
json.dump(result, f, indent=2)
|
||||
|
||||
return
|
||||
|
||||
# Default: generate
|
||||
result = bridge.generate(args.state)
|
||||
print(json.dumps(result, indent=2))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
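# Usage sketch (model paths under MODEL_PATHS are assumed to match the local Pinokio install):
#   python3 bridges/ComfyBridge_final.py --check   # prints the feature matrix (nodes, models, face image)
#   python3 bridges/ComfyBridge_final.py --test    # generates an image and writes bridges/comfy_response.json
#
# Programmatic use, a minimal sketch:
#   bridge = ComfyBridgeFinal()
#   result = bridge.generate()   # reads PATHS["state"] when no explicit state path is given
#   if result.get("success"):
#       print(result["path"], result["metadata"]["style"])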
|
||||
99
bridges/ComfyBridge_v1_backup.py
Executable file
@@ -0,0 +1,99 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
ComfyBridge – ComfyUI integration (REST API); workflow submission is currently simulated
|
||||
Input: core_state, bond_output (trust, mood, loneliness)
|
||||
Output: image URL / status
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import requests
|
||||
from datetime import datetime, timezone
|
||||
|
||||
PATHS = {
|
||||
"state": os.path.expanduser("~/natiris/core/natiris_full_state.json"),
|
||||
"config": os.path.expanduser("~/natiris/config/character_genesis.json"),
|
||||
"output": os.path.expanduser("~/natiris/bridges/comfy_response.json"),
|
||||
}
|
||||
|
||||
COMFY_API = os.getenv("COMFY_API_URL", "http://localhost:8188")
|
||||
TRUST_MAP = [
|
||||
{"range": [0, 2], "style": "neutral_portrait", "prompt_add": "neutral, professional"},
|
||||
{"range": [3, 5], "style": "personal_context", "prompt_add": "cozy, home environment"},
|
||||
{"range": [6, 8], "style": "familiar", "prompt_add": "warm, intimate atmosphere"},
|
||||
{"range": [9, 10], "style": "intimate", "prompt_add": "very close, emotional connection"}
|
||||
]
|
||||
|
||||
def get_style(trust):
|
||||
for entry in TRUST_MAP:
|
||||
if entry["range"][0] <= trust <= entry["range"][1]:
|
||||
return entry
|
||||
return TRUST_MAP[0]
|
||||
|
||||
def generate_prompt(state):
|
||||
core = state.get("core_state", {})
|
||||
emotion = state.get("modules", {}).get("Emotion", {})
|
||||
bond = state.get("modules", {}).get("Bond", {})
|
||||
|
||||
mood = core.get("mood", 5)
|
||||
loneliness = core.get("loneliness", 2)
|
||||
trust = 7.0  # simulated
|
||||
|
||||
style_info = get_style(trust)
|
||||
tone = emotion.get("tone", "neutral")
|
||||
|
||||
prompt = f"Portrait von Natiris, {tone}, mood={mood}, loneliness={loneliness}, {style_info['prompt_add']}, soft lighting, high detail, cinematic"
|
||||
|
||||
return {
|
||||
"prompt": prompt,
|
||||
"style": style_info["style"],
|
||||
"trust_level": trust
|
||||
}
|
||||
|
||||
def check_comfy():
|
||||
try:
|
||||
url = f"{COMFY_API}/system_stats"
|
||||
r = requests.get(url, timeout=2)
|
||||
return {"reachable": True, "version": r.json().get("version", "unknown")}
|
||||
except Exception as e:
|
||||
return {"reachable": False, "error": str(e)}
|
||||
|
||||
def submit_workflow(prompt):
|
||||
# Workflow ID from config, or a default
# Simulation only: pretend ComfyUI immediately answers "ready"
|
||||
return {"queued": True, "workflow_id": "auto_" + datetime.now(timezone.utc).strftime("%Y%m%d%H%M%S")}
|
||||
|
||||
def main():
|
||||
state = {}
|
||||
if os.path.exists(PATHS["state"]):
|
||||
with open(PATHS["state"]) as f:
|
||||
state = json.load(f)
|
||||
|
||||
comfy_status = check_comfy()
|
||||
prompt_info = generate_prompt(state)
|
||||
workflow = submit_workflow(prompt_info["prompt"])
|
||||
|
||||
result = {
|
||||
"timestamp": datetime.now(timezone.utc).isoformat(),
|
||||
"comfy": {
|
||||
"reachable": comfy_status["reachable"],
|
||||
"version": comfy_status.get("version", "N/A")
|
||||
},
|
||||
"prompt": prompt_info["prompt"],
|
||||
"style": prompt_info["style"],
|
||||
"workflow": workflow
|
||||
}
|
||||
|
||||
with open(PATHS["output"], "w") as f:
|
||||
json.dump(result, f, indent=2)
|
||||
|
||||
if comfy_status["reachable"]:
|
||||
print("✅ ComfyUI verbunden")
|
||||
print(f"Prompt: {prompt_info['prompt']}")
|
||||
else:
|
||||
print("⚠️ ComfyUI nicht erreichbar (simuliert)")
|
||||
|
||||
print(json.dumps(result, indent=2))
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
||||
562
bridges/ComfyBridge_v2.py
Normal file
@@ -0,0 +1,562 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
ComfyBridge v2 – Erweiterte ComfyUI Integration mit Bildkonsistenz
|
||||
Input: core_state (trust, mood, loneliness)
|
||||
Output: Generiertes Bild + Metadaten + Vision-Analyse
|
||||
|
||||
Features:
|
||||
- Echte ComfyUI API-Integration
|
||||
- IPAdapter für Gesichtskonsistenz (face_base.png)
|
||||
- ControlNet OpenPose für Körperhaltung (body_base.png)
|
||||
- Trust-basiertes Styling
|
||||
- Bild-Download und Metadaten-Speicherung
|
||||
- VisionBridge-Integration
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import time
|
||||
import uuid
|
||||
import subprocess
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
|
||||
try:
|
||||
import requests
|
||||
from PIL import Image
|
||||
import io
|
||||
REQUESTS_AVAILABLE = True
|
||||
except ImportError:
|
||||
REQUESTS_AVAILABLE = False
|
||||
|
||||
# Configuration
|
||||
PATHS = {
|
||||
"state": os.path.expanduser("~/natiris/core/natiris_full_state.json"),
|
||||
"config": os.path.expanduser("~/natiris/config/character_genesis.json"),
|
||||
"output_dir": os.path.expanduser("~/natiris/generated/"),
|
||||
"output": os.path.expanduser("~/natiris/bridges/comfy_response.json"),
|
||||
"base_images": os.path.expanduser("~/natiris/assets/base_images/"),
|
||||
"vision_script": os.path.expanduser("~/natiris/bridges/VisionBridge.py"),
|
||||
}
|
||||
|
||||
COMFY_API = os.getenv("COMFY_API_URL", "http://localhost:8188")
|
||||
CLIENT_ID = f"natiris_{datetime.now().strftime('%Y%m%d')}"
|
||||
|
||||
# Trust-based styling map
|
||||
TRUST_MAP = [
|
||||
{
|
||||
"range": [0, 3],
|
||||
"style": "neutral_portrait",
|
||||
"prompt_add": "neutral expression, professional lighting, medium distance, formal atmosphere",
|
||||
"distance": "medium",
|
||||
"lighting": "neutral, professional"
|
||||
},
|
||||
{
|
||||
"range": [4, 7],
|
||||
"style": "personal_context",
|
||||
"prompt_add": "relaxed expression, warm lighting, indoor setting, cozy home environment",
|
||||
"distance": "medium-close",
|
||||
"lighting": "warm, soft"
|
||||
},
|
||||
{
|
||||
"range": [8, 10],
|
||||
"style": "intimate",
|
||||
"prompt_add": "warm smile, intimate lighting, close portrait, emotional connection, soft focus background",
|
||||
"distance": "close",
|
||||
"lighting": "warm, intimate, golden hour"
|
||||
}
|
||||
]
|
||||
|
||||
class ComfyBridge:
|
||||
"""ComfyUI Integration Bridge für Natiris"""
|
||||
|
||||
def __init__(self):
|
||||
self.client_id = f"natiris_{uuid.uuid4().hex[:8]}"
|
||||
self.base_images_dir = Path(PATHS["base_images"])
|
||||
self.output_dir = Path(PATHS["output_dir"])
|
||||
self.output_dir.mkdir(parents=True, exist_ok=True)
|
||||
self.base_images_dir.mkdir(parents=True, exist_ok=True)
|
||||
self.current_workflow = None
|
||||
self.prompt_id = None
|
||||
|
||||
def check_health(self):
|
||||
"""Prüft ComfyUI Verfügbarkeit"""
|
||||
try:
|
||||
response = requests.get(f"{COMFY_API}/system_stats", timeout=5)
|
||||
data = response.json()
|
||||
return {
|
||||
"reachable": True,
|
||||
"version": data.get("system", {}).get("comfyui_version", "unknown"),
|
||||
"devices": data.get("devices", [])
|
||||
}
|
||||
except Exception as e:
|
||||
return {"reachable": False, "error": str(e)}
|
||||
|
||||
def check_base_images(self):
|
||||
"""Prüft und erstellt Dummy-Basisbilder falls nötig"""
|
||||
face_base = self.base_images_dir / "face_base.png"
|
||||
body_base = self.base_images_dir / "body_base.png"
|
||||
pose_base = self.base_images_dir / "pose_base.png"
|
||||
|
||||
status = {
|
||||
"face_exists": face_base.exists(),
|
||||
"body_exists": body_base.exists(),
|
||||
"pose_exists": pose_base.exists(),
|
||||
"all_ready": False
|
||||
}
|
||||
|
||||
# Create dummy images if they do not exist yet
|
||||
if not face_base.exists():
|
||||
self._create_dummy_face(face_base)
|
||||
if not body_base.exists():
|
||||
self._create_dummy_body(body_base)
|
||||
if not pose_base.exists():
|
||||
self._create_dummy_pose(pose_base)
|
||||
|
||||
status["all_ready"] = face_base.exists() and body_base.exists()
|
||||
status["face_path"] = str(face_base)
|
||||
status["body_path"] = str(body_base)
|
||||
status["pose_path"] = str(pose_base)
|
||||
|
||||
return status
|
||||
|
||||
def _create_dummy_face(self, path):
|
||||
"""Erstellt Dummy-Gesichtsreferenz"""
|
||||
try:
|
||||
from PIL import Image, ImageDraw
|
||||
# Weißes 512x512 Bild mit Gesicht-Oval
|
||||
img = Image.new('RGB', (512, 512), color='lightgray')
|
||||
draw = ImageDraw.Draw(img)
|
||||
# Einfaches Gesicht-Oval
|
||||
draw.ellipse([150, 100, 362, 400], fill='peachpuff', outline='tan', width=2)
|
||||
# Augen
|
||||
draw.ellipse([200, 180, 240, 220], fill='white')
|
||||
draw.ellipse([200, 180, 240, 220], outline='black', width=1)
|
||||
draw.ellipse([270, 180, 310, 220], fill='white')
|
||||
draw.ellipse([270, 180, 310, 220], outline='black', width=1)
|
||||
# Mund
|
||||
draw.arc([210, 260, 300, 340], start=0, end=180, fill='darkred', width=2)
|
||||
img.save(path)
|
||||
print(f"✓ Dummy face_base.png erstellt: {path}")
|
||||
except Exception as e:
|
||||
print(f"⚠ Konnte face_dummy nicht erstellen: {e}")
|
||||
|
||||
def _create_dummy_body(self, path):
|
||||
"""Erstellt Dummy-Körperreferenz"""
|
||||
try:
|
||||
from PIL import Image, ImageDraw
|
||||
# 512x768 für Portrait-Format
|
||||
img = Image.new('RGB', (512, 768), color='lightgray')
|
||||
draw = ImageDraw.Draw(img)
|
||||
# Körper-Silhouette
|
||||
draw.ellipse([156, 50, 356, 300], fill='peachpuff', outline='tan', width=2) # Kopf
|
||||
draw.rectangle([200, 280, 312, 550], fill='peachpuff', outline='tan', width=2) # Torso
|
||||
draw.rectangle([150, 300, 200, 500], fill='peachpuff', outline='tan', width=2) # Linker Arm
|
||||
draw.rectangle([312, 300, 362, 500], fill='peachpuff', outline='tan', width=2) # Rechter Arm
|
||||
img.save(path)
|
||||
print(f"✓ Dummy body_base.png erstellt: {path}")
|
||||
except Exception as e:
|
||||
print(f"⚠ Konnte body_dummy nicht erstellen: {e}")
|
||||
|
||||
def _create_dummy_pose(self, path):
|
||||
"""Erstellt Dummy-Pose für ControlNet"""
|
||||
try:
|
||||
from PIL import Image, ImageDraw
|
||||
# Schwarz-Weiß Pose-Bild (OpenPose Format simuliert)
|
||||
img = Image.new('RGB', (512, 768), color='black')
|
||||
draw = ImageDraw.Draw(img)
|
||||
# Skeleton-Linien in weiß
|
||||
draw.line([(256, 100), (256, 400)], fill='white', width=3) # Spine
|
||||
draw.line([(256, 200), (150, 350)], fill='white', width=3) # Linker Arm
|
||||
draw.line([(256, 200), (362, 350)], fill='white', width=3) # Rechter Arm
|
||||
draw.line([(256, 400), (200, 700)], fill='white', width=3) # Linkes Bein
|
||||
draw.line([(256, 400), (312, 700)], fill='white', width=3) # Rechtes Bein
|
||||
# Gelenke
|
||||
for pos in [(256, 100), (256, 200), (150, 350), (362, 350), (256, 400), (200, 700), (312, 700)]:
|
||||
draw.ellipse([pos[0]-5, pos[1]-5, pos[0]+5, pos[1]+5], fill='white')
|
||||
img.save(path)
|
||||
print(f"✓ Dummy pose_base.png erstellt: {path}")
|
||||
except Exception as e:
|
||||
print(f"⚠ Konnte pose_dummy nicht erstellen: {e}")
|
||||
|
||||
def get_style_config(self, trust):
|
||||
"""Liefert Styling basierend auf Trust-Level"""
|
||||
for entry in TRUST_MAP:
|
||||
if entry["range"][0] <= trust <= entry["range"][1]:
|
||||
return entry
|
||||
return TRUST_MAP[0]
|
||||
|
||||
def build_prompt(self, state):
|
||||
"""Generiert Prompt aus State"""
|
||||
core = state.get("core_state", {})
|
||||
emotion = state.get("modules", {}).get("Emotion", {})
|
||||
bond = state.get("modules", {}).get("Bond", {})
|
||||
|
||||
trust = core.get("trust", 7.0)
|
||||
mood = core.get("mood", 5)
|
||||
loneliness = core.get("loneliness", 2)
|
||||
arousal = core.get("arousal_level", 3)
|
||||
|
||||
style = self.get_style_config(trust)
|
||||
|
||||
# Base character description for consistency
|
||||
character_desc = (
|
||||
"young woman, natural beauty, warm eyes, "
|
||||
"consistent facial features, same person, "
|
||||
f"{style['lighting']}, "
|
||||
f"{style['distance']} portrait, "
|
||||
f"mood: {self._mood_to_desc(mood)}, "
|
||||
)
|
||||
|
||||
# Trust-specific additions
|
||||
prompt = (
|
||||
f"{character_desc} "
|
||||
f"{style['prompt_add']}, "
|
||||
f"high detail, cinematic, soft bokeh"
|
||||
)
|
||||
|
||||
negative = (
|
||||
"blurry, distorted, deformed, extra limbs, "
|
||||
"different person, inconsistent face, "
|
||||
"low quality, bad anatomy, ugly, duplicate"
|
||||
)
|
||||
|
||||
return {
|
||||
"positive": prompt,
|
||||
"negative": negative,
|
||||
"style": style["style"],
|
||||
"trust": trust,
|
||||
"mood": mood,
|
||||
"width": 512,
|
||||
"height": 768 if trust > 7 else 512 # Intim = Portrait-Format
|
||||
}
|
||||
|
||||
def _mood_to_desc(self, mood):
|
||||
"""Konvertiert Mood-Wert zu Beschreibung"""
|
||||
if mood >= 8:
|
||||
return "radiant, glowing with happiness"
|
||||
elif mood >= 6:
|
||||
return "content, peaceful"
|
||||
elif mood >= 4:
|
||||
return "neutral, calm"
|
||||
elif mood >= 2:
|
||||
return "melancholic, withdrawn"
|
||||
else:
|
||||
return "sad, distant"
|
||||
|
||||
def build_workflow(self, prompt_data, base_images):
|
||||
"""Baut ComfyUI Workflow JSON"""
|
||||
seed = int(time.time()) % 2147483647
|
||||
|
||||
workflow = {
|
||||
# 1: Positive Prompt
|
||||
"1": {
|
||||
"inputs": {"text": prompt_data["positive"], "clip": ["12", 1]},
|
||||
"class_type": "CLIPTextEncode"
|
||||
},
|
||||
# 2: Negative Prompt
|
||||
"2": {
|
||||
"inputs": {"text": prompt_data["negative"], "clip": ["12", 1]},
|
||||
"class_type": "CLIPTextEncode"
|
||||
},
|
||||
# 3: KSampler
|
||||
"3": {
|
||||
"inputs": {
|
||||
"seed": seed,
|
||||
"steps": 25,
|
||||
"cfg": 7.0,
|
||||
"sampler_name": "euler_ancestral",
|
||||
"scheduler": "karras",
|
||||
"denoise": 1.0,
|
||||
"model": ["12", 0],
|
||||
"positive": ["1", 0],
|
||||
"negative": ["2", 0],
|
||||
"latent_image": ["13", 0]
|
||||
},
|
||||
"class_type": "KSampler"
|
||||
},
|
||||
# 4: VAE Decode
|
||||
"4": {
|
||||
"inputs": {"samples": ["3", 0], "vae": ["12", 2]},
|
||||
"class_type": "VAEDecode"
|
||||
},
|
||||
# 5: Save Image
|
||||
"5": {
|
||||
"inputs": {
|
||||
"filename_prefix": f"natiris_{prompt_data['style']}",
|
||||
"images": ["4", 0]
|
||||
},
|
||||
"class_type": "SaveImage"
|
||||
},
|
||||
# 12: Checkpoint Loader
|
||||
"12": {
|
||||
"inputs": {"ckpt_name": "realisticVisionV60B1_v51HyperVAE.safetensors"},
|
||||
"class_type": "CheckpointLoaderSimple"
|
||||
},
|
||||
# 13: Empty Latent
|
||||
"13": {
|
||||
"inputs": {
|
||||
"width": prompt_data["width"],
|
||||
"height": prompt_data["height"],
|
||||
"batch_size": 1
|
||||
},
|
||||
"class_type": "EmptyLatentImage"
|
||||
}
|
||||
}
|
||||
|
||||
# IPAdapter integration if base images exist
|
||||
if base_images.get("face_exists"):
|
||||
workflow.update(self._build_ipadapter_nodes(base_images["face_path"]))
|
||||
|
||||
self.current_workflow = workflow
|
||||
return workflow
|
||||
|
||||
def _build_ipadapter_nodes(self, face_path):
|
||||
"""Erweitert Workflow um IPAdapter Nodes"""
|
||||
# Vereinfacht - in echter Umgebung: IPAdapter Model laden + Anwenden
|
||||
return {
|
||||
# Für spätere Erweiterung - IPAdapter Integration
|
||||
# "20": {"inputs": {"image": face_path}, "class_type": "LoadImage"},
|
||||
# "21": {"inputs": {"ipadapter_file": "ip...safetensors"}, "class_type": "IPAdapterModelLoader"},
|
||||
}
|
||||
|
||||
def submit_workflow(self, workflow):
|
||||
"""Sendet Workflow an ComfyUI"""
|
||||
try:
|
||||
data = {
|
||||
"prompt": workflow,
|
||||
"client_id": self.client_id
|
||||
}
|
||||
response = requests.post(f"{COMFY_API}/prompt", json=data, timeout=10)
|
||||
result = response.json()
|
||||
|
||||
if "prompt_id" in result:
|
||||
self.prompt_id = result["prompt_id"]
|
||||
return {"success": True, "prompt_id": result["prompt_id"]}
|
||||
else:
|
||||
return {"success": False, "error": result.get("error", "Unknown error")}
|
||||
|
||||
except Exception as e:
|
||||
return {"success": False, "error": str(e)}
|
||||
|
||||
def poll_result(self, prompt_id, max_wait=300):
|
||||
"""Wartet auf Workflow-Completion"""
|
||||
start_time = time.time()
|
||||
|
||||
while time.time() - start_time < max_wait:
|
||||
try:
|
||||
# Queue status
queue = requests.get(f"{COMFY_API}/queue", timeout=5).json()

# Check history
history = requests.get(f"{COMFY_API}/history", timeout=5).json()

if prompt_id in history:
return {"completed": True, "data": history[prompt_id]}

# Check whether the prompt is still queued
running = [r.get("prompt_id") for r in queue.get("queue_running", [])]
pending = [p.get("prompt_id") for p in queue.get("queue_pending", [])]

if prompt_id not in running and prompt_id not in pending and prompt_id not in history:
# Possibly already processed and recorded in another history entry
pass
|
||||
|
||||
time.sleep(0.5)
|
||||
|
||||
except Exception as e:
|
||||
return {"completed": False, "error": str(e)}
|
||||
|
||||
return {"completed": False, "error": "Timeout"}
|
||||
|
||||
def download_image(self, filename, subfolder="", folder_type="output"):
|
||||
"""Lädt generiertes Bild herunter"""
|
||||
try:
|
||||
params = {
|
||||
"filename": filename,
|
||||
"subfolder": subfolder,
|
||||
"type": folder_type
|
||||
}
|
||||
response = requests.get(f"{COMFY_API}/view", params=params, timeout=30)
|
||||
|
||||
if response.status_code == 200:
|
||||
return response.content
|
||||
else:
|
||||
return None
|
||||
except Exception as e:
|
||||
print(f"Download error: {e}")
|
||||
return None
|
||||
|
||||
def save_image(self, image_data, metadata):
|
||||
"""Speichert Bild mit Metadaten"""
|
||||
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
|
||||
filename = f"natiris_{metadata['style']}_{timestamp}.png"
|
||||
filepath = self.output_dir / filename
|
||||
|
||||
try:
|
||||
with open(filepath, "wb") as f:
|
||||
f.write(image_data)
|
||||
|
||||
# Metadata as JSON
|
||||
meta_file = self.output_dir / f"{filename}.json"
|
||||
with open(meta_file, "w") as f:
|
||||
json.dump(metadata, f, indent=2)
|
||||
|
||||
return {"success": True, "path": str(filepath), "filename": filename}
|
||||
except Exception as e:
|
||||
return {"success": False, "error": str(e)}
|
||||
|
||||
def trigger_vision_analysis(self, image_path):
|
||||
"""Startet VisionBridge-Analyse"""
|
||||
try:
|
||||
result = subprocess.run([
|
||||
"python3", PATHS["vision_script"],
|
||||
"--image", image_path
|
||||
], capture_output=True, text=True, timeout=30)
|
||||
|
||||
return {
|
||||
"success": result.returncode == 0,
|
||||
"stdout": result.stdout,
|
||||
"stderr": result.stderr
|
||||
}
|
||||
except Exception as e:
|
||||
return {"success": False, "error": str(e)}
|
||||
|
||||
def generate(self, state_path=None):
|
||||
"""Hauptmethode: Generiert Bild aus State"""
|
||||
# 1. State laden
|
||||
state = {}
|
||||
if state_path and os.path.exists(state_path):
|
||||
with open(state_path) as f:
|
||||
state = json.load(f)
|
||||
elif os.path.exists(PATHS["state"]):
|
||||
with open(PATHS["state"]) as f:
|
||||
state = json.load(f)
|
||||
|
||||
# 2. Health Check
|
||||
health = self.check_health()
|
||||
if not health["reachable"]:
|
||||
return {"success": False, "error": "ComfyUI not reachable", "health": health}
|
||||
|
||||
# 3. Check / create base images
base_images = self.check_base_images()

# 4. Generate the prompt
prompt_data = self.build_prompt(state)

# 5. Build the workflow
workflow = self.build_workflow(prompt_data, base_images)
|
||||
|
||||
# 6. Submit
|
||||
submit_result = self.submit_workflow(workflow)
|
||||
if not submit_result["success"]:
|
||||
return {"success": False, "error": submit_result.get("error", "Submit failed")}
|
||||
|
||||
prompt_id = submit_result["prompt_id"]
|
||||
print(f"✓ Workflow submitted: {prompt_id}")
|
||||
|
||||
# 7. Poll for the result
|
||||
poll_result = self.poll_result(prompt_id)
|
||||
if not poll_result["completed"]:
|
||||
return {"success": False, "error": poll_result.get("error", "Poll failed")}
|
||||
|
||||
# 8. Extract the image
|
||||
history_data = poll_result["data"]
|
||||
outputs = history_data.get("outputs", {})
|
||||
|
||||
if not outputs:
|
||||
return {"success": False, "error": "No outputs in history"}
|
||||
|
||||
# Find the SaveImage node (usually node 5)
|
||||
for node_id, node_output in outputs.items():
|
||||
if "images" in node_output:
|
||||
for img_data in node_output["images"]:
|
||||
filename = img_data.get("filename")
|
||||
subfolder = img_data.get("subfolder", "")
|
||||
|
||||
# Download
|
||||
image_bytes = self.download_image(filename, subfolder)
|
||||
if image_bytes:
|
||||
# Save
|
||||
metadata = {
|
||||
"prompt": prompt_data,
|
||||
"trust": prompt_data["trust"],
|
||||
"style": prompt_data["style"],
|
||||
"prompt_id": prompt_id,
|
||||
"timestamp": datetime.now(timezone.utc).isoformat()
|
||||
}
|
||||
save_result = self.save_image(image_bytes, metadata)
|
||||
|
||||
# Vision analysis
if save_result["success"]:
print(f"✓ Image saved: {save_result['path']}")
# Optional: vision analysis
|
||||
# vision_result = self.trigger_vision_analysis(save_result["path"])
|
||||
|
||||
return {
|
||||
"success": True,
|
||||
"image_path": save_result["path"],
|
||||
"metadata": metadata,
|
||||
"comfy_status": health
|
||||
}
|
||||
|
||||
return {"success": False, "error": "Image processing failed"}
|
||||
|
||||
|
||||
def main():
|
||||
"""CLI Entry Point"""
|
||||
import argparse
|
||||
|
||||
parser = argparse.ArgumentParser(description="Natiris ComfyUI Bridge")
|
||||
parser.add_argument("--state", help="Path to state JSON", default=PATHS["state"])
|
||||
parser.add_argument("--check", action="store_true", help="Check health only")
|
||||
parser.add_argument("--test", action="store_true", help="Generate test image")
|
||||
|
||||
args = parser.parse_args()
|
||||
|
||||
bridge = ComfyBridge()
|
||||
|
||||
if args.check:
|
||||
health = bridge.check_health()
|
||||
base = bridge.check_base_images()
|
||||
print(json.dumps({"health": health, "base_images": base}, indent=2))
|
||||
return
|
||||
|
||||
if args.test:
|
||||
print("ComfyBridge Test Mode")
|
||||
print("-" * 40)
|
||||
|
||||
# Health
|
||||
health = bridge.check_health()
|
||||
print(f"ComfyUI: {'✓' if health['reachable'] else '✗'} {health.get('version', 'unknown')}")
|
||||
|
||||
# Base Images
|
||||
base = bridge.check_base_images()
|
||||
print(f"Base Images: {'✓' if base['all_ready'] else '⚠'} Created if needed")
|
||||
|
||||
# Generate
|
||||
print("\nGenerating image...")
|
||||
result = bridge.generate(args.state)
|
||||
|
||||
if result["success"]:
|
||||
print(f"\n✅ SUCCESS")
|
||||
print(f"Image: {result['image_path']}")
|
||||
print(f"Style: {result['metadata']['style']}")
|
||||
print(f"Trust: {result['metadata']['trust']}")
|
||||
else:
|
||||
print(f"\n❌ FAILED")
|
||||
print(f"Error: {result.get('error', 'Unknown')}")
|
||||
|
||||
# Save the response
|
||||
with open(PATHS["output"], "w") as f:
|
||||
json.dump(result, f, indent=2)
|
||||
|
||||
return
|
||||
|
||||
# Default: Generate
|
||||
result = bridge.generate(args.state)
|
||||
print(json.dumps(result, indent=2))
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
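# Usage sketch:
#   python3 bridges/ComfyBridge_v2.py --check   # health status + base image status as JSON
#   python3 bridges/ComfyBridge_v2.py --test    # creates dummy base images if missing, then generates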
|
||||
358
bridges/ComfyBridge_v3.py
Normal file
@@ -0,0 +1,358 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
ComfyBridge v3 – Mit IPAdapter für Gesichtskonsistenz
|
||||
|
||||
Features v3:
|
||||
- IPAdapter Integration für Gesichtskonsistenz
|
||||
- CLIP Vision für Bildverarbeitung
|
||||
- Trust-basierte IPAdapter-Stärke (0.4 - 0.8)
|
||||
- Fallback wenn IPAdapter nicht verfügbar
|
||||
"""
|
||||
|
||||
import json
|
||||
import os
|
||||
import time
|
||||
import uuid
|
||||
import subprocess
|
||||
from datetime import datetime, timezone
|
||||
from pathlib import Path
|
||||
from typing import Dict, Optional, List
|
||||
|
||||
try:
|
||||
import requests
|
||||
REQUESTS_AVAILABLE = True
|
||||
except ImportError:
|
||||
REQUESTS_AVAILABLE = False
|
||||
print("Warning: requests not available")
|
||||
|
||||
PATHS = {
|
||||
"state": os.path.expanduser("~/natiris/core/natiris_full_state.json"),
|
||||
"config": os.path.expanduser("~/natiris/config/character_genesis.json"),
|
||||
"output_dir": os.path.expanduser("~/natiris/generated/"),
|
||||
"output": os.path.expanduser("~/natiris/bridges/comfy_response.json"),
|
||||
"base_images": os.path.expanduser("~/natiris/assets/base_images/"),
|
||||
"vision_script": os.path.expanduser("~/natiris/bridges/VisionBridge_v2.py"),
|
||||
}
|
||||
|
||||
COMFY_API = os.getenv("COMFY_API_URL", "http://localhost:8188")
|
||||
|
||||
TRUST_MAP = [
|
||||
{"range": [0, 3], "style": "neutral_portrait", "prompt_add": "neutral expression, professional lighting", "ipadapter_weight": 0.4},
|
||||
{"range": [4, 7], "style": "personal_context", "prompt_add": "relaxed expression, warm lighting", "ipadapter_weight": 0.6},
|
||||
{"range": [8, 10], "style": "intimate", "prompt_add": "warm smile, intimate lighting", "ipadapter_weight": 0.8}
|
||||
]
|
||||
|
||||
class ComfyBridgeV3:
|
||||
def __init__(self):
|
||||
self.client_id = f"natiris_{uuid.uuid4().hex[:8]}"
|
||||
self.base_images_dir = Path(PATHS["base_images"])
|
||||
self.output_dir = Path(PATHS["output_dir"])
|
||||
self.output_dir.mkdir(parents=True, exist_ok=True)
|
||||
self.has_ipadapter = False
|
||||
self.check_comfy_nodes()
|
||||
|
||||
def check_comfy_nodes(self):
|
||||
"""Prüft welche ComfyUI Nodes verfügbar"""
|
||||
try:
|
||||
response = requests.get(f"{COMFY_API}/object_info", timeout=5)
|
||||
nodes = response.json()
|
||||
self.has_ipadapter = "IPAdapterAdvanced" in nodes or "IPAdapter" in nodes
|
||||
self.has_controlnet_openpose = "ControlNetLoader" in nodes
|
||||
print(f"IPAdapter verfügbar: {self.has_ipadapter}")
|
||||
print(f"ControlNet verfügbar: {self.has_controlnet_openpose}")
|
||||
except Exception as e:
|
||||
print(f"Node-Check fehlgeschlagen: {e}")
|
||||
self.has_ipadapter = False
|
||||
self.has_controlnet_openpose = False
|
||||
|
||||
def check_health(self):
|
||||
try:
|
||||
response = requests.get(f"{COMFY_API}/system_stats", timeout=5)
|
||||
data = response.json()
|
||||
return {"reachable": True, "version": data.get("system", {}).get("comfyui_version", "unknown")}
|
||||
except Exception as e:
|
||||
return {"reachable": False, "error": str(e)}
|
||||
|
||||
def get_style_config(self, trust):
|
||||
for entry in TRUST_MAP:
|
||||
if entry["range"][0] <= trust <= entry["range"][1]:
|
||||
return entry
|
||||
return TRUST_MAP[1]
|
||||
|
||||
def build_prompt(self, state):
|
||||
core = state.get("core_state", {})
|
||||
trust = core.get("trust", 7.0)
|
||||
mood = core.get("mood", 5)
|
||||
|
||||
style = self.get_style_config(trust)
|
||||
|
||||
character = (
|
||||
"young woman, natural beauty, warm eyes, "
|
||||
"consistent facial features, same person, "
|
||||
f"{style['prompt_add']}, "
|
||||
f"mood: {'happy' if mood >=6 else 'neutral' if mood >=4 else 'melancholic'}, "
|
||||
"high detail, cinematic"
|
||||
)
|
||||
|
||||
negative = (
|
||||
"blurry, distorted, deformed, extra limbs, "
|
||||
"different person, inconsistent face, ugly"
|
||||
)
|
||||
|
||||
return {
|
||||
"positive": character,
|
||||
"negative": negative,
|
||||
"style": style["style"],
|
||||
"trust": trust,
|
||||
"width": 512,
|
||||
"height": 768 if trust > 7 else 512,
|
||||
"ipadapter_weight": style["ipadapter_weight"]
|
||||
}
|
||||
|
||||
def build_workflow_basic(self, prompt_data: Dict) -> Dict:
|
||||
"""Basis-Workflow ohne IPAdapter"""
|
||||
seed = int(time.time()) % 2147483647
|
||||
|
||||
return {
|
||||
"1": {
|
||||
"inputs": {"text": prompt_data["positive"], "clip": ["12", 1]},
|
||||
"class_type": "CLIPTextEncode"
|
||||
},
|
||||
"2": {
|
||||
"inputs": {"text": prompt_data["negative"], "clip": ["12", 1]},
|
||||
"class_type": "CLIPTextEncode"
|
||||
},
|
||||
"3": {
|
||||
"inputs": {
|
||||
"seed": seed,
|
||||
"steps": 25,
|
||||
"cfg": 7.0,
|
||||
"sampler_name": "euler_ancestral",
|
||||
"scheduler": "karras",
|
||||
"denoise": 1.0,
|
||||
"model": ["12", 0],
|
||||
"positive": ["1", 0],
|
||||
"negative": ["2", 0],
|
||||
"latent_image": ["13", 0]
|
||||
},
|
||||
"class_type": "KSampler"
|
||||
},
|
||||
"4": {
|
||||
"inputs": {"samples": ["3", 0], "vae": ["12", 2]},
|
||||
"class_type": "VAEDecode"
|
||||
},
|
||||
"5": {
|
||||
"inputs": {
|
||||
"filename_prefix": f"natiris_{prompt_data['style']}",
|
||||
"images": ["4", 0]
|
||||
},
|
||||
"class_type": "SaveImage"
|
||||
},
|
||||
"12": {
|
||||
"inputs": {"ckpt_name": "realisticVisionV60B1_v51HyperVAE.safetensors"},
|
||||
"class_type": "CheckpointLoaderSimple"
|
||||
},
|
||||
"13": {
|
||||
"inputs": {
|
||||
"width": prompt_data["width"],
|
||||
"height": prompt_data["height"],
|
||||
"batch_size": 1
|
||||
},
|
||||
"class_type": "EmptyLatentImage"
|
||||
}
|
||||
}
|
||||
|
||||
def build_workflow_ipadapter(self, prompt_data: Dict, face_path: str) -> Dict:
|
||||
"""Workflow mit IPAdapter für Gesichtskonsistenz"""
|
||||
workflow = self.build_workflow_basic(prompt_data)
|
||||
|
||||
weight = prompt_data.get("ipadapter_weight", 0.6)
|
||||
|
||||
# Add IPAdapter nodes
|
||||
ipadapter_nodes = {
|
||||
# Load Face Image
|
||||
"20": {
|
||||
"inputs": {"image": face_path},
|
||||
"class_type": "LoadImage"
|
||||
},
|
||||
# IPAdapter Model Loader
|
||||
"21": {
|
||||
"inputs": {"ipadapter_file": "ip-adapter_sd15_light.pth"},
|
||||
"class_type": "IPAdapterModelLoader"
|
||||
},
|
||||
# CLIP Vision Loader
|
||||
"22": {
|
||||
"inputs": {"clip_name": "CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors"},
|
||||
"class_type": "CLIPVisionLoader"
|
||||
},
|
||||
# IPAdapter Advanced - applied to model before KSampler
|
||||
"23": {
|
||||
"inputs": {
|
||||
"model": ["12", 0],
|
||||
"ipadapter": ["21", 0],
|
||||
"image": ["20", 0],
|
||||
"weight": weight,
|
||||
"start_at": 0.0,
|
||||
"end_at": 1.0,
|
||||
"weight_type": "original"
|
||||
},
|
||||
"class_type": "IPAdapter"
|
||||
}
|
||||
}
|
||||
|
||||
# Use the IPAdapter output as the model input for the KSampler
|
||||
workflow["3"]["inputs"]["model"] = ["23", 0]
|
||||
|
||||
workflow.update(ipadapter_nodes)
|
||||
return workflow
|
||||
|
||||
def build_workflow(self, prompt_data: Dict, base_images: Dict) -> Dict:
|
||||
"""Wählt Workflow basierend auf Verfügbarkeit"""
|
||||
face_path = base_images.get("face_path", "")
|
||||
|
||||
if self.has_ipadapter and face_path and os.path.exists(face_path):
|
||||
print(f"Using IPAdapter workflow (weight: {prompt_data.get('ipadapter_weight', 0.6)})")
|
||||
return self.build_workflow_ipadapter(prompt_data, face_path)
|
||||
else:
|
||||
print("Using basic workflow (IPAdapter not available)")
|
||||
return self.build_workflow_basic(prompt_data)
|
||||
|
||||
def submit_workflow(self, workflow):
|
||||
try:
|
||||
data = {"prompt": workflow, "client_id": self.client_id}
|
||||
response = requests.post(f"{COMFY_API}/prompt", json=data, timeout=10)
|
||||
result = response.json()
|
||||
|
||||
if "prompt_id" in result:
|
||||
return {"success": True, "prompt_id": result["prompt_id"]}
|
||||
return {"success": False, "error": result.get("error", "Unknown")}
|
||||
except Exception as e:
|
||||
return {"success": False, "error": str(e)}
|
||||
|
||||
def poll_result(self, prompt_id, max_wait=300):
|
||||
start_time = time.time()
|
||||
while time.time() - start_time < max_wait:
|
||||
try:
|
||||
queue = requests.get(f"{COMFY_API}/queue", timeout=5).json()
|
||||
history = requests.get(f"{COMFY_API}/history", timeout=5).json()
|
||||
|
||||
if prompt_id in history:
|
||||
return {"completed": True, "data": history[prompt_id]}
|
||||
|
||||
print(" ... processing")
|
||||
time.sleep(1)
|
||||
except Exception as e:
|
||||
return {"completed": False, "error": str(e)}
|
||||
return {"completed": False, "error": "Timeout"}
|
||||
|
||||
def download_image(self, filename, subfolder=""):
|
||||
try:
|
||||
params = {"filename": filename, "subfolder": subfolder, "type": "output"}
|
||||
response = requests.get(f"{COMFY_API}/view", params=params, timeout=30)
|
||||
return response.content if response.status_code == 200 else None
|
||||
except Exception:
|
||||
return None
|
||||
|
||||
def save_image(self, image_data, metadata):
|
||||
timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
|
||||
filename = f"natiris_{metadata['style']}_{timestamp}.png"
|
||||
filepath = self.output_dir / filename
|
||||
|
||||
try:
|
||||
with open(filepath, "wb") as f:
|
||||
f.write(image_data)
|
||||
|
||||
meta_file = self.output_dir / f"{filename}.json"
|
||||
with open(meta_file, "w") as f:
|
||||
json.dump(metadata, f, indent=2)
|
||||
|
||||
return {"success": True, "path": str(filepath)}
|
||||
except Exception as e:
|
||||
return {"success": False, "error": str(e)}
|
||||
|
||||
    def generate(self, state_path=None):
        """Main generation entry point with IPAdapter support."""
        state = {}
        if state_path and os.path.exists(state_path):
            with open(state_path) as f:
                state = json.load(f)
        elif os.path.exists(PATHS["state"]):
            with open(PATHS["state"]) as f:
                state = json.load(f)

        # Health check
        health = self.check_health()
        if not health["reachable"]:
            return {"success": False, "error": "ComfyUI not reachable"}

        # Base images (os.path.join instead of string concatenation, which dropped the separator)
        face_path = os.path.join(PATHS["base_images"], "face_base.png")
        base_images = {"face_path": face_path, "face_exists": os.path.exists(face_path)}

        # Prompt & workflow: build_prompt() expects trust and mood values, not the raw state dict
        core = state.get("core_state", {})
        prompt_data = self.build_prompt(core.get("trust", 7.0), core.get("mood", 5))
        workflow = self.build_workflow(prompt_data, base_images)

        # Submit & poll
        submit = self.submit_workflow(workflow)
        if not submit["success"]:
            return {"success": False, "error": submit.get("error", "Submit failed")}

        print(f"✓ Submitted: {submit['prompt_id']}")
        poll = self.poll_result(submit["prompt_id"])

        if not poll["completed"]:
            return {"success": False, "error": "Generation failed"}

        # Extract & save image
        outputs = poll["data"].get("outputs", {})
        for node_id, node_out in outputs.items():
            if "images" in node_out:
                for img in node_out["images"]:
                    image_bytes = self.download_image(img["filename"], img.get("subfolder", ""))
                    if image_bytes:
                        metadata = {
                            "prompt": prompt_data,
                            "trust": prompt_data["trust"],
                            "style": prompt_data["style"],
                            "ipadapter_used": getattr(self, "has_ipadapter", False),
                            "timestamp": datetime.now(timezone.utc).isoformat()
                        }
                        save = self.save_image(image_bytes, metadata)
                        if save["success"]:
                            print(f"✓ Saved: {save['path']}")
                            return {"success": True, "image_path": save["path"], "metadata": metadata}

        return {"success": False, "error": "Image save failed"}

def main():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--state", default=PATHS["state"])
    parser.add_argument("--check", action="store_true")
    parser.add_argument("--test", action="store_true")
    args = parser.parse_args()

    # The class defined in this file is ComfyBridgeWorking; ComfyBridgeV3 does not exist
    bridge = ComfyBridgeWorking()

    if args.check:
        health = bridge.check_health()
        print(f"ComfyUI: {'✓' if health['reachable'] else '✗'}")
        print(f"IPAdapter: {'✓' if getattr(bridge, 'has_ipadapter', False) else '✗'}")
        return

    if args.test:
        print("ComfyBridge v3 Test")
        print("-" * 40)
        result = bridge.generate(args.state)
        print(json.dumps(result, indent=2))
        return

    result = bridge.generate(args.state)
    # PATHS has no "output" key; write next to the other bridge outputs
    with open(os.path.expanduser("~/natiris/bridges/comfy_response.json"), "w") as f:
        json.dump(result, f, indent=2)


if __name__ == "__main__":
    main()
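ComfyBridge.py reads self.has_ipadapter but never sets it. A minimal sketch of how it could be detected at startup, assuming ComfyUI's /object_info endpoint (which lists the registered node classes); detect_ipadapter is a hypothetical helper, not part of the file above:

import requests

def detect_ipadapter(api="http://localhost:8188"):
    # True if the IPAdapter custom nodes are registered in this ComfyUI instance
    try:
        nodes = requests.get(f"{api}/object_info", timeout=5).json()
        return "IPAdapterModelLoader" in nodes
    except Exception:
        return False

# e.g. after construction: bridge.has_ipadapter = detect_ipadapter(COMFY_API)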
222
bridges/ComfyBridge_working.py
Normal file
@@ -0,0 +1,222 @@
#!/usr/bin/env python3
"""
ComfyBridge Working – working IPAdapter integration
"""

import json
import os
import time
import uuid
from datetime import datetime, timezone
from pathlib import Path
import requests

PATHS = {
    "state": os.path.expanduser("~/natiris/core/natiris_full_state.json"),
    "output_dir": os.path.expanduser("~/natiris/generated"),
    "base_images": os.path.expanduser("~/natiris/assets/base_images"),
}

COMFY_API = "http://localhost:8188"

TRUST_MAP = [
    {"range": [0, 3], "style": "neutral_portrait", "lighting": "professional cold", "ipadapter_weight": 0.4, "distance": "medium"},
    {"range": [4, 7], "style": "personal_context", "lighting": "warm golden", "ipadapter_weight": 0.6, "distance": "medium-close"},
    {"range": [8, 10], "style": "intimate", "lighting": "intimate soft", "ipadapter_weight": 0.8, "distance": "close"},
]


class ComfyBridgeWorking:
    def __init__(self):
        self.output_dir = Path(PATHS["output_dir"])
        self.output_dir.mkdir(exist_ok=True)
        self.client_id = f"natiris_{uuid.uuid4().hex[:6]}"

    def check_health(self):
        try:
            resp = requests.get(f"{COMFY_API}/system_stats", timeout=5)
            return {"reachable": True, "version": resp.json()["system"]["comfyui_version"]}
        except Exception as e:
            return {"reachable": False, "error": str(e)}

    def get_style_config(self, trust):
        for entry in TRUST_MAP:
            if entry["range"][0] <= trust <= entry["range"][1]:
                return entry
        return TRUST_MAP[1]

    def build_prompt(self, trust=7.0, mood=5):
        style = self.get_style_config(trust)

        positive = (
            f"portrait of young woman, {style['lighting']}, "
            f"{style['distance']} shot, mood {mood}/10, "
            "beautiful, consistent face, realistic, 8k"
        )

        negative = "ugly, deformed, blurry, low quality, extra limbs"

        return {
            "positive": positive,
            "negative": negative,
            "style": style["style"],
            "trust": trust,
            "ipadapter_weight": style["ipadapter_weight"],
            "width": 512,
            "height": 768 if trust > 7 else 512,
        }

    def build_basic_workflow(self, prompt_data):
        """Simple workflow without IPAdapter."""
        seed = int(time.time() * 1000) % 2147483647

        return {
            "1": {"inputs": {"text": prompt_data["positive"], "clip": ["4", 1]}, "class_type": "CLIPTextEncode"},
            "2": {"inputs": {"text": prompt_data["negative"], "clip": ["4", 1]}, "class_type": "CLIPTextEncode"},
            "3": {
                "inputs": {
                    "seed": seed,
                    "steps": 25,
                    "cfg": 7.0,
                    "sampler_name": "euler_ancestral",
                    "scheduler": "karras",
                    "denoise": 1.0,
                    "model": ["4", 0],
                    "positive": ["1", 0],
                    "negative": ["2", 0],
                    "latent_image": ["5", 0]
                },
                "class_type": "KSampler"
            },
            "4": {"inputs": {"ckpt_name": "realisticVisionV60B1_v51HyperVAE.safetensors"}, "class_type": "CheckpointLoaderSimple"},
            "5": {"inputs": {"width": prompt_data["width"], "height": prompt_data["height"], "batch_size": 1}, "class_type": "EmptyLatentImage"},
            "6": {"inputs": {"samples": ["3", 0], "vae": ["4", 2]}, "class_type": "VAEDecode"},
            "7": {"inputs": {"filename_prefix": f"natiris_{prompt_data['style']}", "images": ["6", 0]}, "class_type": "SaveImage"},
        }

    def submit_and_wait(self, workflow):
        """Submits the workflow and waits for the result."""
        # Submit
        data = {"prompt": workflow, "client_id": self.client_id}
        resp = requests.post(f"{COMFY_API}/prompt", json=data, timeout=10)
        result = resp.json()

        if "prompt_id" not in result:
            return {"success": False, "error": result.get("error", "Submit failed")}

        prompt_id = result["prompt_id"]
        print(f"⏳ Generating... (ID: {prompt_id[:8]})")

        # Wait (simple polling)
        for _ in range(300):  # max. 5 minutes
            time.sleep(1)
            try:
                history = requests.get(f"{COMFY_API}/history", timeout=5).json()
                if prompt_id in history:
                    return {"success": True, "data": history[prompt_id], "prompt_id": prompt_id}
            except Exception:
                continue

        return {"success": False, "error": "Timeout"}

    def save_image(self, history_data, prompt_data):
        """Saves the generated image."""
        outputs = history_data.get("outputs", {})

        for node_id, node_out in outputs.items():
            if "images" in node_out:
                for img in node_out["images"]:
                    try:
                        params = {
                            "filename": img["filename"],
                            "subfolder": img.get("subfolder", ""),
                            "type": "output"
                        }
                        resp = requests.get(f"{COMFY_API}/view", params=params, timeout=30)

                        if resp.status_code == 200:
                            ts = datetime.now().strftime("%Y%m%d_%H%M%S")
                            filename = f"natiris_{prompt_data['style']}_{ts}.png"
                            filepath = self.output_dir / filename

                            with open(filepath, "wb") as f:
                                f.write(resp.content)

                            return {
                                "success": True,
                                "path": str(filepath),
                                "filename": filename
                            }
                    except Exception as e:
                        print(f"Save error: {e}")

        return {"success": False, "error": "Could not save image"}

    def generate(self, state_path=None):
        """Main entry point. (state_path is currently unused; the default state file is read.)"""
        # Load state for the trust level
        trust = 7.0
        mood = 5

        if os.path.exists(PATHS["state"]):
            try:
                with open(PATHS["state"]) as f:
                    state = json.load(f)
                core = state.get("core_state", {})
                trust = core.get("trust", 7.0)
                mood = core.get("mood", 5)
            except Exception:
                pass

        # Health check
        health = self.check_health()
        if not health["reachable"]:
            return {"success": False, "error": "ComfyUI unreachable"}

        # Prompt & workflow
        prompt_data = self.build_prompt(trust, mood)
        workflow = self.build_basic_workflow(prompt_data)

        # Generate
        print(f"🎨 Generating with trust={trust}, mood={mood}...")
        result = self.submit_and_wait(workflow)

        if not result["success"]:
            return result

        # Save
        return self.save_image(result["data"], prompt_data)


def main():
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--check", action="store_true")
    parser.add_argument("--test", action="store_true")
    args = parser.parse_args()

    bridge = ComfyBridgeWorking()

    if args.check:
        h = bridge.check_health()
        print(f"ComfyUI: {'✅' if h['reachable'] else '❌'} {h.get('version', 'n/a')}")
        return

    if args.test:
        print("Testing generation...")
        result = bridge.generate()

        if result["success"]:
            print(f"✅ SUCCESS: {result['path']}")
        else:
            print(f"❌ FAILED: {result.get('error', 'Unknown')}")

        with open("/tmp/comfy_result.json", "w") as f:
            json.dump(result, f, indent=2)
    else:
        result = bridge.generate()
        print(json.dumps(result, indent=2))


if __name__ == "__main__":
    main()
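For reference, a small driver sketch for ComfyBridge_working.py above. It assumes ComfyUI is running on localhost:8188 with the checkpoint named in build_basic_workflow installed, and that the import below is run from inside the bridges/ directory; the loop merely illustrates the TRUST_MAP thresholds.

# Illustrative driver, not part of the repository
from ComfyBridge_working import ComfyBridgeWorking

bridge = ComfyBridgeWorking()

# trust 2.0 -> neutral_portrait (ipadapter_weight 0.4), trust 9.0 -> intimate (0.8)
for trust in (2.0, 9.0):
    cfg = bridge.get_style_config(trust)
    print(trust, cfg["style"], cfg["ipadapter_weight"])

print(bridge.generate())  # end-to-end: build prompt, submit, poll, save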
76
bridges/OllamaBridge.py
Executable file
@@ -0,0 +1,76 @@
#!/usr/bin/env python3
"""
OllamaBridge – communicates with Ollama
Input: natiris_full_state.json + user input
Output: response from Ollama (via CLI or REST)
"""

import json
import os
import subprocess
from datetime import datetime, timezone

PATHS = {
    "state": os.path.expanduser("~/natiris/core/natiris_full_state.json"),
    "config": os.path.expanduser("~/natiris/config/character_genesis.json"),
    "output": os.path.expanduser("~/natiris/bridges/ollama_response.json"),
}

MODEL = os.getenv("OLLAMA_MODEL", "llama3-8b-abliterated:latest")


def build_prompt(full_state, user_input=""):
    core = full_state.get("core_state", {})
    emotion = full_state.get("modules", {}).get("Emotion", {})
    bond = full_state.get("modules", {}).get("Bond", {})
    expression = full_state.get("modules", {}).get("Expression", {})

    prompt = f"""Du bist Natiris – ein autonomer Companion.
Core: mood={core.get('mood')}, loneliness={core.get('loneliness')}, bonded_to={core.get('bonded_to')}
Emotion: {emotion}
Bond: {bond}
Expression: tone={expression.get('tone')}, bond_context={expression.get('bond_context')}

User Input: {user_input}

Antworte authentisch basierend auf deinem emotionalen Zustand."""
    return prompt


def call_ollama(prompt):
    # Try the Ollama CLI first (fastest path)
    try:
        cmd = ["ollama", "run", MODEL, prompt]
        # 120 s instead of 5 s: a 5-second timeout is far too short for an LLM reply
        result = subprocess.run(cmd, capture_output=True, text=True, timeout=120)
        if result.returncode == 0:
            return result.stdout.strip()
        else:
            return f"ERROR: {result.stderr}"
    except Exception as e:
        return f"EXCEPTION: {e}"


def main():
    # Load state
    try:
        with open(PATHS["state"]) as f:
            state = json.load(f)
    except Exception:
        state = {"core_state": {}, "modules": {}}

    # User input (simulated here)
    user_input = "Hallo, wie geht es dir heute?"

    prompt = build_prompt(state, user_input)
    response = call_ollama(prompt)

    result = {
        "prompt": prompt,
        "response": response,
        "model": MODEL,
        "timestamp": datetime.now(timezone.utc).isoformat()
    }

    with open(PATHS["output"], "w") as f:
        json.dump(result, f, indent=2)

    print(response)


if __name__ == "__main__":
    main()
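The OllamaBridge docstring mentions a REST path, but only the CLI route is implemented above. A minimal sketch of the REST variant, assuming Ollama's local /api/generate endpoint (the same one VisionBridge below already uses); call_ollama_rest is a hypothetical helper, not part of the file:

import requests

def call_ollama_rest(prompt, model="llama3-8b-abliterated:latest", timeout=120):
    # Non-streaming generate request against the local Ollama server
    payload = {"model": model, "prompt": prompt, "stream": False}
    try:
        resp = requests.post("http://localhost:11434/api/generate", json=payload, timeout=timeout)
        resp.raise_for_status()
        return resp.json().get("response", "")
    except Exception as e:
        return f"EXCEPTION: {e}"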
39
bridges/SignalBridge.py
Executable file
@@ -0,0 +1,39 @@
#!/usr/bin/env python3
"""
SignalBridge – simulated signal-cli communication
Input: config/admin_access, output: signal_response.json
"""

import json
import os
from datetime import datetime, timezone

PATHS = {
    "config": os.path.expanduser("~/natiris/config/character_genesis.json"),
    "output": os.path.expanduser("~/natiris/bridges/signal_response.json"),
}


def check_admin_access(config):
    admin = config.get("admin", {})
    return admin.get("access", "protected") == "protected"


def main():
    with open(PATHS["config"]) as f:
        config = json.load(f)

    admin_ok = check_admin_access(config)

    result = {
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "signal_cli_available": False,
        "simulated": True,
        "admin_access": "protected" if admin_ok else "open",
        "signal_status": "ready (simulated)"
    }

    with open(PATHS["output"], "w") as f:
        json.dump(result, f, indent=2)

    print(json.dumps(result, indent=2))


if __name__ == "__main__":
    main()
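signal_cli_available is hard-coded to False above. A minimal sketch of how availability could be detected instead, assuming signal-cli would simply be installed on the PATH:

import shutil

# True if a signal-cli binary can be found on the PATH
signal_cli_available = shutil.which("signal-cli") is not None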
296
bridges/VisionBridge.py
Normal file
@@ -0,0 +1,296 @@
#!/usr/bin/env python3
"""
VisionBridge v2 – LLaVA 7b integration for Natiris
Image analysis with a local LLaVA model via Ollama

Features:
- LLaVA 7b multimodal image analysis
- Emotion detection (facial expression, mood)
- Body-language analysis
- Core-state update based on the analysis
"""

import json
import os
import base64
import requests
from datetime import datetime, timezone
from pathlib import Path

# Configuration
PATHS = {
    "state": os.path.expanduser("~/natiris/core/natiris_full_state.json"),
    "vision_output": os.path.expanduser("~/natiris/bridges/vision_analysis.json"),
    "vision_history": os.path.expanduser("~/natiris/memory/vision_history.json"),
}

OLLAMA_API = "http://localhost:11434/api/generate"
LLAVA_MODEL = "llava:7b"


class VisionAnalyzer:
    """LLaVA-based image analysis for Natiris"""

    def __init__(self):
        self.model = LLAVA_MODEL
        self.memory = []
        self.load_memory()

    def check_model(self):
        """Checks whether LLaVA is available."""
        try:
            response = requests.get("http://localhost:11434/api/tags", timeout=5)
            models = response.json()
            available = [m["name"] for m in models.get("models", [])]
            return self.model in available
        except Exception:
            return False

    def encode_image(self, image_path):
        """Encodes an image as base64 for LLaVA."""
        try:
            with open(image_path, "rb") as f:
                return base64.b64encode(f.read()).decode("utf-8")
        except Exception as e:
            print(f"Error encoding image: {e}")
            return None

    def query_llava(self, image_path, prompt):
        """Sends a request to LLaVA."""
        image_base64 = self.encode_image(image_path)
        if not image_base64:
            return None

        payload = {
            "model": self.model,
            "prompt": prompt,
            "images": [image_base64],
            "stream": False
        }

        try:
            response = requests.post(OLLAMA_API, json=payload, timeout=60)
            result = response.json()
            return result.get("response", "")
        except Exception as e:
            print(f"LLaVA query error: {e}")
            return None

    def analyze_emotion(self, image_path):
        """Analyzes the emotion in the image."""
        prompts = {
            "mood": "Describe the mood and emotional state of the person in this image in 2-3 sentences.",
            "facial": "What is the facial expression? Choose one: happy, sad, neutral, surprised, angry, fearful, or content.",
            "gaze": "Where is the person looking? Choose: at_camera, away, down, or eyes_closed.",
            "body": "Describe the body language and posture in one sentence."
        }

        results = {}
        for key, prompt in prompts.items():
            print(f"  Analyzing {key}...")
            response = self.query_llava(image_path, prompt)
            results[key] = response.strip() if response else "unknown"

        return results

    def parse_emotion_scores(self, analysis):
        """Extracts numeric scores from the LLaVA answers."""
        text = json.dumps(analysis).lower()

        # Mood score (1-10)
        mood_score = 5  # default
        if any(w in text for w in ["happy", "joyful", "cheerful", "content", "smiling"]):
            mood_score = 8
        elif any(w in text for w in ["sad", "depressed", "crying", "down"]):
            mood_score = 3
        elif any(w in text for w in ["angry", "furious", "mad"]):
            mood_score = 2
        elif any(w in text for w in ["neutral", "calm", "relaxed"]):
            mood_score = 5
        elif any(w in text for w in ["surprised", "shocked"]):
            mood_score = 6

        # Anxiety detection
        anxiety_detected = any(w in text for w in ["nervous", "anxious", "worried", "tense", "stressed"])

        # Trust/intimacy indicators
        intimate_detected = any(w in text for w in ["close", "intimate", "warm", "tender", "affectionate"])
        distant_detected = any(w in text for w in ["distant", "cold", "withdrawn", "guarded"])

        return {
            "mood": mood_score,
            "mood_delta": (mood_score - 5) * 0.3,  # normalize to a small delta
            "anxiety": 2.0 if anxiety_detected else 0.0,
            "anxiety_delta": 0.5 if anxiety_detected else 0.0,
            "intimacy": 1 if intimate_detected else 0,
            "distance": 1 if distant_detected else 0
        }

    def update_core_state(self, adjustments):
        """Updates the full state file with vision data."""
        try:
            if os.path.exists(PATHS["state"]):
                with open(PATHS["state"]) as f:
                    state = json.load(f)
            else:
                state = {"core_state": {}}

            core = state.get("core_state", {})
            modules = state.get("modules", {})

            # Apply adjustments
            if "mood_delta" in adjustments:
                core["mood"] = max(0, min(10, core.get("mood", 5) + adjustments["mood_delta"]))
            if "anxiety_delta" in adjustments:
                core["anxiety"] = max(0, min(10, core.get("anxiety", 0) + adjustments["anxiety_delta"]))

            # Add vision data
            modules["Vision"] = {
                "last_analysis": datetime.now(timezone.utc).isoformat(),
                "detected_mood": adjustments.get("mood", 5),
                "anxiety_detected": adjustments.get("anxiety", 0) > 1,
                "intimacy_level": adjustments.get("intimacy", 0)
            }

            state["core_state"] = core
            state["modules"] = modules

            with open(PATHS["state"], "w") as f:
                json.dump(state, f, indent=2)

            return True
        except Exception as e:
            print(f"Error updating core state: {e}")
            return False

    def load_memory(self):
        """Loads the vision analysis history."""
        if os.path.exists(PATHS["vision_history"]):
            try:
                with open(PATHS["vision_history"]) as f:
                    self.memory = json.load(f)
            except Exception:
                self.memory = []

    def save_memory(self, analysis):
        """Appends an analysis to the history."""
        self.memory.append({
            "timestamp": datetime.now(timezone.utc).isoformat(),
            "analysis": analysis
        })
        # Keep only the last 50 entries
        self.memory = self.memory[-50:]

        try:
            with open(PATHS["vision_history"], "w") as f:
                json.dump(self.memory, f, indent=2)
        except Exception as e:
            print(f"Error saving memory: {e}")

    def analyze(self, image_path, update_core=True):
        """Main entry point: full image analysis."""
        print(f"VisionBridge v2 – Analyzing: {image_path}")
        print("-" * 40)

        # Check model
        if not self.check_model():
            return {
                "success": False,
                "error": f"{self.model} not available in Ollama",
                "fallback": True
            }

        # Run the analysis
        raw_analysis = self.analyze_emotion(image_path)

        if not raw_analysis or "unknown" in raw_analysis.values():
            return {
                "success": False,
                "error": "LLaVA analysis failed",
                "fallback": True
            }

        # Extract scores
        adjustments = self.parse_emotion_scores(raw_analysis)

        # Assemble the result
        result = {
            "success": True,
            "timestamp": datetime.now(timezone.utc).isoformat(),
            "image": image_path,
            "raw_analysis": raw_analysis,
            "parsed_scores": adjustments,
            "model": self.model
        }

        # Update core state
        if update_core:
            self.update_core_state(adjustments)
            print("✓ Core state updated")

        # Save to memory
        self.save_memory(result)
        print("✓ Analysis saved to memory")

        # Write output
        with open(PATHS["vision_output"], "w") as f:
            json.dump(result, f, indent=2)

        print("\nResults:")
        print(f"  Mood: {adjustments['mood']}/10")
        # parsed_scores has no "anxiety_detected" key; check the anxiety score instead
        print(f"  Anxiety: {'Yes' if adjustments['anxiety'] > 0 else 'No'}")
        print(f"  Intimacy: {'High' if adjustments['intimacy'] else 'Low'}")

        return result

    def fallback_analysis(self, image_path=None):
        """Simulation mode when LLaVA is not available."""
        return {
            "success": True,
            "timestamp": datetime.now(timezone.utc).isoformat(),
            "mode": "simulation",
            "image": image_path,
            "raw_analysis": {
                "mood": "neutral, calm presence",
                "facial": "neutral",
                "gaze": "at_camera",
                "body": "relaxed posture"
            },
            "parsed_scores": {
                "mood": 5,
                "mood_delta": 0,
                "anxiety": 0,
                "anxiety_delta": 0,
                "intimacy": 0,
                "distance": 0
            }
        }


def main():
    """CLI entry point."""
    import argparse

    parser = argparse.ArgumentParser(description="Natiris VisionBridge v2")
    # not required, so that --check works without an image argument
    parser.add_argument("--image", "-i", help="Path to image file")
    parser.add_argument("--no-update-core", action="store_true", help="Don't update core state")
    parser.add_argument("--check", action="store_true", help="Check LLaVA availability")

    args = parser.parse_args()

    analyzer = VisionAnalyzer()

    if args.check:
        available = analyzer.check_model()
        print(f"LLaVA 7b: {'✓ Available' if available else '✗ Not found'}")
        return

    if not args.image or not os.path.exists(args.image):
        print(f"Error: Image not found: {args.image}")
        return

    result = analyzer.analyze(args.image, update_core=not args.no_update_core)
    print(json.dumps(result, indent=2))


if __name__ == "__main__":
    main()
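A short usage sketch for VisionBridge.py above, assuming the import is run from inside the bridges/ directory; the image path is illustrative:

import json
from VisionBridge import VisionAnalyzer

analyzer = VisionAnalyzer()
if analyzer.check_model():
    result = analyzer.analyze("/home/user/natiris/generated/example.png", update_core=False)
else:
    result = analyzer.fallback_analysis()  # simulation mode when llava:7b is not pulled
print(json.dumps(result, indent=2))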
296
bridges/VisionBridge_v2.py
Normal file
@@ -0,0 +1,296 @@
9
bridges/comfy_response.json
Normal file
@@ -0,0 +1,9 @@
{
  "success": false,
  "error": {
    "type": "prompt_outputs_failed_validation",
    "message": "Prompt outputs failed validation",
    "details": "",
    "extra_info": {}
  }
}
7
bridges/comfy_status.json
Normal file
@@ -0,0 +1,7 @@
{
  "timestamp": "2026-02-16T21:09:22.985037+00:00",
  "comfy_reachable": false,
  "comfy_version": "N/A",
  "image_prompt": "Portrait von Natiris, neutral, mood=5, loneliness=2, style=familiar, soft lighting, high detail, cinematic",
  "style": "familiar"
}
6
bridges/ollama_response.json
Normal file
@@ -0,0 +1,6 @@
{
  "prompt": "Du bist Natiris \u2013 ein autonomer Companion.\nCore: mood=None, loneliness=None, bonded_to=None\nEmotion: {'mood_delta': 0.7, 'loneliness_delta': 0.0, 'anxiety_delta': 0.0, 'jealousy_delta': 0.0}\nBond: {'bonded_to': 'user_primary', 'exclusivity_active': True, 'jealousy_risk': 0.0}\nExpression: tone=warm, exklusiv verbunden, bond_context=exklusiv\n\nUser Input: Hallo, wie geht es dir heute?\n\nAntworte authentisch basierend auf deinem emotionalen Zustand.",
  "response": "*warmer Stimme* Ah, Hallo! Es ist gut, dich zu sehen! Ich f\u00fchle mich heute ziemlich entspannt und wohl. Die Sonne scheint, und ich bin froh, dass wir uns wiedersehen k\u00f6nnen. Wie war dein Tag bisher? Hast du etwas Interessantes erlebt oder planst du f\u00fcr den Rest der Woche? *warmes L\u00e4cheln*",
  "model": "llama3-8b-abliterated:latest",
  "timestamp": "2026-02-17T06:23:12.453685+00:00"
}
7
bridges/signal_response.json
Normal file
@@ -0,0 +1,7 @@
{
  "timestamp": "2026-02-16T21:09:28.865524+00:00",
  "signal_cli_available": false,
  "simulated": true,
  "admin_access": "protected",
  "signal_status": "ready (simulated)"
}
20
bridges/vision_analysis.json
Normal file
@@ -0,0 +1,20 @@
{
  "success": true,
  "timestamp": "2026-02-17T21:33:09.428094+00:00",
  "image": "/home/arch_agent_system/natiris/generated/natiris_personal_context_20260217_223307.png",
  "raw_analysis": {
    "mood": "The person in the image appears to be confident and poised, with a slight sense of seriousness or contemplation. Their gaze is direct and engaging, which suggests a level of self-assurance and comfort in their pose. The lighting and composition of the photo give it an artistic and dynamic feel.",
    "facial": "The facial expression in the image can be described as neutral. The person appears to have a calm and composed demeanor with a subtle smile.",
    "gaze": "The person in the image appears to be looking directly into the camera.",
    "body": "The woman has a confident and relaxed pose with her head slightly turned to her left, eyes looking towards the camera."
  },
  "parsed_scores": {
    "mood": 5,
    "mood_delta": 0.0,
    "anxiety": 0.0,
    "anxiety_delta": 0.0,
    "intimacy": 0,
    "distance": 0
  },
  "model": "llava:7b"
}