# Commit notes: NatirisMaster.py updated - NaturalLanguageEngine optimized -
# PsychologyEngine & Arousal-Engine - WebUI (FastAPI) with chat API -
# Bridges: ComfyUI, Ollama, Vision - admin auth system -
# .gitignore added (checkpoints, logs, generated)
# (pasted file metadata: 451 lines, 18 KiB, Python, executable file)
#!/usr/bin/env python3
"""
ComfyBridge Final – complete IPAdapter integration.

With correct model paths and a basic-workflow fallback when IPAdapter
nodes, models, or the reference face image are not available.
"""

import json
import os
import time
import uuid
from datetime import datetime, timezone
from pathlib import Path

import requests
|
||
# Konfiguration
|
||
PROJECT_ROOT = os.path.expanduser("~/natiris")
|
||
PATHS = {
|
||
"state": os.path.join(PROJECT_ROOT, "core/natiris_full_state.json"),
|
||
"output_dir": os.path.join(PROJECT_ROOT, "generated"),
|
||
"output": os.path.join(PROJECT_ROOT, "bridges/comfy_response.json"),
|
||
"base_images": os.path.join(PROJECT_ROOT, "assets/base_images"),
|
||
}
|
||
|
||
COMFY_API = os.getenv("COMFY_API_URL", "http://localhost:8188")
|
||
|
||
# Korrekte Pfade für ComfyUI (aus Pinokio)
|
||
MODEL_PATHS = {
|
||
# Relativ zu ComfyUI root, oder absolute Pfade
|
||
"ipadapter_dir": "/opt/pinokio/drive/drives/peers/d1753059260169/ipadapter",
|
||
"clip_vision_dir": "/opt/pinokio/drive/drives/peers/d1753059260169/clip_vision",
|
||
"checkpoints_dir": "/opt/pinokio/drive/drives/peers/d1753059260169/checkpoints",
|
||
"controlnet_dir": "/opt/pinokio/drive/drives/peers/d1753059260169/controlnet",
|
||
}
|
||
|
||
# Verfügbare Modelle
|
||
AVAILABLE_MODELS = {
|
||
"ipadapter": [
|
||
"ip-adapter-plus-face_sd15.safetensors",
|
||
"ip-adapter-plus-face_sdxl_vit-h.safetensors",
|
||
"ip-adapter_sdxl_vit-h.safetensors",
|
||
"ip-adapter-faceid_sdxl.savetensors",
|
||
"ip-adapter-faceid-plusv2_sdxl.savetensors",
|
||
],
|
||
"clip_vision": [
|
||
"CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors",
|
||
"clip_vision_h.safetensors",
|
||
"CLIP-ViT-bigG-14-laion2B-39B-b160k.safetensors",
|
||
]
|
||
}
|
||
|
||
TRUST_MAP = [
|
||
{"range": [0, 3], "style": "neutral_portrait", "prompt_add": "neutral expression, professional lighting", "ipadapter_weight": 0.4, "distance": "medium"},
|
||
{"range": [4, 7], "style": "personal_context", "prompt_add": "relaxed expression, warm lighting, cozy", "ipadapter_weight": 0.6, "distance": "medium-close"},
|
||
{"range": [8, 10], "style": "intimate", "prompt_add": "warm smile, intimate lighting, emotional", "ipadapter_weight": 0.8, "distance": "close"},
|
||
]
|
||
|
||
|
||
class ComfyBridgeFinal:
    """Final ComfyUI bridge with IPAdapter integration.

    Probes the running ComfyUI instance and the model directories once at
    construction time, then builds either a basic text-to-image workflow or
    an IPAdapter-augmented workflow (for face consistency) depending on what
    was detected.
    """

    def __init__(self):
        # Unique client id so ComfyUI can associate queued prompts with us.
        self.client_id = f"natiris_{uuid.uuid4().hex[:8]}"
        self.output_dir = Path(PATHS["output_dir"])
        self.output_dir.mkdir(parents=True, exist_ok=True)
        self.base_images_dir = Path(PATHS["base_images"])

        # Feature detection (nodes, models, face image) happens once here.
        self.detected_features = self._detect_features()

    def _detect_features(self):
        """Detect available features (best-effort; never raises).

        Returns a dict of booleans: ipadapter_nodes, ipadapter_models,
        clip_vision_models, face_image_exists, controlnet_nodes.
        """
        features = {
            "ipadapter_nodes": False,
            "ipadapter_models": False,
            "clip_vision_models": False,
            "face_image_exists": False,
            # FIX: key is now always present, even when the API probe below
            # fails (the original only set it inside the try block).
            "controlnet_nodes": False,
        }

        # Probe ComfyUI for installed node classes.
        try:
            resp = requests.get(f"{COMFY_API}/object_info", timeout=5)
            nodes = resp.json()
            features["ipadapter_nodes"] = any("IPAdapter" in k for k in nodes)
            features["controlnet_nodes"] = "ControlNetLoader" in nodes
        except Exception:
            # FIX: was a bare "except:". Detection is deliberately
            # best-effort: an unreachable ComfyUI just means no extra nodes.
            pass

        # Check model files on disk.
        if os.path.exists(MODEL_PATHS["ipadapter_dir"]):
            files = os.listdir(MODEL_PATHS["ipadapter_dir"])
            features["ipadapter_models"] = any(
                f.endswith(('.safetensors', '.pth')) for f in files
            )

        if os.path.exists(MODEL_PATHS["clip_vision_dir"]):
            files = os.listdir(MODEL_PATHS["clip_vision_dir"])
            features["clip_vision_models"] = any(f.endswith('.safetensors') for f in files)

        # Reference face image used by the IPAdapter workflow.
        face_path = self.base_images_dir / "face_base.png"
        features["face_image_exists"] = face_path.exists()

        return features

    def check_health(self):
        """ComfyUI health check.

        Returns {"reachable": True, "version": ..., "features": ...} on
        success, or {"reachable": False, "error": ...} on any failure.
        """
        try:
            resp = requests.get(f"{COMFY_API}/system_stats", timeout=5)
            data = resp.json()
            return {
                "reachable": True,
                "version": data.get("system", {}).get("comfyui_version", "unknown"),
                "features": self.detected_features,
            }
        except Exception as e:
            return {"reachable": False, "error": str(e)}

    def get_style_config(self, trust):
        """Return the TRUST_MAP entry whose range contains *trust*.

        Falls back to the middle (personal_context) entry for values that
        match no range (e.g. 3.5 between ranges, or out-of-range input).
        """
        for entry in TRUST_MAP:
            if entry["range"][0] <= trust <= entry["range"][1]:
                return entry
        return TRUST_MAP[1]

    def build_prompt(self, state):
        """Build positive/negative prompts and image parameters from state.

        Reads trust, mood and arousal_level from state["core_state"]
        (with defaults), returning a dict consumed by build_workflow*.
        """
        core = state.get("core_state", {})

        trust = core.get("trust", 7.0)
        mood = core.get("mood", 5)
        arousal = core.get("arousal_level", 3)

        style = self.get_style_config(trust)

        # Character description for consistency across generations.
        # BUG FIX: TRUST_MAP entries did not define "lighting", so the
        # original style['lighting'] raised KeyError on every call.
        # .get() with a neutral default makes this safe either way.
        lighting = style.get("lighting", "soft lighting")
        char_desc = (
            "beautiful young woman, same person, consistent face, "
            "natural skin texture, realistic, "
            f"{lighting}, {style['distance']} portrait, "
        )

        # Mood description
        mood_desc = self._mood_to_desc(mood)

        positive = (
            f"{char_desc} {style['prompt_add']}, {mood_desc}, "
            f"arousal level {arousal}/10, "
            "high detail, 8k uhd, soft focus, gentle bokeh"
        )

        negative = (
            "blurry, distorted, deformed, ugly, bad anatomy, "
            "extra limbs, missing limbs, different person, "
            "inconsistent face, low quality, jpeg artifacts"
        )

        return {
            "positive": positive,
            "negative": negative,
            "style": style["style"],
            "trust": trust,
            "mood": mood,
            "ipadapter_weight": style["ipadapter_weight"],
            "width": 512,
            # Taller portrait format at high trust levels.
            "height": 768 if trust > 7 else 512,
        }

    def _mood_to_desc(self, mood):
        """Map a numeric mood (roughly 0-10) to a prompt description."""
        if mood >= 8:
            return "radiant, happy, glowing"
        elif mood >= 6:
            return "content, peaceful, relaxed"
        elif mood >= 4:
            return "neutral, calm, composed"
        elif mood >= 2:
            return "melancholic, pensive, distant"
        else:
            return "sad, withdrawn, vulnerable"

    def build_workflow_basic(self, prompt_data):
        """Basic text-to-image workflow without IPAdapter.

        Node graph: checkpoint (4) -> text encodes (1, 2) -> KSampler (3)
        on an empty latent (5) -> VAE decode (6) -> SaveImage (7).
        """
        # Time-based seed, clamped to ComfyUI's signed 32-bit range.
        seed = int(time.time() * 1000) % 2147483647

        # Prefer an SD1.5 checkpoint for better IPAdapter compatibility;
        # fall back to SDXL base if it is not installed.
        checkpoint = "realisticVisionV60B1_v51HyperVAE.safetensors"
        if not os.path.exists(os.path.join(MODEL_PATHS["checkpoints_dir"], checkpoint)):
            checkpoint = "sd_xl_base_1.0.safetensors"

        return {
            "1": {"inputs": {"text": prompt_data["positive"], "clip": ["4", 1]}, "class_type": "CLIPTextEncode"},
            "2": {"inputs": {"text": prompt_data["negative"], "clip": ["4", 1]}, "class_type": "CLIPTextEncode"},
            "3": {
                "inputs": {
                    "seed": seed,
                    "steps": 30,
                    "cfg": 7.0,
                    "sampler_name": "euler_ancestral",
                    "scheduler": "karras",
                    "denoise": 1.0,
                    "model": ["4", 0],
                    "positive": ["1", 0],
                    "negative": ["2", 0],
                    "latent_image": ["5", 0],
                },
                "class_type": "KSampler",
            },
            "4": {"inputs": {"ckpt_name": checkpoint}, "class_type": "CheckpointLoaderSimple"},
            "5": {"inputs": {"width": prompt_data["width"], "height": prompt_data["height"], "batch_size": 1}, "class_type": "EmptyLatentImage"},
            "6": {"inputs": {"samples": ["3", 0], "vae": ["4", 2]}, "class_type": "VAEDecode"},
            "7": {"inputs": {"filename_prefix": f"natiris_{prompt_data['style']}", "images": ["6", 0]}, "class_type": "SaveImage"},
        }

    def build_workflow_with_ipadapter(self, prompt_data, face_path):
        """Workflow with IPAdapter for face consistency.

        Extends the basic workflow with nodes 20-23 and reroutes the
        KSampler's model input through the IPAdapter.
        """
        workflow = self.build_workflow_basic(prompt_data)

        # Weight based on trust level.
        weight = prompt_data.get("ipadapter_weight", 0.6)

        # Choose SD1.5 vs SDXL based on the selected checkpoint.
        checkpoint = workflow["4"]["inputs"]["ckpt_name"]
        is_sdxl = "xl" in checkpoint.lower()

        # Pick the matching IPAdapter model.
        if is_sdxl:
            ipadapter_model = "ip-adapter-plus-face_sdxl_vit-h.safetensors"
            clip_vision = "CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors"
        else:
            ipadapter_model = "ip-adapter-plus-face_sd15.safetensors"
            clip_vision = "CLIP-ViT-H-14-laion2B-s32B-b79K.safetensors"

        # Verify the model files exist; fall back otherwise.
        ipadapter_full = os.path.join(MODEL_PATHS["ipadapter_dir"], ipadapter_model)
        if not os.path.exists(ipadapter_full):
            # Fallback to the SD1.5 adapter.
            ipadapter_model = "ip-adapter-plus-face_sd15.safetensors"

        clip_full = os.path.join(MODEL_PATHS["clip_vision_dir"], clip_vision)
        if not os.path.exists(clip_full):
            clip_vision = "clip_vision_h.safetensors"

        # IPAdapter nodes
        ipadapter_nodes = {
            # Load the reference face image.
            "20": {"inputs": {"image": face_path}, "class_type": "LoadImage"},

            # IPAdapter model
            "21": {"inputs": {"ipadapter_file": ipadapter_model}, "class_type": "IPAdapterModelLoader"},

            # CLIP Vision
            "22": {"inputs": {"clip_name": clip_vision}, "class_type": "CLIPVisionLoader"},

            # Apply the IPAdapter.
            "23": {
                "inputs": {
                    "model": ["4", 0],
                    "ipadapter": ["21", 0],
                    "image": ["20", 0],
                    "clip_vision": ["22", 0],
                    "weight": weight,
                    "start_at": 0.0,
                    "end_at": 1.0,
                },
                "class_type": "IPAdapterAdvanced",
            },
        }

        # Extend the workflow.
        workflow.update(ipadapter_nodes)

        # KSampler now samples from the IPAdapter-conditioned model.
        workflow["3"]["inputs"]["model"] = ["23", 0]

        return workflow

    def build_workflow(self, prompt_data):
        """Choose the workflow based on the detected features."""
        face_path = str(self.base_images_dir / "face_base.png")

        if (self.detected_features["ipadapter_nodes"] and
                self.detected_features["ipadapter_models"] and
                self.detected_features["clip_vision_models"] and
                self.detected_features["face_image_exists"] and
                os.path.exists(face_path)):

            print(f"🎨 Using IPAdapter workflow (weight: {prompt_data.get('ipadapter_weight', 0.6)})")
            return self.build_workflow_with_ipadapter(prompt_data, face_path)
        else:
            print("🎨 Using basic workflow (IPAdapter not available)")
            print(f" Nodes: {self.detected_features.get('ipadapter_nodes', False)}")
            print(f" Models: {self.detected_features.get('ipadapter_models', False)}")
            print(f" Face: {self.detected_features.get('face_image_exists', False)}")
            return self.build_workflow_basic(prompt_data)

    def submit_workflow(self, workflow):
        """Send a workflow to ComfyUI's /prompt endpoint.

        Returns {"success": True, "prompt_id": ...} or
        {"success": False, "error": ...}.
        """
        try:
            data = {"prompt": workflow, "client_id": self.client_id}
            resp = requests.post(f"{COMFY_API}/prompt", json=data, timeout=10)
            result = resp.json()

            if "prompt_id" in result:
                return {"success": True, "prompt_id": result["prompt_id"]}
            return {"success": False, "error": result.get("error", "Unknown")}
        except Exception as e:
            return {"success": False, "error": str(e)}

    def poll_result(self, prompt_id, timeout=300):
        """Poll /history until the prompt finishes or *timeout* seconds pass."""
        start = time.time()
        while time.time() - start < timeout:
            try:
                history = requests.get(f"{COMFY_API}/history", timeout=5).json()
                if prompt_id in history:
                    return {"completed": True, "data": history[prompt_id]}

                # FIX: dropped an unused GET of /queue that was issued on
                # every iteration — its result was never read.
                print(" ⏳ Generating...")
                time.sleep(1)
            except Exception as e:
                return {"completed": False, "error": str(e)}
        return {"completed": False, "error": "Timeout"}

    def download_and_save(self, history_data, prompt_data):
        """Download the generated image(s) and save image + metadata JSON.

        Returns on the first successfully saved image; failures for
        individual images are logged and skipped.
        """
        outputs = history_data.get("outputs", {})

        for node_id, node_out in outputs.items():
            if "images" in node_out:
                for img in node_out["images"]:
                    try:
                        params = {
                            "filename": img["filename"],
                            "subfolder": img.get("subfolder", ""),
                            "type": img.get("type", "output"),
                        }
                        resp = requests.get(f"{COMFY_API}/view", params=params, timeout=30)

                        if resp.status_code == 200:
                            timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
                            filename = f"natiris_{prompt_data['style']}_{timestamp}.png"
                            filepath = self.output_dir / filename

                            with open(filepath, "wb") as f:
                                f.write(resp.content)

                            # Metadata sidecar file.
                            metadata = {
                                **prompt_data,
                                "timestamp": datetime.now(timezone.utc).isoformat(),
                                "ipadapter_used": self.detected_features["ipadapter_nodes"],
                                "filename": filename,
                            }

                            with open(str(filepath) + ".json", "w") as f:
                                json.dump(metadata, f, indent=2)

                            print(f"✅ Image saved: {filepath}")
                            return {"success": True, "path": str(filepath), "metadata": metadata}
                    except Exception as e:
                        print(f"Error saving: {e}")

        return {"success": False, "error": "Failed to save image"}

    def generate(self, state_path=None):
        """Main generation method: load state, submit, poll, download.

        Returns a result dict with at least a "success" key; on success it
        also carries "path", "metadata" and "health".
        """
        # Load state (explicit path first, then the configured default).
        state = {}
        if state_path and os.path.exists(state_path):
            with open(state_path) as f:
                state = json.load(f)
        elif os.path.exists(PATHS["state"]):
            with open(PATHS["state"]) as f:
                state = json.load(f)

        # Health check
        health = self.check_health()
        if not health["reachable"]:
            return {"success": False, "error": "ComfyUI unreachable", "health": health}

        # Build the workflow.
        prompt_data = self.build_prompt(state)
        workflow = self.build_workflow(prompt_data)

        # Submit
        submit = self.submit_workflow(workflow)
        if not submit["success"]:
            return {"success": False, "error": submit.get("error", "Submit failed")}

        print(f"🚀 Workflow submitted: {submit['prompt_id']}")

        # Poll
        poll = self.poll_result(submit["prompt_id"])
        if not poll["completed"]:
            return {"success": False, "error": poll.get("error", "Generation failed")}

        # Download
        result = self.download_and_save(poll["data"], prompt_data)
        result["health"] = health

        return result
def main():
    """CLI entry point: --check prints status, --test runs a generation and
    persists the response, default runs a generation and dumps the result."""
    import argparse

    parser = argparse.ArgumentParser(description="Natiris ComfyBridge Final")
    parser.add_argument("--state", default=PATHS["state"])
    parser.add_argument("--check", action="store_true", help="Health check")
    parser.add_argument("--test", action="store_true", help="Test generation")
    args = parser.parse_args()

    bridge = ComfyBridgeFinal()

    def mark(flag):
        # Render a boolean as a check/cross mark for status output.
        return '✅' if flag else '❌'

    if args.check:
        health = bridge.check_health()
        features = health.get("features", {})
        separator = "=" * 50
        print("\n" + separator)
        print("ComfyBridge Final Status")
        print(separator)
        print(f"ComfyUI: {mark(health['reachable'])} {health.get('version', 'n/a')}")
        print(f"IPAdapter Nodes: {mark(features.get('ipadapter_nodes'))}")
        print(f"IPAdapter Models: {mark(features.get('ipadapter_models'))}")
        print(f"CLIP Vision: {mark(features.get('clip_vision_models'))}")
        print(f"Face Image: {mark(features.get('face_image_exists'))}")
        print(separator)
        return

    if args.test:
        print("🎨 Testing image generation...")
        result = bridge.generate(args.state)

        if result["success"]:
            print(f"\n✅ SUCCESS!")
            print(f" Image: {result['path']}")
            print(f" Style: {result['metadata']['style']}")
            print(f" Trust: {result['metadata']['trust']}")
            print(f" IPAdapter: {result['metadata']['ipadapter_used']}")
        else:
            print(f"\n❌ Failed: {result.get('error', 'Unknown')}")

        # Persist the response for other bridge components.
        with open(PATHS["output"], "w") as f:
            json.dump(result, f, indent=2)
        return

    # Default: generate
    result = bridge.generate(args.state)
    print(json.dumps(result, indent=2))


if __name__ == "__main__":
    main()