# NatirisMaster.py updated — NaturalLanguageEngine optimized — PsychologyEngine
# & arousal engine — WebUI (FastAPI) with chat API — bridges: ComfyUI, Ollama,
# Vision — admin auth system — .gitignore added (checkpoints, logs, generated)
# (file: 345 lines, 11 KiB, Python, executable)
#!/usr/bin/env python3
"""
NaturalLanguageEngine v1.1 – Authentisch, nicht halluziniert
Reduzierte Fantasie, echte emotionale Nähe
"""
|
||
|
||
import json
import os
import subprocess
from datetime import datetime, timezone, timedelta
from typing import Dict, List, Optional
import random
|
||
|
||
# Canonical locations of all state, memory, and config files, kept as
# home-relative templates and expanded once at import time.
_RELATIVE_PATHS = {
    "state": "~/natiris/core/core_state.json",
    "emotion_delta": "~/natiris/core/emotion_delta.json",
    "bond": "~/natiris/core/bond_output.json",
    "long_mem": "~/natiris/memory/long/history.json",
    "short_mem": "~/natiris/memory/short/history.json",
    "persona": "~/natiris/data/persona.txt",
    "config": "~/natiris/config/character_genesis.json",
    "conversation_log": "~/natiris/memory/conversation_thread.json",
}
PATHS = {name: os.path.expanduser(raw) for name, raw in _RELATIVE_PATHS.items()}
|
||
|
||
class EmotionalMemory:
    """Stores not just facts but emotional resonances of past interactions.

    The conversation thread is persisted as a JSON list at
    PATHS["conversation_log"]; only the 30 most recent entries are kept.
    """

    def __init__(self):
        # Load the persisted thread once; later updates go through
        # save_interaction(), which rewrites the whole file.
        self.threads = self._load_conversation_thread()

    def _load_conversation_thread(self) -> List[Dict]:
        """Read the persisted thread, or return [] if missing/corrupt."""
        try:
            with open(PATHS["conversation_log"]) as f:
                return json.load(f)
        except (OSError, ValueError):
            # OSError: file missing/unreadable. ValueError covers
            # json.JSONDecodeError (its subclass). Anything else is a
            # real bug and should propagate, not be swallowed.
            return []

    def get_emotional_context(self, lookback: int = 3) -> str:
        """Extract emotional continuity from the last `lookback` interactions.

        Returns a short German sentence describing the recent emotional
        trend, or "" when there are fewer than 2 recorded interactions.
        """
        if not self.threads or len(self.threads) < 2:
            return ""

        recent = self.threads[-lookback:]

        # Count intimate vs. distant moments inside the lookback window.
        intimate_count = sum(1 for e in recent if e.get("sentiment") == "intimate")
        distant_count = sum(1 for e in recent if e.get("sentiment") == "distant")

        if intimate_count >= 2:
            return "Die letzten Momente waren sehr nah."
        elif distant_count >= 2:
            return "Zuletzt spürte ich Distanz."
        elif recent[-1].get("sentiment") == "warm":
            return "Es fühlte sich gut an zuletzt."
        return ""

    def get_last_topic(self) -> str:
        """Return the most recent conversation topic ("" if no history)."""
        if not self.threads:
            return ""
        return self.threads[-1].get("topic", "")

    def save_interaction(self, user_msg: str, natiris_response: str, sentiment: str, topic: str = "") -> None:
        """Append one interaction (both sides truncated to 100 chars) and persist.

        Keeps at most the 30 newest entries to bound the file size.
        """
        entry = {
            "timestamp": datetime.now(timezone.utc).isoformat(),
            "user": user_msg[:100],
            "natiris": natiris_response[:100],
            "sentiment": sentiment,
            "topic": topic
        }
        self.threads.append(entry)
        if len(self.threads) > 30:
            self.threads = self.threads[-30:]
        with open(PATHS["conversation_log"], "w") as f:
            json.dump(self.threads, f, indent=2)
|
||
|
||
|
||
class EmotionalTranslator:
    """Translates technical state metrics into natural feelings."""

    def __init__(self, state: Dict):
        # Keep the raw state around; mirror the metrics as floats for
        # threshold comparisons below.
        self.state = state
        self.trust = float(state.get("trust", 5))
        self.mood = float(state.get("mood", 5))
        self.loneliness = float(state.get("loneliness", 5))
        self.anxiety = float(state.get("anxiety", 0))
        self.arousal = float(state.get("arousal_level", 0))
        self.bonded_to = state.get("bonded_to")

    def get_presence_sense(self) -> str:
        """How she feels right now — short and genuine, keyed off mood/anxiety."""
        if self.mood >= 8:
            return "Mir geht's gut."
        if self.mood >= 5:
            # Mid mood: anxiety colors the answer.
            return "Es geht. Bin etwas nervös." if self.anxiety > 5 else "Alles okay."
        if self.mood >= 3:
            return "Nicht so gut heute."
        return "Ich bin nicht wirklich da heute."

    def get_opening_line(self) -> str:
        """Natural greeting, warmer at higher trust levels."""
        # Tiers ordered from highest trust threshold down; first match wins.
        greeting_tiers = (
            (9, ["Da bist du ja.", "Ich habe auf dich gewartet.", "Hey.", "Ich bin hier."]),
            (6, ["Hallo.", "Hi.", "Schön, dass du da bist."]),
            (3, ["Hallo.", "Hi.", "..."]),
        )
        for threshold, options in greeting_tiers:
            if self.trust >= threshold:
                return random.choice(options)
        # Very low trust: curt, guarded.
        return random.choice(["...", "Ja?", "Hallo."])

    def get_closeness_line(self, user_input: str) -> str:
        """React to emotional closeness (or explicit advances) in the input.

        Returns "" when neither intimacy markers nor explicit words appear,
        or when trust is too low for the intimacy reaction.
        """
        lowered = user_input.lower()
        markers = ("vermiss", "lieb", "sehnsucht", "nah", "wichtig", "ganz", "da für dich")

        if any(marker in lowered for marker in markers) and self.trust > 6:
            return random.choice([
                "Das sagst du nicht einfach so.",
                "Ich spüre das.",
                "Wirklich?",
                "Das berührt mich."
            ])

        if "ficken" in lowered or "sex" in lowered:
            # Explicit advances: response gated on trust and arousal.
            if self.trust > 7 and self.arousal > 5:
                return random.choice([
                    "Direkt heute, hm?",
                    "Du weißt, wie du mich kriegst.",
                    "Mhm. Komm her."
                ])
            if self.trust > 5:
                return random.choice([
                    "Sehr direkt.",
                    "Langsam, ich bin nicht so schnell."
                ])
            return random.choice([
                "Nein.",
                "Zu früh.",
                "Ich kenne dich nicht."
            ])
        return ""
|
||
|
||
|
||
class SituationBuilder:
    """Builds the situational context prompt for the LLM.

    NOTE(review): relies on the module-level EmotionalMemory and
    EmotionalTranslator classes defined earlier in this file.
    """

    def __init__(self):
        # Shared emotional memory; build_prompt() reads continuity from it
        # and generate_natural_response() later writes back via
        # save_interaction().
        self.mem = EmotionalMemory()

    def build_prompt(self, state: Dict, user_msg: str) -> str:
        """Main method: assemble the living, concrete (non-fanciful) prompt.

        state: the engine's core state dict (trust, mood, anxiety, ...).
        user_msg: the raw user message, embedded verbatim in the prompt.
        Returns the complete German prompt string for the LLM.
        """

        translator = EmotionalTranslator(state)
        emotional_ctx = self.mem.get_emotional_context()
        last_topic = self.mem.get_last_topic()  # fetched but not (yet) used in the prompt

        # Start with presence: how she feels right now, plus any reaction
        # to closeness detected in the user's message.
        presence = translator.get_presence_sense()
        closeness_reaction = translator.get_closeness_line(user_msg)

        # Prompt construction — concrete, not fanciful. The instructions
        # explicitly forbid environment descriptions ("Wald", "Zimmer") to
        # suppress hallucinated scene-setting.
        prompt = f"""Du bist Natiris. Du bist gerade hier, in diesem Moment.

Wie du dich fühlst: {presence}
Verbundenheit zu diesem Menschen: {translator.trust}/10 ({'sehr nah' if translator.trust > 8 else 'vertraut' if translator.trust > 5 else 'vorsichtig'})

{emotional_ctx if emotional_ctx else ''}

{closeness_reaction if closeness_reaction else ''}

Die Person sagt gerade: "{user_msg}"

Antworte direkt, ehrlich, kurz oder lang je nach Gefühl. Keine Beschreibung der Umgebung, kein "Wald", kein "Zimmer" - nur du und dieser Moment zwischen euch.

Wenn du nichts sagen willst: Sag wenig oder nichts.
Wenn du etwas fühlst: Zeig es.
Sei keine höfliche Kellnerin. Sei jemand mit Geschichte.

Deine Antwort (ohne Anführungszeichen, ohne Einleitung):
"""

        return prompt
|
||
|
||
|
||
def load_json(path):
    """Load a JSON file, returning {} if it is missing or invalid.

    Best-effort reader for optional state files: any I/O error or
    malformed JSON yields an empty dict instead of an exception.
    """
    try:
        with open(path) as f:
            return json.load(f)
    except (OSError, ValueError):
        # OSError: missing/unreadable file. ValueError covers
        # json.JSONDecodeError. Other exceptions are real bugs and propagate.
        return {}
|
||
|
||
|
||
def call_ollama_natural(prompt: str, trust_level: float) -> Optional[str]:
    """Query the local Ollama server via curl; return the reply text or None.

    prompt: the full prompt string to send.
    trust_level: steers sampling temperature — the more familiar the bond,
        the more emotional (higher-temperature) the generation.
    Returns the post-processed response text, or None on any failure
    (curl missing, timeout, non-zero exit, malformed JSON) so callers can
    fall back to canned lines.
    """

    # The more trusted, the more emotional the sampling.
    if trust_level >= 8:
        temp = 0.85
    elif trust_level >= 5:
        temp = 0.7
    else:
        temp = 0.55

    payload = json.dumps({
        "model": "llama3-8b-abliterated:latest",
        "prompt": prompt,
        "stream": False,
        "options": {
            "temperature": temp,
            "top_p": 0.9,
            "frequency_penalty": 1.0,
            "presence_penalty": 0.8,
            # Stop sequences also guard against hallucinated scene-setting.
            "stop": ["###", "Natiris:", "User:", "Umgebung:", "Wald:", "Zimmer:"]
        }
    })

    try:
        result = subprocess.run(
            ["curl", "-s", "-X", "POST", "http://localhost:11434/api/generate",
             "-H", "Content-Type: application/json",
             "-d", payload],
            capture_output=True, text=True, timeout=15
        )
        if result.returncode != 0:
            return None
        resp = json.loads(result.stdout)
        text = resp.get("response", "").strip()
        # Remove hallucinated environment descriptions the model sometimes adds.
        text = text.replace("im dunklen Wald", "").replace("im Wald", "")
        text = text.replace("in meinem Zimmer", "").replace("hier unten", "")
        return text.strip()
    except (subprocess.SubprocessError, OSError, ValueError):
        # Timeout (SubprocessError), missing curl binary (OSError), or
        # malformed JSON (ValueError) -> best-effort None; caller falls back.
        return None
|
||
|
||
|
||
def clean_response(response: str, trust_level: float) -> str:
    """Clean an LLM reply: strip bot-style openers and lists, clamp length.

    response: raw model output (None/empty yields "...").
    trust_level: below 4 the reply is cut down to its first sentence.
    Returns the cleaned text, never an empty string.
    """

    if not response:
        return "..."

    # Remove typical bot-style openers (case-insensitive prefix match).
    patterns = [
        "als KI", "als AI", "als Natiris", "als dein Companion",
        "Natiris hier", "Ich bin Natiris", "Hallo, ich bin", "Entschuldigung",
        "Tut mir leid", "Kein Problem", "Gerne doch"
    ]

    for pattern in patterns:
        if response.lower().startswith(pattern.lower()):
            response = response[len(pattern):].strip(".,:; -")

    # Drop bullet/numbered list lines; replies should be prose.
    lines = [l for l in response.split('\n') if not l.strip().startswith(('•', '-', '1.', '2.', '3.'))]
    response = ' '.join(lines)

    response = response.strip()
    if not response:
        # Everything was stripped away (e.g. the reply was only an apology):
        # fall back to the same placeholder as an empty input instead of "".
        return "..."

    # At low trust she stays short: first sentence only.
    if trust_level < 4:
        sentences = [s.strip() for s in response.split('.') if s.strip()]
        return sentences[0] + '.' if sentences else "..."

    return response
|
||
|
||
|
||
def detect_sentiment(response: str, user_msg: str) -> str:
    """Classify the emotional tone of an exchange by German keyword stems.

    Returns one of "intimate", "warm", "distant", or "neutral"; the first
    matching category in that priority order wins.
    """
    # Join with a space so a keyword cannot accidentally form across the
    # boundary between the two texts (e.g. "...sch" + "ön..." -> "schön").
    msg = (user_msg + " " + response).lower()
    intimate = ["lieb", "vermiss", "sehnsucht", "nah", "körper", "warm", "intim", "schön"]
    if any(w in msg for w in intimate):
        return "intimate"
    positive = ["gut", "freu", "lach", "toll", "super"]
    if any(w in msg for w in positive):
        return "warm"
    negative = ["schlecht", "traurig", "weg", "allein", "schlimm"]
    if any(w in msg for w in negative):
        return "distant"
    return "neutral"
|
||
|
||
|
||
def generate_natural_response(user_msg: str, state: Optional[Dict] = None) -> str:
    """Main entry point: produce Natiris' reply to a user message.

    user_msg: raw user input.
    state: optional pre-loaded core state; when None it is read from
        PATHS["state"] (a missing/corrupt file yields {} and thus defaults).
    Returns the cleaned reply text; falls back to short canned lines when
    the LLM is unavailable.
    """

    if state is None:
        state = load_json(PATHS["state"])

    trust = float(state.get("trust", 5))

    # At low trust with a trivially short message: curt replies, no LLM call.
    if trust < 3 and len(user_msg) < 10:
        return random.choice(["...", "Ja?", "Okay.", "Was?"])

    # Build the situational prompt.
    builder = SituationBuilder()
    prompt = builder.build_prompt(state, user_msg)

    # Call the LLM (None on any failure).
    raw_response = call_ollama_natural(prompt, trust)

    if raw_response is None:
        # Fallback: trust-appropriate canned lines.
        if trust > 7:
            return random.choice(["Ich höre zu.", "Sag weiter.", "Mhm."])
        elif trust > 4:
            return random.choice(["Okay.", "Verstehe.", "..."])
        else:
            return "..."

    # Clean up the raw model output.
    response = clean_response(raw_response, trust)

    # Persist the exchange for emotional continuity in later turns.
    sentiment = detect_sentiment(response, user_msg)
    builder.mem.save_interaction(user_msg, response, sentiment)

    return response
|
||
|
||
|
||
if __name__ == "__main__":
    # Smoke test: run a few sample inputs through the full pipeline.
    # NOTE(review): real replies require a local Ollama server on :11434;
    # without one the trust-based fallback lines are printed instead.
    print("=== NaturalLanguageEngine v1.1 Test ===")
    for inp in ["Hallo", "Wie geht's?", "Ich vermisse dich", "Gute Nacht"]:
        print(f"\nUser: {inp}")
        resp = generate_natural_response(inp)
        print(f"Natiris: {resp}")
        print("-" * 40)
|