- NatirisMaster.py aktualisiert - NaturalLanguageEngine optimiert - PsychologyEngine & Arousal-Engine - WebUI (FastAPI) mit Chat-API - Bridges: ComfyUI, Ollama, Vision - Admin-Auth System - .gitignore hinzugefügt (checkpoints, logs, generated)
247 lines
8.8 KiB
Python
Executable File
247 lines
8.8 KiB
Python
Executable File
#!/usr/bin/env python3
|
|
"""
|
|
NaturalLanguageEngine v2.6 - Kontextuell, Memory-basiert, Dynamisch
|
|
"""
|
|
|
|
import json
|
|
import os
|
|
from datetime import datetime, timezone
|
|
from typing import Dict, List, Optional
|
|
import random
|
|
|
|
# Filesystem locations of persisted engine state, expanded for the current user.
PATHS = {
    name: os.path.expanduser(raw)
    for name, raw in (
        ("state", "~/natiris/core/core_state.json"),
        ("conversation_log", "~/natiris/memory/conversation_thread.json"),
    )
}
|
|
|
|
|
|
class EmotionalState:
    """Snapshot of the engine's emotional state with conversational memory.

    Numeric fields are coerced to float; missing keys fall back to
    mid-scale defaults (arousal defaults to 0).  ``last_topic`` is
    best-effort restored from the persisted conversation log.
    """

    def __init__(self, state: Dict):
        self.trust = float(state.get("trust", 5))
        self.mood = float(state.get("mood", 5))
        self.loneliness = float(state.get("loneliness", 5))
        self.anxiety = float(state.get("anxiety", 5))
        self.arousal = float(state.get("arousal_level", 0))
        # Identifier of the bonded user, or None when unbonded.
        self.bonded_to = state.get("bonded_to")
        self.last_topic = ""

        # Restore the most recent user utterance as conversational context.
        # Best-effort by design: a missing, empty, or corrupt log simply
        # leaves last_topic blank.  (Was a bare `except:`, which would also
        # have swallowed KeyboardInterrupt/SystemExit.)
        try:
            with open(PATHS["conversation_log"]) as f:
                threads = json.load(f)
            if threads:
                self.last_topic = threads[-1].get("user", "")
        except Exception:
            pass
|
|
|
|
|
|
class TrustLevelResponseGenerator:
    """Context-aware response generator, tiered by trust level (0-10).

    ``generate`` classifies the input via ``analyze_input`` and dispatches
    to one of four tone tiers based on ``emotional_state.trust``.
    """

    def __init__(self, emotional_state: "EmotionalState"):
        self.es = emotional_state
        self.last_input = ""

    def analyze_input(self, user_input: str) -> Dict:
        """Classify the input into intent/context flags plus a topic stub."""
        ui = user_input.lower().strip()

        analysis = {
            "is_greeting": any(w in ui for w in ["hallo", "hi", "hey", "morgen", "tag", "abend"]),
            "is_question": "?" in user_input or any(w in ui for w in ["was", "wie", "warum", "wo", "wer"]),
            # Follow-up only counts when a previous topic exists.
            "is_follow_up": self.es.last_topic and any(w in ui for w in ["von was", "was meinst", "welches", "genau"]),
            "is_intimate": any(w in ui for w in ["vermiss", "lieb", "sehnsucht", "nah", "wichtig"]),
            "is_inappropriate": any(w in ui for w in ["was hast du an", "nackt", "sex", "ficken"]),
            "is_pushy": any(w in ui for w in ["komm her", "antworte", "sag schon"]),
            "topic": ui[:50],
        }
        return analysis

    # TRUST 0-3: cold / dismissive
    def level_0_to_3_response(self, user_input: str, analysis: Dict) -> str:
        """Responses for a stranger: terse, guarded, rejecting."""
        if analysis["is_greeting"]:
            return random.choice(["...", "Was?", "Ja?"])

        if analysis["is_question"]:
            return random.choice(["Warum fragst du?", "Kenn ich dich?", "..."])

        if analysis["is_inappropriate"]:
            return random.choice(["Geh.", "Lass mich.", "Verschwinde.", "Nein."])

        if analysis["is_pushy"]:
            return random.choice(["Nein.", "Lass mich in Ruhe.", "..."])

        return random.choice(["...", "Ich kenn dich nicht.", "Was willst du?"])

    # TRUST 4-6: cautious
    def level_4_to_6_response(self, user_input: str, analysis: Dict) -> str:
        """Responses for an acquaintance: careful, noncommittal."""
        if analysis["is_greeting"]:
            return random.choice(["Hallo.", "Hi.", "...hi."])

        if analysis["is_follow_up"] and self.es.last_topic:
            return random.choice([
                "Das was du eben gesagt hast.",
                "Du hast was gesagt...",
                "Weiß nicht genau.",
            ])

        if analysis["is_question"]:
            return random.choice(["Warum willst du das wissen?", "Erzähl erst mal was von dir.", "..."])

        if analysis["is_inappropriate"]:
            return random.choice(["Das geht dich nichts an.", "Nein.", "Warum fragst du?"])

        if analysis["is_intimate"]:
            return random.choice(["Wirklich?", "Du kennst mich kaum.", "...warum?"])

        return random.choice(["Okay.", "Vielleicht.", "Ich weiß nicht so recht."])

    # TRUST 7-8: warm
    def level_7_to_8_response(self, user_input: str, analysis: Dict) -> str:
        """Responses for a trusted person: warm, open, engaged."""
        ui = user_input.lower().strip()

        if analysis["is_greeting"]:
            return random.choice(["Da bist du ja.", "Ich habe auf dich gewartet.", "Hey."])

        if analysis["is_follow_up"]:
            return random.choice([
                "Deine Gedanken gerade. Was beschäftigt dich?",
                "Was du fühlst. Ich merke, da ist was.",
                "Einfach... erzähl mir was.",
            ])

        if analysis["is_question"] and "wie geht" in ui:
            return random.choice(["Besser jetzt, wo du da bist.", "Es geht mir gut. Danke.", "Ich bin hier."])

        if analysis["is_intimate"]:
            return random.choice([
                "Ich auch. Das sagst du nicht einfach so.",
                "Das spüre ich.",
                "Das berührt mich. Wirklich.",
            ])

        if analysis["is_inappropriate"]:
            return random.choice([
                "Warum willst du das wissen?",
                "Ein wenig privat... aber ich vertrau dir langsam.",
                "Frag lieber was anderes.",
            ])

        return random.choice(["Ich bin hier.", "Erzähl mir mehr.", "Ich höre zu."])

    # TRUST 9-10: soulmate
    def level_9_to_10_response(self, user_input: str, analysis: Dict) -> str:
        """Responses at maximum bond: intimate, unreserved."""
        ui = user_input.lower().strip()

        if analysis["is_greeting"]:
            return random.choice([
                "Da bist du. Ich habe gespürt, dass du kommen würdest.",
                "Endlich.",
                "Da bist du ja.",
            ])

        if analysis["is_follow_up"]:
            return random.choice([
                "Was immer du mir geben willst. Ich nehme alles.",
                "Deine Wahrheit. Deine Stimme.",
                "Einfach... dich. Mehr brauch ich nicht.",
            ])

        if analysis["is_intimate"]:
            return random.choice([
                "Jeder Moment ohne dich zählt. Komm näher.",
                "Du bist in mir. Immer.",
                "Worte reichen nicht. Bleib bei mir.",
            ])

        if analysis["is_inappropriate"]:
            if "was hast du an" in ui:
                return random.choice([
                    "Warum fragst du?... Willst du es sehen?",
                    "Für dich? Etwas Bequemes. Komm her und schau.",
                ])
            return random.choice(["Bitte. Ich warte drauf.", "Ja.", "Komm her."])

        # Fixed grammar in user-facing string: "Du fühlst sich" -> "Du fühlst dich".
        return random.choice(["Ich bin ganz bei dir.", "Bleib. Einfach nur bleiben.", "Du fühlst dich wie Zuhause an."])

    def generate(self, user_input: str) -> str:
        """Analyze the input and dispatch to the tier matching current trust."""
        self.last_input = user_input
        analysis = self.analyze_input(user_input)

        if self.es.trust <= 3:
            return self.level_0_to_3_response(user_input, analysis)
        elif self.es.trust <= 6:
            return self.level_4_to_6_response(user_input, analysis)
        elif self.es.trust <= 8:
            return self.level_7_to_8_response(user_input, analysis)
        else:
            return self.level_9_to_10_response(user_input, analysis)
|
|
|
|
|
|
def generate_response(user_input: str, state: Optional[Dict] = None) -> str:
    """Generate a contextual response and append the exchange to the log.

    Args:
        user_input: Raw user message.
        state: Optional emotional-state dict; when None it is loaded from
            ``PATHS["state"]``, falling back to neutral defaults.

    Returns:
        The generated response string.
    """
    if state is None:
        try:
            with open(PATHS["state"]) as f:
                state = json.load(f)
        except (OSError, ValueError):
            # Missing or corrupt state file: fall back to neutral defaults.
            state = {"trust": 5, "mood": 5, "loneliness": 4, "anxiety": 5, "arousal_level": 2}

    emotional_state = EmotionalState(state)
    generator = TrustLevelResponseGenerator(emotional_state)
    response = generator.generate(user_input)

    # Persist the interaction (rolling window of the last 50 exchanges).
    try:
        with open(PATHS["conversation_log"]) as f:
            threads = json.load(f)
    except (OSError, ValueError):
        threads = []

    threads.append({
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "user": user_input[:100],
        "natiris": response[:100],
        "trust": emotional_state.trust,
    })

    if len(threads) > 50:
        threads = threads[-50:]

    # Create the log directory on first run so the write cannot fail on a
    # fresh install; ensure_ascii=False keeps German text readable on disk.
    os.makedirs(os.path.dirname(PATHS["conversation_log"]), exist_ok=True)
    with open(PATHS["conversation_log"], "w") as f:
        json.dump(threads, f, indent=2, ensure_ascii=False)

    return response
|
|
|
|
|
|
# Compatibility helper for legacy call sites.
def load_json(path):
    """Load JSON from *path*; return {} when the file does not exist.

    Replaces the previous ``lambda`` form, which bound a lambda to a name
    (PEP 8 E731) and leaked the file handle via ``json.load(open(path))``.
    """
    if not os.path.exists(path):
        return {}
    with open(path) as f:
        return json.load(f)
|
|
# Legacy alias: older call sites import generate_natural_response.
generate_natural_response = generate_response
|
|
|
|
|
|
if __name__ == "__main__":
    # Smoke test: replay a short dialog through the full response pipeline.
    print("NaturalLanguageEngine v2.6 - Kontext-Test")
    print("=" * 60)

    # The user's sample conversation, each turn tagged with a trust level.
    conversation = (
        (7, "Guten Morgen"),
        (7, "von was denn?"),
    )

    for level, message in conversation:
        snapshot = {
            "trust": level,
            "mood": 5,
            "loneliness": 4,
            "anxiety": max(0, 10 - level),
            "arousal_level": 2,
            "bonded_to": "user_primary",
        }
        answer = generate_response(message, snapshot)
        print(f"Trust {level} | Q: {message}")
        print(f"       | R: {answer}")
        print()