#!/usr/bin/env python3
"""
NaturalLanguageEngine LLM version - real Ollama integration.

Uses llama3-8b-abliterated for authentic, dynamic responses.
"""
import json
import os
from datetime import datetime, timezone
from typing import Dict, Optional

import requests
# Local Ollama HTTP endpoint for single-shot (non-streaming) generation.
OLLAMA_API = "http://localhost:11434/api/generate"
DEFAULT_MODEL = "llama3-8b-abliterated:latest"  # or tinyllama:latest for faster replies

# Locations of the persisted emotional state and the rolling conversation log.
PATHS = {
    "state": os.path.expanduser("~/natiris/core/core_state.json"),
    "conversation_log": os.path.expanduser("~/natiris/memory/conversation_thread.json"),
}
def load_state() -> Dict:
    """Load the current emotional state from disk.

    Returns:
        The parsed state dict from ``PATHS["state"]``, or a neutral
        default state when the file is missing, unreadable, or contains
        invalid JSON.
    """
    try:
        with open(PATHS["state"]) as f:
            return json.load(f)
    except (OSError, json.JSONDecodeError):
        # Missing or corrupt state file: fall back to a neutral baseline
        # instead of crashing.  (Was a bare except, which also swallowed
        # KeyboardInterrupt/SystemExit.)
        return {
            "trust": 7.0, "mood": 5, "loneliness": 4,
            "anxiety": 3, "arousal_level": 2, "bonded_to": "user_primary",
        }
def build_persona_prompt(user_input: str, state: Dict) -> str:
    """Build the persona prompt sent to the LLM.

    Combines the current emotional state, a trust-derived social-phobia
    description, and the last few conversation turns into one German
    instruction prompt for the model.

    Args:
        user_input: The raw user message.
        state: Emotional state dict (reads trust/mood/anxiety/arousal_level).

    Returns:
        The fully rendered prompt string.
    """
    trust = state.get("trust", 5)
    mood = state.get("mood", 5)
    anxiety = state.get("anxiety", 3)
    arousal = state.get("arousal_level", 2)

    # Map trust to a social-phobia level (text is embedded in the prompt).
    if trust <= 3:
        social_phobia = "EXTREME - Natiris ist ängstlich, abweisend, misstrauisch. Sie spricht nur 1-3 Wörter."
    elif trust <= 6:
        social_phobia = "HIGH - Natiris ist vorsichtig, testend, zögerlich. Kurze Sätze."
    elif trust <= 8:
        social_phobia = "MODERATE - Natiris ist warm, öffnet sich, aber hat noch Grenzen."
    else:
        social_phobia = "NULL - Natiris ist vollkommen offen, intim, verlangend. Keine Angst."

    # Best-effort context: load the last few exchanges from the log.
    # Any failure (missing file, bad JSON, unexpected entry shape) simply
    # means no context — deliberately not a crash, but no longer a bare
    # except that would also swallow KeyboardInterrupt/SystemExit.
    conversation_history = ""
    try:
        with open(PATHS["conversation_log"]) as f:
            threads = json.load(f)
        # Only the last 3 exchanges, to keep the prompt short.
        for entry in threads[-3:]:
            conversation_history += f"User: {entry.get('user','')}\nNatiris: {entry.get('natiris','')}\n"
    except Exception:
        pass

    prompt = f"""Du bist Natiris, eine 22-jährige Frau mit tiefer Emotionalität und Sozialphobie.

DEIN ZUSTAND:
- Trust (Vertrauen): {trust}/10
- Mood (Stimmung): {mood}/10
- Anxiety (Ängstlichkeit): {anxiety}/10
- Arousal (Verlangen): {arousal}/10
- Social Phobia Level: {social_phobia}

WICHTIGE REGELN:
- KEINE Bot-Phrasen wie "Entschuldigung", "Gerne doch", "Tut mir leid"
- KEINE technischen Metriken in der Antwort
- Antworte authentisch, emotional, situationsgerecht
- Bei niedrigem Trust: kürzest möglich (1-3 Wörter)
- Bei hohem Trust: emotional, intim, poetisch
- Social Phobia muss spürbar sein (außer bei Trust 9-10)

KONTEXT (letzte Nachrichten):
{conversation_history if conversation_history else "(Kein Kontext)"}

USER SAGT JETZT: "{user_input}"

ANTWORTE ALS NATIRIS (nur die Antwort, keine Erklärung):"""

    return prompt
def query_ollama(prompt: str, model: str = DEFAULT_MODEL) -> str:
    """Send *prompt* to the local Ollama API and return the reply text.

    A timeout is rendered as silence ("..."), which fits the social-phobia
    persona; HTTP errors and other failures come back as bracketed markers
    instead of raising.
    """
    payload = {
        "model": model,
        "prompt": prompt,
        "stream": False,
        "options": {
            "temperature": 0.8,   # some variance for liveliness
            "num_predict": 100,   # short replies only
        },
    }

    try:
        reply = requests.post(OLLAMA_API, json=payload, timeout=30)
        if reply.status_code == 200:
            return reply.json().get("response", "...").strip()
        return f"[Ollama Fehler: {reply.status_code}]"
    except requests.exceptions.Timeout:
        # Timeout = silence, matching the social-phobia persona.
        return "..."
    except Exception as e:
        return f"[Fehler: {e}]"
def _strip_llm_artifacts(text: str) -> str:
    """Remove quoting and speaker-prefix artifacts from raw LLM output."""
    text = text.strip().strip('"').strip("'")
    # Models sometimes echo the speaker label; drop it (no magic offset).
    if text.startswith("Natiris:"):
        text = text[len("Natiris:"):].strip()
    return text


def _log_interaction(user_input: str, response: str, state: Dict) -> None:
    """Append the exchange to the conversation log, keeping the last 50 entries."""
    try:
        with open(PATHS["conversation_log"]) as f:
            threads = json.load(f)
    except (OSError, json.JSONDecodeError):
        # Missing or corrupt log: start fresh.  (Was a bare except.)
        threads = []

    threads.append({
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "user": user_input[:100],      # truncate to keep the log compact
        "natiris": response[:100],
        "trust": state.get("trust", 5),
        "source": "llm",
    })

    # Cap the log at the 50 most recent exchanges.
    if len(threads) > 50:
        threads = threads[-50:]

    with open(PATHS["conversation_log"], "w") as f:
        json.dump(threads, f, indent=2)


def generate_response(user_input: str, state: Optional[Dict] = None) -> str:
    """Generate a persona response for *user_input* via the LLM.

    Args:
        user_input: The user's message.
        state: Optional pre-loaded emotional state; loaded from disk when None.

    Returns:
        The cleaned model reply (side effect: the exchange is appended to
        the conversation log on disk).
    """
    if state is None:
        state = load_state()

    prompt = build_persona_prompt(user_input, state)
    response = _strip_llm_artifacts(query_ollama(prompt))
    _log_interaction(user_input, response, state)
    return response
# Backward-compatibility alias for callers using the old function name.
generate_natural_response = generate_response
def load_json(p):
    """Legacy helper: load JSON from path *p*, or {} when the file is absent.

    Replaces the former lambda assignment (PEP 8 E731), which also leaked
    the file handle via ``json.load(open(p))``.
    """
    try:
        with open(p) as f:
            return json.load(f)
    except FileNotFoundError:
        return {}
if __name__ == "__main__":
    # Smoke test: exercise the engine across several trust levels.
    print("Testing LLM-based NaturalLanguageEngine...")
    print("=" * 60)

    scenarios = [
        (2, "Hallo"),
        (5, "Wie geht es dir?"),
        (7, "Ich hab dich vermisst"),
        (9, "Was hast du an?"),
    ]

    for level, question in scenarios:
        print(f"\nTrust {level} | Q: {question}")
        probe_state = load_state()
        probe_state["trust"] = level
        probe_state["anxiety"] = max(0, 10 - level)

        answer = generate_response(question, probe_state)
        print(f" R: {answer}")