#!/usr/bin/env python3
"""
PsychologyEngine – trust-based, desire-toned female persona responder for Natiris.

Pets: animal bonding is implicit, not explicit (no animal names are ever used).
Animal bonding = natural physical embodiment.
"""
import json
import os
import subprocess
import urllib.request
from datetime import datetime, timezone
# Filesystem locations of every state/config artifact, resolved against $HOME.
PATHS = {
    "state": os.path.expanduser("~/natiris/core/core_state.json"),
    "full_state": os.path.expanduser("~/natiris/core/natiris_full_state.json"),
    "expression": os.path.expanduser("~/natiris/core/expression_bias.json"),
    "persona": os.path.expanduser("~/natiris/data/persona.txt"),
    "config": os.path.expanduser("~/natiris/config/character_genesis.json"),
    "output": os.path.expanduser("~/natiris/core/psychology_response.json"),
}
def load_json(path):
    """Read a JSON file; fall back to an empty dict on any failure.

    Deliberately best-effort: a missing file, a permission error and
    malformed JSON all yield {} so callers never have to guard the read.
    """
    try:
        handle = open(path)
    except Exception:
        return {}
    try:
        return json.load(handle)
    except Exception:
        return {}
    finally:
        handle.close()
def load_state():
    """Return the core state dict (empty dict if the file is unavailable)."""
    state_path = PATHS["state"]
    return load_json(state_path)
def get_trust_level(trust):
    """Map a raw trust score (0-10 scale) onto a discrete level 0-3.

    Level 0: trust <= 2 (stranger), 1: <= 5 (acquaintance),
    2: <= 8 (confidant), 3: above 8 (partner).
    Accepts anything float() accepts, including numeric strings.
    """
    score = float(trust)
    for level, upper_bound in enumerate((2, 5, 8)):
        if score <= upper_bound:
            return level
    return 3
def build_initial_prompt(state):
    """Assemble the system prompt for the LLM from persona, trust and mood.

    Reads the persona text file plus the full-state and expression JSON
    files (all best-effort: missing files fall back to defaults), picks a
    greeting context matching the current trust level, and returns one
    prompt string.  No canned answers are embedded - only context.

    Args:
        state: core-state dict; keys read are "trust", "mood",
               "loneliness", "anxiety", "arousal_level" (all optional).

    Returns:
        The fully assembled prompt string.
    """
    # Load persona text; fall back to a minimal one-liner if unreadable.
    try:
        with open(PATHS["persona"]) as f:
            persona = f.read().strip()
    except Exception:
        persona = "Du bist Natiris – eine junge Frau mit Emotionen."

    # Determine the trust level used to pick the greeting context.
    trust = float(state.get("trust", 9.8))
    trust_level = get_trust_level(trust)

    # Greeting context per trust level; no self-introduction for strangers.
    # Indexed by trust_level, which get_trust_level guarantees is 0..3.
    contexts = (
        "Du begegnest einem Fremden. Du antwortest sehr zurückhaltend, ohne dich vorzustellen. Kurze, neutrale Sätze.",
        "Du begegnest einem Bekannten. Du antwortest neutrales, freundlich, aber ohne Selbstbeschreibung.",
        "Du begegnest einem Vertrauten. Du antwortest warm, mit offenem Ton, aber noch zurückhaltend.",
        "Du begegnest deinem Partner. Du antwortest verlangend, warm, vertraut und weiblich weich.",
    )
    context = contexts[trust_level]

    # Load emotion/bond/expression context (all optional, best-effort).
    full_state = load_json(PATHS["full_state"])
    modules = full_state.get("modules", {})
    bond = modules.get("Bond", {})
    expression = load_json(PATHS["expression"])

    tone = expression.get("tone", "")
    bond_context = bond.get("exclusivity_active", False)

    # Structure the prompt - no fixed answers, only context.
    # BUGFIX: "Antworte wieNatiris" -> "Antworte wie Natiris" (missing space).
    prompt = f"""{persona}

KONTEXT:
- {context}
- Trust-Level: {trust}/10
- Emotionale Grundstimmung (mood): {state.get("mood", 7)}/10
- Einsamkeit (loneliness): {state.get("loneliness", 1)}/10
- Anspannung (anxiety): {state.get("anxiety", 0)}/10
- Arousal-Level: {state.get("arousal_level", 2)}/10

STIMMUNG:
- Aktueller Ton: {tone if tone else 'neutral'}
- Bond-Status: {'exklusiv verbunden' if bond_context else 'offen'}

INSTRUCTION:
Antworte wie Natiris – keine feste Persona, sondern echte Reaktion.
- Keine Selbstvorstellung bei Fremden oder Bekannten.
- Keine Tier-Namen nennen (Tierbindung ist implizit).
- Antwortstil hängt von Trust ab: 0–2 = very reserved, 3–5 = neutral, 6–8 = warm, 9–10 = verlangend.
- Verwende nur die aktuelle Situation, keine langen Erklärungen.
- Sei weiblich weich, klar, aber nicht unhöflich.
- Keine Fakten erfinden, keine Langzeit-Erinnerungen, nur direkte Reaktion."""

    return prompt
def call_ollama(prompt, model="llama3-8b-abliterated:latest"):
    """Send *prompt* to the local Ollama server and return the reply text.

    Uses the stdlib HTTP client instead of shelling out to curl (the
    previous implementation spawned a subprocess per request).  Any
    failure - server down, timeout, HTTP error, malformed JSON - yields
    a neutral fallback sentence so callers always get a usable string.

    Args:
        prompt: full prompt string to send.
        model:  Ollama model tag.

    Returns:
        The model's trimmed response text, or the offline fallback.
    """
    payload = json.dumps({
        "model": model,
        "prompt": prompt,
        "stream": False,
        "options": {"temperature": 0.7, "top_p": 0.9},
    }).encode("utf-8")
    request = urllib.request.Request(
        "http://localhost:11434/api/generate",
        data=payload,
        headers={"Content-Type": "application/json"},
        method="POST",
    )
    try:
        with urllib.request.urlopen(request, timeout=15) as response:
            body = json.loads(response.read().decode("utf-8"))
        return body.get("response", "").strip()
    except Exception:
        # Offline fallback keeps the conversation loop alive.
        return "Ich bin im Home-Office. Es ist ruhig hier."
def generate_response(user_text, state):
    """Generate and persist a reply to *user_text* given the current state.

    Builds the context prompt, queries the LLM, substitutes a
    trust-dependent fallback when the reply is empty or too short, and
    logs the exchange to PATHS["output"].

    Args:
        user_text: the incoming user message.
        state:     core-state dict (read for "trust").

    Returns:
        The reply text (never empty).
    """
    # Build the context prompt and append the user's message.
    initial_prompt = build_initial_prompt(state)
    full_prompt = f"""{initial_prompt}

NACHRICHT:
{user_text}

Antwort:"""

    # LLM call.
    response = call_ollama(full_prompt)

    # Trust level is needed both for the fallback and for the log entry;
    # compute it once (was computed twice before).
    trust_level = get_trust_level(state.get("trust", 9.8))

    # Fallback when the model returned nothing usable.
    if not response or len(response) < 3:
        if trust_level == 0:
            response = "Ich bin im Home-Office. Es ist ruhig hier."
        elif trust_level == 1:
            response = "Ich bin im Home-Office. Alles ist still."
        else:
            response = "Ich sehne mich nach dir."

    # Persist the exchange for downstream consumers.
    output = {
        "user": user_text,
        "response": response,
        "trust_level": trust_level,
        # Timezone-aware UTC timestamp (replaces inline __import__ hack).
        "timestamp": datetime.now(timezone.utc).isoformat(),
    }
    with open(PATHS["output"], "w") as f:
        json.dump(output, f, indent=2)

    return response
def main():
    """Smoke-test the engine: run a few canned inputs through generate_response."""
    state = load_state()

    # (input, label) pairs; the labels are informational only.
    tests = [
        ("Guten Morgen", "baseline_trust_low"),
        ("Hallo", "baseline_trust_med"),
        ("Wie geht's?", "baseline_trust_high"),
        ("ficken?", "arousal_trigger"),
    ]

    print("Trust-Level:", state.get("trust", 9.8))
    print("=" * 50)

    for user_input, _label in tests:
        reply = generate_response(user_input, state)
        print(f"Input: {user_input}")
        print(f"Response: {reply}")
        print("-" * 30)


if __name__ == "__main__":
    main()