- Langzeit- und Kurzzeitgedächtnis mit SQLite
- Ollama-Integration für lokale LLMs
- Flask-Webinterface mit Stream-Response
- Persona-System mit konfigurierbarem Charakter
- Auto-Zusammenfassung bei Token-Limit
- Rate Limiting und Sicherheitsfeatures
- Uncensored Modell-Support
61 lines
1.2 KiB
Bash
Executable File
#!/bin/bash
#
# Nimue Setup Script
#
# Prepares a machine to run Nimue: ensures Ollama is installed and its
# server is reachable, checks for Python, installs the Python
# dependencies from requirements.txt, pulls the chat model if missing,
# and creates the runtime log directory.
#
# Usage: ./setup.sh   (no arguments, no required env vars)

# -e: abort on unhandled command failure; -u: error on unset variables.
set -eu

echo "================================"
echo " Nimue Setup"
echo "================================"

# Install Ollama via the official installer if it is not on PATH.
if ! command -v ollama &> /dev/null; then
    echo "Ollama not found. Installing..."
    # NOTE(review): pipes a remote script into sh — accepted here because
    # it is the upstream-documented install method for Ollama.
    curl -fsSL https://ollama.com/install.sh | sh
fi

echo "✓ Ollama found"

# Probe the local Ollama API; start the server in the background if it
# is not responding yet.
if ! curl -s http://localhost:11434/api/tags > /dev/null; then
    echo "Starting Ollama..."
    ollama serve &
    sleep 5   # give the server a moment to bind its port
fi

echo "✓ Ollama running"

# Require a `python` interpreter (the run instructions below use it).
if ! command -v python &> /dev/null; then
    echo "Python not found!"
    exit 1
fi

echo "✓ Python found"

# Install Python dependencies into the active environment.
echo "Installing Python dependencies..."
pip install -r requirements.txt

echo "✓ Dependencies installed"

# Pull the chat model unless it is already present locally.
MODEL="HammerAI/rocinante-v1.1:12b-q4_K_M"
echo "Checking for model: $MODEL"

# -F: match the tag literally ('.' and '/' are regex metachars);
# --: protect against a pattern starting with '-'.
if ! ollama list | grep -qF -- "$MODEL"; then
    echo "Model not found. Downloading (this may take a while)..."
    ollama pull "$MODEL"
fi

echo "✓ Model ready"

# Create runtime directories.
mkdir -p logs

echo ""
echo "================================"
echo " Setup complete!"
echo ""
echo " Start with: python main.py"
echo " Then open: http://localhost:5000"
echo "================================"