#!/usr/bin/env python3
"""Test script for local Ollama connection.

Checks, in order: the server is reachable, the target model is pulled,
and a trivial generation round-trips.  Exits non-zero only when the
server itself is unreachable.
"""
import requests
import sys

# Default local Ollama endpoint and the model this project targets.
OLLAMA_HOST = "http://localhost:11434"
MODEL = "HammerAI/rocinante-v1.1:12b-q4_K_M"


def test_connection():
    """Probe the Ollama ``/api/tags`` endpoint.

    Returns:
        list[str] | None: model names available locally when the server
        answered (empty list on a non-200 response), or ``None`` when the
        server was unreachable — the caller uses ``None`` specifically to
        decide whether to abort, so the three-way return is deliberate.
    """
    try:
        resp = requests.get(f"{OLLAMA_HOST}/api/tags", timeout=5)
        if resp.status_code == 200:
            data = resp.json()
            models = [m['name'] for m in data.get('models', [])]
            print("✓ Ollama is running")
            print(f" Available models: {models}")
            return models
        else:
            print(f"✗ Ollama returned status {resp.status_code}")
            return []
    except (requests.exceptions.ConnectionError,
            requests.exceptions.Timeout):
        # Timeout gets the same friendly hint as a refused connection:
        # in both cases the practical fix is starting the server.
        print(f"✗ Cannot connect to Ollama at {OLLAMA_HOST}")
        print(" Start Ollama with: ollama serve")
        return None
    except Exception as e:
        print(f"✗ Error: {e}")
        return None


def check_model(models):
    """Report whether ``MODEL`` is in *models*; print pull instructions if not.

    Args:
        models: list of model-name strings from :func:`test_connection`.

    Returns:
        bool: True when the exact name (including tag) is present.
    """
    if MODEL in models:
        print(f"✓ Model {MODEL} is available")
        return True
    print(f"✗ Model {MODEL} not found")
    print(f" Available models: {models}")
    print("\n To download, run:")
    print(f" ollama pull {MODEL}")
    return False


def test_generate():
    """Run one non-streaming generation against ``MODEL``.

    Returns:
        bool: True when the server returned HTTP 200; the first 100
        characters of the response text are printed as a preview.
    """
    try:
        resp = requests.post(f"{OLLAMA_HOST}/api/generate", json={
            "model": MODEL,
            "prompt": "Hello, who are you?",
            "stream": False
        }, timeout=30)  # generous timeout: first call may load the model
        if resp.status_code == 200:
            data = resp.json()
            print("✓ Test generation successful")
            print(f" Response preview: {data.get('response', '')[:100]}...")
            return True
        else:
            print(f"✗ Generation failed: {resp.status_code}")
            print(f" {resp.text}")
            return False
    except Exception as e:
        print(f"✗ Generation error: {e}")
        return False


if __name__ == "__main__":
    print("=" * 50)
    print("Nimue Ollama Test")
    print("=" * 50)
    print(f"Target: {OLLAMA_HOST}")
    print(f"Model: {MODEL}")
    print("-" * 50)

    models = test_connection()
    # None means the server is down — nothing further can be tested.
    if models is None:
        sys.exit(1)

    if not check_model(models):
        print("\n" + "=" * 50)
        print("SETUP REQUIRED:")
        print("=" * 50)
    else:
        # Only attempt generation when the target model is present.
        print("\n" + "-" * 50)
        test_generate()