#!/usr/bin/env python3
"""
Run analytics tables migration on Supabase.

Usage:
    python scripts/run_analytics_migration.py

Requires the URL (or SUPABASE_URL) and SUPABASE_ADMIN environment variables (or a .env file).
"""

import os
import sys
from pathlib import Path

# Load .env if present (skipped when python-dotenv is not installed)
try:
    from dotenv import load_dotenv
    load_dotenv()
except ImportError:
    pass

# Get Supabase credentials
SUPABASE_URL = os.environ.get('URL') or os.environ.get('SUPABASE_URL')
SUPABASE_KEY = os.environ.get('SUPABASE_ADMIN')

if not SUPABASE_URL or not SUPABASE_KEY:
    print("ERROR: Missing Supabase credentials")
    print("  Set URL and SUPABASE_ADMIN environment variables")
    print("  Or create a .env file with these values")
    sys.exit(1)

try:
    from supabase import create_client
except ImportError:
    print("ERROR: supabase package not installed")
    print("  Run: pip install supabase")
    sys.exit(1)

# Migration SQL (the analytics portion only, embedded here rather than read from a file)
ANALYTICS_SQL = """
-- Analytics Tables for Worker Monitoring

-- Worker heartbeats (real-time status, 1 row per worker)
CREATE TABLE IF NOT EXISTS worker_heartbeats (
    worker_id TEXT PRIMARY KEY,
    machine_id TEXT NOT NULL,
    gpu_id INT NOT NULL,
    status TEXT DEFAULT 'idle',
    current_video_id TEXT,
    current_stage INT,
    current_stage_name TEXT,
    last_heartbeat TIMESTAMPTZ DEFAULT NOW(),
    session_start TIMESTAMPTZ DEFAULT NOW(),
    session_videos_done INT DEFAULT 0,
    session_videos_failed INT DEFAULT 0,
    session_audio_minutes FLOAT DEFAULT 0,
    session_usable_minutes FLOAT DEFAULT 0
);
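
-- Workers are expected to upsert their own row; an illustrative heartbeat
-- write (values are made up):
--   INSERT INTO worker_heartbeats (worker_id, machine_id, gpu_id, status)
--   VALUES ('host1_gpu0', 'host1', 0, 'processing')
--   ON CONFLICT (worker_id) DO UPDATE
--       SET status = EXCLUDED.status, last_heartbeat = NOW();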

-- Worker lifetime metrics (aggregated stats, maintained by the trigger defined below)
CREATE TABLE IF NOT EXISTS worker_metrics (
    worker_id TEXT PRIMARY KEY,
    machine_id TEXT NOT NULL,
    gpu_id INT NOT NULL,
    total_videos_processed INT DEFAULT 0,
    total_videos_failed INT DEFAULT 0,
    total_audio_hours FLOAT DEFAULT 0,
    total_usable_hours FLOAT DEFAULT 0,
    total_processing_hours FLOAT DEFAULT 0,
    avg_rtf FLOAT DEFAULT 0,
    first_seen TIMESTAMPTZ DEFAULT NOW(),
    last_active TIMESTAMPTZ DEFAULT NOW()
);

-- Processing events (done/fail only, for analysis)
CREATE TABLE IF NOT EXISTS processing_events (
    id UUID DEFAULT gen_random_uuid() PRIMARY KEY,
    worker_id TEXT NOT NULL,
    video_id TEXT NOT NULL,
    event_type TEXT NOT NULL,
    stage_name TEXT,
    duration_seconds FLOAT,
    audio_minutes FLOAT,
    speakers INT,
    usable_pct FLOAT,
    error_type TEXT,
    error_message TEXT,
    created_at TIMESTAMPTZ DEFAULT NOW()
);

-- Error logs (detailed error tracking)
CREATE TABLE IF NOT EXISTS error_logs (
    id UUID DEFAULT gen_random_uuid() PRIMARY KEY,
    worker_id TEXT NOT NULL,
    video_id TEXT NOT NULL,
    stage TEXT,
    error_type TEXT,
    error_message TEXT,
    stack_trace TEXT,
    created_at TIMESTAMPTZ DEFAULT NOW()
);

-- Indexes
CREATE INDEX IF NOT EXISTS idx_events_worker ON processing_events(worker_id, created_at DESC);
CREATE INDEX IF NOT EXISTS idx_events_video ON processing_events(video_id);
CREATE INDEX IF NOT EXISTS idx_events_type ON processing_events(event_type, created_at DESC);
CREATE INDEX IF NOT EXISTS idx_errors_worker ON error_logs(worker_id, created_at DESC);
CREATE INDEX IF NOT EXISTS idx_errors_recent ON error_logs(created_at DESC);
"""

# SQL for the cluster overview view (kept separate so it can be created or
# replaced independently of the table DDL)
VIEW_SQL = """
CREATE OR REPLACE VIEW cluster_overview AS
SELECT
    COUNT(DISTINCT machine_id) as total_machines,
    COUNT(*) as total_workers,
    COUNT(*) FILTER (WHERE status = 'processing') as active_workers,
    COUNT(*) FILTER (WHERE status = 'idle') as idle_workers,
    COUNT(*) FILTER (WHERE last_heartbeat < NOW() - INTERVAL '2 minutes') as stale_workers,
    COALESCE(SUM(session_videos_done), 0) as session_total_videos,
    COALESCE(SUM(session_audio_minutes), 0) as session_total_audio_min,
    COALESCE(SUM(session_usable_minutes), 0) as session_total_usable_min
FROM worker_heartbeats;
"""

# Trigger function that rolls each processing_events row into worker_metrics
TRIGGER_SQL = """
CREATE OR REPLACE FUNCTION update_worker_metrics()
RETURNS TRIGGER AS $$
BEGIN
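    -- NOTE: assumes worker_id follows the '<machine_id>_gpu<N>' naming
    -- convention (e.g. 'host1_gpu0'); the ::INT casts below fail otherwise.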
    IF NEW.event_type = 'done' THEN
        INSERT INTO worker_metrics (worker_id, machine_id, gpu_id, total_videos_processed,
                                    total_audio_hours, total_usable_hours, total_processing_hours, last_active)
        VALUES (
            NEW.worker_id,
            split_part(NEW.worker_id, '_gpu', 1),
            split_part(NEW.worker_id, '_gpu', 2)::INT,
            1,
            COALESCE(NEW.audio_minutes, 0) / 60.0,
            COALESCE(NEW.audio_minutes * NEW.usable_pct / 100.0, 0) / 60.0,
            COALESCE(NEW.duration_seconds, 0) / 3600.0,
            NOW()
        )
        ON CONFLICT (worker_id) DO UPDATE SET
            total_videos_processed = worker_metrics.total_videos_processed + 1,
            total_audio_hours = worker_metrics.total_audio_hours + COALESCE(NEW.audio_minutes, 0) / 60.0,
            total_usable_hours = worker_metrics.total_usable_hours + COALESCE(NEW.audio_minutes * NEW.usable_pct / 100.0, 0) / 60.0,
            total_processing_hours = worker_metrics.total_processing_hours + COALESCE(NEW.duration_seconds, 0) / 3600.0,
            avg_rtf = CASE
                WHEN worker_metrics.total_processing_hours + COALESCE(NEW.duration_seconds, 0) / 3600.0 > 0
                THEN (worker_metrics.total_audio_hours + COALESCE(NEW.audio_minutes, 0) / 60.0) /
                     (worker_metrics.total_processing_hours + COALESCE(NEW.duration_seconds, 0) / 3600.0)
                ELSE 0
            END,
            last_active = NOW();
    ELSIF NEW.event_type = 'fail' THEN
        INSERT INTO worker_metrics (worker_id, machine_id, gpu_id, total_videos_failed, last_active)
        VALUES (
            NEW.worker_id,
            split_part(NEW.worker_id, '_gpu', 1),
            split_part(NEW.worker_id, '_gpu', 2)::INT,
            1,
            NOW()
        )
        ON CONFLICT (worker_id) DO UPDATE SET
            total_videos_failed = worker_metrics.total_videos_failed + 1,
            last_active = NOW();
    END IF;
    RETURN NEW;
END;
$$ LANGUAGE plpgsql;

DROP TRIGGER IF EXISTS trigger_update_worker_metrics ON processing_events;
CREATE TRIGGER trigger_update_worker_metrics
    AFTER INSERT ON processing_events
    FOR EACH ROW
    EXECUTE FUNCTION update_worker_metrics();
"""

def main():
    print("=" * 60)
    print("Analytics Tables Migration")
    print("=" * 60)
    print(f"Supabase URL: {SUPABASE_URL}")
    print()

    # Create client
    client = create_client(SUPABASE_URL, SUPABASE_KEY)

    # supabase-py has no raw-SQL interface, so this script cannot apply the
    # DDL itself; it prints instructions, then verifies which tables exist.
    # Derive the project ref from the API URL's subdomain for the dashboard link.
    project_ref = SUPABASE_URL.split('//')[-1].split('.')[0]

    print("NOTE: the Supabase Python client doesn't support raw SQL execution.")
    print()
    print("Please run the migration SQL in the Supabase SQL Editor:")
    print(f"  1. Go to: https://supabase.com/dashboard/project/{project_ref}/sql")
    print("  2. Copy and paste the SQL from: scripts/supabase_migrations.sql")
    print("  3. Look for the 'ANALYTICS TABLES' section (Steps 8-15)")
    print()
    print("Or run the full migration file:")
    print(f"  cat scripts/supabase_migrations.sql")
    print()
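
    # If the project defines a Postgres function for executing SQL over RPC
    # (hypothetical name 'exec_sql'; not part of stock Supabase), the
    # migration could be applied directly instead, e.g.:
    #   for sql in (ANALYTICS_SQL, VIEW_SQL, TRIGGER_SQL):
    #       client.rpc('exec_sql', {'sql': sql}).execute()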

    # Probe each analytics table to see whether the migration has been applied
    print("Checking if tables already exist...")
    for table, column in [
        ('worker_heartbeats', 'worker_id'),
        ('worker_metrics', 'worker_id'),
        ('processing_events', 'id'),
        ('error_logs', 'id'),
    ]:
        try:
            client.table(table).select(column).limit(1).execute()
            print(f"  {table}: EXISTS")
        except Exception:
            print(f"  {table}: NOT FOUND (run migration)")

    print()
    print("=" * 60)

if __name__ == '__main__':
    main()
