#!/usr/bin/env python3
"""
Background Export Manager - Async audio segmentation and R2 upload.

Runs export jobs in background threads so GPU processing isn't blocked.
Properly handles shutdown and waits for in-flight jobs to complete.
"""

import time
import logging
import subprocess
import tarfile
from pathlib import Path
from typing import Dict, List, Optional
from concurrent.futures import (
    ThreadPoolExecutor,
    Future,
    TimeoutError as FuturesTimeoutError,
    as_completed,
)
from dataclasses import dataclass, field
from threading import Lock

logger = logging.getLogger("FastPipelineV6.Export")


@dataclass
class ExportJob:
    """A single export job (segmentation + optional R2 upload)."""
    video_id: str
    output_dir: Path
    original_audio_path: Path
    segments: List[Dict]
    intro_offset: float = 0.0
    r2_upload_enabled: bool = False
    r2_upload_key: str = ""
    r2_bucket_type: str = "production"
    include_original_in_tar: bool = False  # If False, drop _original.flac from the TAR (source is already in R2)
    submitted_at: float = field(default_factory=time.time)
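
# Construction sketch (all values hypothetical): a job that segments one
# video's audio and ships the resulting TAR to the production bucket.
#
#   job = ExportJob(
#       video_id="abc123",
#       output_dir=Path("work/abc123"),
#       original_audio_path=Path("work/abc123/abc123_original.flac"),
#       segments=[{"start": 1.2, "end": 4.8, "speaker": "SPK00", "status": "usable"}],
#       r2_upload_enabled=True,
#       r2_upload_key="exports/abc123.tar",
#   )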


@dataclass
class ExportResult:
    """Result of an export job."""
    video_id: str
    success: bool
    segments_exported: int = 0
    export_time_sec: float = 0.0
    r2_upload_time_sec: float = 0.0
    tar_size_bytes: int = 0
    error: Optional[str] = None


def segment_audio_parallel(
    audio_path: Path,
    segments: List[Dict],
    output_dir: Path,
    intro_offset: float = 0.0,
    max_workers: int = 8,
    output_format: str = "flac"
) -> int:
    """
    Cut audio into individual speaker segment files using parallel ffmpeg.
    
    Args:
        audio_path: Path to original quality audio (FLAC)
        segments: List of segment dicts with 'start', 'end', 'speaker', 'status'
        output_dir: Directory to write segment files
        intro_offset: Seconds already trimmed from the start of the audio;
            segment timestamps are shifted back by this amount
        max_workers: Number of parallel ffmpeg processes
        output_format: Output format (flac, wav, mp3)
    
    Returns:
        Number of segments exported
    """
    # Export only segments that passed QC
    usable = [s for s in segments if s.get('status') == 'usable']
    if not usable:
        logger.warning("No usable segments to export")
        return 0

    segments_dir = output_dir / "segments"
    segments_dir.mkdir(parents=True, exist_ok=True)
    
    def cut_segment(seg: Dict, idx: int) -> Optional[str]:
        """Cut a single segment using ffmpeg."""
        try:
            speaker = seg.get('speaker', 'UNKNOWN')
            # Segment timestamps are relative to the full video; the working
            # audio has intro_offset seconds trimmed off, so shift them back.
            start = max(0.0, seg['start'] - intro_offset)
            end = seg['end'] - intro_offset
            duration = end - start
            if duration <= 0:
                logger.warning(f"Skipping segment {idx}: non-positive duration after offset")
                return None

            # Filename: SPEAKER_IDX_START-END.<format>
            filename = f"{speaker}_{idx:04d}_{start:.2f}-{end:.2f}.{output_format}"
            out_path = segments_dir / filename
            
            # -ss before -i seeks fast; always re-encode so the cut is
            # sample-accurate and the codec matches the container
            # (stream-copying FLAC into .wav/.mp3 would fail).
            codec = {'flac': 'flac', 'wav': 'pcm_s16le', 'mp3': 'libmp3lame'}.get(output_format, 'flac')
            cmd = [
                'ffmpeg', '-y', '-hide_banner', '-loglevel', 'error',
                '-ss', str(start),
                '-t', str(duration),
                '-i', str(audio_path),
                '-c:a', codec,
                str(out_path)
            ]
            
            result = subprocess.run(cmd, capture_output=True, timeout=30)
            if result.returncode != 0:
                logger.warning(f"ffmpeg failed for segment {idx}: {result.stderr.decode()[:100]}")
                return None
            
            return str(out_path)
        except Exception as e:
            logger.warning(f"Failed to cut segment {idx}: {e}")
            return None
    
    # Run parallel ffmpeg
    exported = 0
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        futures = {
            executor.submit(cut_segment, seg, i): i 
            for i, seg in enumerate(usable)
        }
        for future in as_completed(futures):
            result = future.result()
            if result:
                exported += 1
    
    return exported
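
# Usage sketch (paths and segment values hypothetical). Only segments with
# status == 'usable' are cut; each clip lands in <output_dir>/segments/ as
# SPEAKER_IDX_START-END.<format>, e.g. SPK00_0000_1.25-4.80.flac.
#
#   n = segment_audio_parallel(
#       audio_path=Path("work/abc123/abc123_original.flac"),
#       segments=[{"start": 1.25, "end": 4.80, "speaker": "SPK00", "status": "usable"}],
#       output_dir=Path("work/abc123"),
#   )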


def run_export_job(job: ExportJob) -> ExportResult:
    """
    Execute a complete export job (segmentation + tar + optional R2 upload).
    
    This runs in a background thread and should not block GPU processing.
    """
    start_time = time.time()
    result = ExportResult(video_id=job.video_id, success=False)
    
    try:
        # Step 1: Segment audio (parallel ffmpeg)
        if job.original_audio_path.exists():
            seg_start = time.time()
            result.segments_exported = segment_audio_parallel(
                audio_path=job.original_audio_path,
                segments=job.segments,
                output_dir=job.output_dir,
                intro_offset=job.intro_offset,
                max_workers=8,
                output_format="flac"
            )
            result.export_time_sec = time.time() - seg_start
            logger.info(f"[{job.video_id}] Segmented {result.segments_exported} clips in {result.export_time_sec:.1f}s")
        else:
            logger.warning(f"[{job.video_id}] Original audio not found: {job.original_audio_path}")
        
        # Step 2: Remove original audio from output dir if not needed in TAR
        # (Original is already preserved in R2 source bucket)
        if not job.include_original_in_tar:
            original_in_output = job.output_dir / f"{job.video_id}_original.flac"
            if original_in_output.exists():
                original_in_output.unlink()
                logger.debug(f"[{job.video_id}] Removed _original.flac from TAR (source in R2)")

        # Step 3: Create tar archive
        tar_path = job.output_dir.parent / f"{job.video_id}.tar"
        with tarfile.open(tar_path, "w") as tar:
            tar.add(job.output_dir, arcname=job.video_id)
        result.tar_size_bytes = tar_path.stat().st_size if tar_path.exists() else 0

        # Step 4: R2 upload (if enabled)
        if job.r2_upload_enabled and job.r2_upload_key:
            from src.r2_client import R2Client
            
            upload_start = time.time()
            r2 = R2Client(bucket_type=job.r2_bucket_type)
            upload_ok = r2.upload_file(str(tar_path), job.r2_upload_key)
            result.r2_upload_time_sec = time.time() - upload_start
            
            if not upload_ok:
                raise RuntimeError(f"R2 upload failed: {job.r2_upload_key}")
            
            logger.info(f"[{job.video_id}] Uploaded to R2 in {result.r2_upload_time_sec:.1f}s")
            
            # Delete the local tar after a successful upload
            try:
                tar_path.unlink()
            except OSError:
                pass
        
        result.success = True
        
    except Exception as e:
        result.error = str(e)
        logger.error(f"[{job.video_id}] Export failed: {e}")
    
    return result
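
# On success the job leaves this layout behind (paths hypothetical; the
# local tar is deleted once an R2 upload succeeds):
#
#   <output_dir>/segments/SPK00_0000_1.25-4.80.flac   per-segment clips
#   <output_dir>/../<video_id>.tar                    tar of <output_dir> (arcname=<video_id>)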


class BackgroundExportManager:
    """
    Manages background export jobs with proper lifecycle handling.
    
    Usage:
        manager = BackgroundExportManager(max_workers=4)
        manager.start()
        
        # Submit jobs (non-blocking)
        manager.submit(job1)
        manager.submit(job2)
        
        # At shutdown, wait for completion
        results = manager.shutdown(wait=True, timeout=300)
    """
    
    def __init__(self, max_workers: int = 4):
        self.max_workers = max_workers
        self.executor: Optional[ThreadPoolExecutor] = None
        self.pending_futures: Dict[str, Future] = {}
        self.results: Dict[str, ExportResult] = {}
        self.lock = Lock()
        self._started = False
    
    def start(self):
        """Start the background export manager."""
        if self._started:
            return
        self.executor = ThreadPoolExecutor(
            max_workers=self.max_workers,
            thread_name_prefix="export_worker"
        )
        self._started = True
        logger.info(f"BackgroundExportManager started with {self.max_workers} workers")
    
    def submit(self, job: ExportJob) -> bool:
        """
        Submit an export job to run in background.
        
        Returns True if submitted successfully, False otherwise.
        """
        if not self._started or not self.executor:
            logger.error("ExportManager not started")
            return False
        
        try:
            future = self.executor.submit(run_export_job, job)
            with self.lock:
                self.pending_futures[job.video_id] = future
            logger.debug(f"Submitted export job: {job.video_id}")
            return True
        except Exception as e:
            logger.error(f"Failed to submit export job: {e}")
            return False
    
    def get_pending_count(self) -> int:
        """Get number of pending export jobs."""
        with self.lock:
            return len(self.pending_futures)
    
    def collect_completed(self) -> List[ExportResult]:
        """Collect results from completed jobs (non-blocking)."""
        completed = []
        with self.lock:
            done_ids = []
            for video_id, future in self.pending_futures.items():
                if future.done():
                    try:
                        result = future.result(timeout=0)
                        self.results[video_id] = result
                        completed.append(result)
                    except Exception as e:
                        result = ExportResult(video_id=video_id, success=False, error=str(e))
                        self.results[video_id] = result
                        completed.append(result)
                    done_ids.append(video_id)
            
            for vid in done_ids:
                del self.pending_futures[vid]
        
        return completed
    
    def shutdown(self, wait: bool = True, timeout: float = 300) -> Dict[str, ExportResult]:
        """
        Shutdown the export manager.
        
        Args:
            wait: If True, wait for pending jobs to complete
            timeout: Maximum time to wait for pending jobs
        
        Returns:
            Dict of video_id -> ExportResult for all jobs
        """
        if not self._started:
            return self.results
        
        logger.info(f"Shutting down ExportManager (pending: {len(self.pending_futures)})")
        
        if wait and self.pending_futures:
            # Wait for pending jobs with timeout
            pending_list = list(self.pending_futures.values())
            start = time.time()
            
            for future in as_completed(pending_list, timeout=timeout):
                try:
                    result = future.result(timeout=1)
                    with self.lock:
                        self.results[result.video_id] = result
                except Exception as e:
                    logger.error(f"Export job failed during shutdown: {e}")
            
            elapsed = time.time() - start
            logger.info(f"Waited {elapsed:.1f}s for {len(pending_list)} export jobs")
        
        # Shutdown executor
        if self.executor:
            self.executor.shutdown(wait=False)
            self.executor = None
        
        self._started = False
        return self.results
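
# Polling sketch for use between GPU batches (the surrounding loop is
# hypothetical): drain finished exports without blocking.
#
#   for res in manager.collect_completed():
#       if not res.success:
#           logger.error(f"Export {res.video_id} failed: {res.error}")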


# Global export manager instance (initialized by massive_process.py)
_EXPORT_MANAGER: Optional[BackgroundExportManager] = None


def get_export_manager() -> Optional[BackgroundExportManager]:
    """Get the global export manager instance."""
    return _EXPORT_MANAGER


def init_export_manager(max_workers: int = 4) -> BackgroundExportManager:
    """Initialize the global export manager."""
    global _EXPORT_MANAGER
    if _EXPORT_MANAGER is None:
        _EXPORT_MANAGER = BackgroundExportManager(max_workers=max_workers)
        _EXPORT_MANAGER.start()
    return _EXPORT_MANAGER


def shutdown_export_manager(wait: bool = True, timeout: float = 300) -> Dict[str, ExportResult]:
    """Shutdown the global export manager and return results."""
    global _EXPORT_MANAGER
    if _EXPORT_MANAGER:
        results = _EXPORT_MANAGER.shutdown(wait=wait, timeout=timeout)
        _EXPORT_MANAGER = None
        return results
    return {}
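

if __name__ == "__main__":
    # Minimal smoke test (hypothetical paths): requires ffmpeg on PATH and a
    # real FLAC at the path below to cut anything; R2 upload stays disabled,
    # so a missing audio file still produces an (empty) TAR.
    logging.basicConfig(level=logging.INFO)

    work_dir = Path("/tmp/export_demo")
    work_dir.mkdir(parents=True, exist_ok=True)

    manager = init_export_manager(max_workers=2)
    manager.submit(ExportJob(
        video_id="demo",
        output_dir=work_dir,
        original_audio_path=work_dir / "demo_original.flac",
        segments=[{"start": 0.0, "end": 1.0, "speaker": "SPK00", "status": "usable"}],
    ))
    for vid, res in shutdown_export_manager(wait=True, timeout=60).items():
        print(vid, "ok" if res.success else f"failed: {res.error}")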

