"""
Abstract provider interface. Both AI Studio and OpenRouter implement this.
The batch_cycle engine calls provider.send_batch() agnostic of backend.
"""
from __future__ import annotations

from abc import ABC, abstractmethod
from dataclasses import dataclass, field
from enum import Enum
from typing import Any, Optional


class RequestStatus(str, Enum):
    """Outcome classification for a single transcription request.

    Mixes in ``str`` so members compare equal to (and serialize as) their
    plain string values.
    """

    SUCCESS = "success"            # request completed normally
    RATE_LIMITED = "rate_limited"  # backend throttled the request
    ERROR = "error"                # request failed for a non-rate-limit reason
    TIMEOUT = "timeout"            # request did not complete in time


@dataclass
class TranscriptionRequest:
    """One audio segment to be transcribed by a provider backend."""

    segment_id: str    # unique identifier for this segment
    audio_base64: str  # audio payload, base64-encoded
    language_code: str  # language of the audio / target transcription language
    original_file: str  # source file this segment originates from
    mime_type: str = "audio/flac"  # MIME type of the decoded audio payload


@dataclass
class TokenUsage:
    """Token accounting extracted from a single provider response.

    All counters default to zero so an empty/unknown usage record is valid.
    """

    input_tokens: int = 0
    output_tokens: int = 0
    cached_tokens: int = 0  # prompt tokens served from the provider's cache
    total_tokens: int = 0
    cache_hit: bool = False  # True when the response used cached prompt tokens


@dataclass
class TranscriptionResponse:
    """Per-segment result returned by a provider for one TranscriptionRequest.

    On failure, ``status`` and ``error_message`` describe the problem and
    ``transcription_data`` stays ``None``. ``raw_response`` optionally keeps
    the unparsed backend payload (e.g. for token-usage extraction or debugging).
    """

    segment_id: str  # identifier of the segment this response corresponds to
    status: RequestStatus
    # Parsed transcription payload; None when no usable result was produced.
    # Written as `dict | None` (not typing.Optional) for consistency with the
    # file's modern annotation style — safe under the module's
    # `from __future__ import annotations`, which keeps annotations lazy.
    transcription_data: dict | None = None
    token_usage: TokenUsage = field(default_factory=TokenUsage)
    latency_ms: float = 0.0  # request round-trip latency in milliseconds
    error_message: str = ""  # empty string when there was no error
    raw_response: Any = None  # raw backend response object, if retained


class BaseProvider(ABC):
    """Backend-agnostic transcription provider interface.

    Concrete backends (AI Studio, OpenRouter) implement these hooks; the
    batch_cycle engine calls ``send_batch`` without knowing which backend
    is in use.
    """

    @abstractmethod
    async def send_batch(self, requests: list[TranscriptionRequest]) -> list[TranscriptionResponse]:
        """Send a batch of transcription requests concurrently."""
        ...

    @abstractmethod
    def verify_cache_hit(self, response: Any) -> bool:
        """Check if the response used cached prompt tokens."""
        ...

    @abstractmethod
    def get_token_usage(self, response: Any) -> TokenUsage:
        """Extract token usage from raw response."""
        ...

    @abstractmethod
    def get_provider_name(self) -> str:
        """Return the name identifying this provider backend."""
        ...
