"""Bot entry point — Traditional pipeline: Deepgram STT + GoogleVertexLLM + ElevenLabs TTS.

Follows India prod pipeline.py pattern exactly.
"""

import asyncio
import json
import os
import sys
import uuid

from dotenv import load_dotenv
from loguru import logger

from pipecat.adapters.schemas.tools_schema import ToolsSchema
from pipecat.audio.vad.silero import SileroVADAnalyzer
from pipecat.audio.vad.vad_analyzer import VADParams
from pipecat.pipeline.pipeline import Pipeline
from pipecat.pipeline.runner import PipelineRunner
from pipecat.pipeline.task import PipelineParams, PipelineTask
from pipecat.processors.aggregators.openai_llm_context import OpenAILLMContext
from pipecat.processors.audio.audio_buffer_processor import AudioBufferProcessor
from pipecat.processors.frameworks.rtvi import RTVIConfig, RTVIObserver, RTVIProcessor
from pipecat.processors.transcript_processor import TranscriptProcessor
from pipecat.services.soniox.stt import SonioxSTTService
from pipecat.transcriptions.language import Language
from pipecat.services.deepgram.tts import DeepgramTTSService
from pipecat.services.google.llm_vertex import GoogleVertexLLMService
from pipecat.transports.services.daily import DailyParams, DailyTransport

from bot.core.config import (
    SAMPLE_RATE,
    BotSessionState,
    ChunkState,
    TranscriptManager,
    load_persona,
)
from bot.core.session_handlers import (
    create_audio_data_handler,
    create_cleanup_if_no_user,
    create_first_participant_joined_handler,
    create_participant_left_handler,
    create_transcript_handler,
)
from bot.services.image_service import cf_image_function, create_cf_image_handler
from bot.services.image_upload import handle_image_upload_url, handle_image_upload_base64
from bot.services.ppt_service import generate_ppt_function, create_ppt_handler
from bot.services.video_service import (
    generate_video_function,
    generate_video_from_image_function,
    create_video_handler,
    create_video_from_image_handler,
)
from bot.services.video_ads_service import (
    generate_video_ad_function,
    create_video_ad_handler,
)

# Load .env values, letting them override any already-exported variables so a
# local .env always wins during development.
load_dotenv(override=True)

# Reset loguru to a single INFO-level stdout sink. The argless remove() drops
# ALL existing handlers; remove(0) would raise ValueError if the default
# handler (id 0) had already been removed by earlier logger configuration.
logger.remove()
logger.add(sys.stdout, level="INFO")


# --- SafeRTVIObserver (same as India prod) ---

class SafeRTVIObserver(RTVIObserver):
    """RTVIObserver that suppresses 'Unable to send messages before joining' errors.

    During startup the transport may not have joined the Daily room yet; RTVI
    frames processed in that window raise a transient send error. This wrapper
    swallows exactly that error (counting occurrences) and re-raises anything
    else untouched.
    """

    # Substring that identifies the benign startup error we suppress.
    _STARTUP_ERROR_TEXT = "Unable to send messages before joining"

    def __init__(self, rtvi, **kwargs):
        super().__init__(rtvi, **kwargs)
        # How many startup sends were swallowed; only the first is logged.
        self._startup_errors_suppressed = 0

    async def on_process_frame(self, data):
        try:
            await super().on_process_frame(data)
        except Exception as exc:
            # Anything other than the known startup race is a real error.
            if self._STARTUP_ERROR_TEXT not in str(exc):
                raise
            self._startup_errors_suppressed += 1
            if self._startup_errors_suppressed == 1:
                logger.debug("SafeRTVIObserver: Suppressing startup message (transport not yet joined)")


# --- Main ---

async def main():
    """Assemble and run the voice-bot pipeline for one Daily room session.

    All configuration comes from environment variables (room URL/token,
    session/user identity, persona id). The persona record — system prompt,
    TTS voice, and the list of tools to register — is loaded via
    ``load_persona``. The pipeline follows the India prod layout:
    Daily transport -> Soniox STT -> Vertex Gemini LLM -> Deepgram TTS.

    Blocks until the pipeline runner finishes. Exits the process with
    status 1 when DAILY_ROOM_URL is missing.
    """
    # --- Environment / session identity ---
    daily_room_url = os.getenv("DAILY_ROOM_URL")
    daily_room_token = os.getenv("DAILY_ROOM_TOKEN")
    session_id = os.getenv("SESSION_ID", uuid.uuid4().hex[:8])
    user_id = os.getenv("USER_ID", "")
    # Case-insensitive flag parsing: "True", "true", "TRUE" all enable premium
    # (previously only the exact string "True" did); anything else is False.
    is_premium = os.getenv("IS_PREMIUM", "False").strip().lower() == "true"
    plan = os.getenv("PLAN", "")
    persona_id = os.getenv("PERSONA_ID", "image_gen")

    # Load persona config from DB
    persona = load_persona(persona_id)
    persona_name = persona["name"]
    persona_prompt = persona["system_prompt"]
    persona_tools = persona["tools"]  # list of tool function names to register
    persona_tts_voice = persona["tts_voice"]

    logger.info(f"PERSONA_LOADED: id={persona_id} name={persona_name} tools={persona_tools}")

    # SECURITY: the Soniox key previously shipped with a hard-coded secret as
    # the env-var default. Secrets must only come from the environment; an
    # empty key makes the STT connection fail to authenticate, so warn early.
    soniox_api_key = os.getenv("SONIOX_API_KEY", "")
    if not soniox_api_key:
        logger.warning("SONIOX_API_KEY is not set; Soniox STT will fail to authenticate")

    # Vertex AI credentials: a relative path is resolved against the repo root
    # (two directories above this file).
    creds_path = os.getenv("GOOGLE_VERTEX_CREDENTIALS_PATH", "gcp-service-account.json")
    if not os.path.isabs(creds_path):
        creds_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "..", creds_path)
    project_id = os.getenv("GOOGLE_CLOUD_PROJECT_ID", "")
    location = os.getenv("GOOGLE_CLOUD_LOCATION", "us-central1")

    if not daily_room_url:
        logger.error("DAILY_ROOM_URL environment variable is not set")
        sys.exit(1)

    logger.info(f"BOT_START: room={daily_room_url} session_id={session_id} user_id={user_id}")

    # --- Session State ---
    bot_session = BotSessionState(session_id=session_id, user_id=user_id)
    bot_session.is_premium = is_premium
    bot_session.plan = plan
    bot_session.character_id = persona_id
    bot_session.character_name = persona_name
    transcript_manager = TranscriptManager()
    chunk_state = ChunkState()

    # --- VAD (same as India prod) ---
    vad_analyzer = SileroVADAnalyzer(
        params=VADParams(
            confidence=0.7,
            stop_secs=0.5,
            start_secs=0.2,
            min_volume=0.6,
        )
    )

    # --- Transport (follows India prod create_transport) ---
    transport = DailyTransport(
        daily_room_url,
        daily_room_token,
        persona_name,
        DailyParams(
            audio_in_enabled=True,
            audio_out_enabled=True,
            audio_in_sample_rate=SAMPLE_RATE,
            audio_out_sample_rate=SAMPLE_RATE,
            vad_analyzer=vad_analyzer,
        ),
    )
    bot_session.transport = transport

    # --- STT (Soniox) ---
    stt = SonioxSTTService(
        api_key=soniox_api_key,
        settings=SonioxSTTService.Settings(
            language_hints=[Language.EN],
        ),
    )

    # --- LLM (Google Vertex AI Gemini — same as India prod) ---
    llm = GoogleVertexLLMService(
        project_id=project_id,
        location=location,
        model="gemini-2.5-flash",
        credentials_path=creds_path,
        system_instruction=persona_prompt,
        params=GoogleVertexLLMService.InputParams(
            max_output_tokens=4096,
            temperature=0.7,
        ),
    )

    # --- TTS (Deepgram) ---
    deepgram_api_key = os.getenv("DEEPGRAM_API_KEY", "")
    tts = DeepgramTTSService(
        api_key=deepgram_api_key,
        voice=persona_tts_voice,
    )

    # --- Context & Tools (persona-specific) ---
    # Map persona tool names to their function schemas; unknown names in the
    # persona config are silently skipped.
    tool_map = {
        "cf_image": cf_image_function,
        "generate_video": generate_video_function,
        "generate_video_from_image": generate_video_from_image_function,
        "generate_ppt": generate_ppt_function,
        "generate_video_ad": generate_video_ad_function,
    }
    standard_tools = [tool_map[t] for t in persona_tools if t in tool_map]
    tools = ToolsSchema(standard_tools=standard_tools) if standard_tools else None

    messages = [
        {"role": "system", "content": persona_prompt},
        {"role": "user", "content": "Start the Conversation :"},
    ]

    context = OpenAILLMContext(
        messages=messages,
        tools=tools,
    )
    context_aggregator = llm.create_context_aggregator(context)

    # --- Processors (follows India prod create_processors) ---
    rtvi = RTVIProcessor(config=RTVIConfig(config=[]))
    transcript = TranscriptProcessor()
    audio_buffer = AudioBufferProcessor(
        sample_rate=SAMPLE_RATE,
        num_channels=1,
        buffer_duration_secs=60,
    )
    audio_buffer.event_handler("on_audio_data")(
        create_audio_data_handler(bot_session, chunk_state)
    )

    # --- Pipeline (follows India prod create_pipeline EXACTLY) ---
    pipeline = Pipeline([
        transport.input(),
        stt,
        transcript.user(),
        context_aggregator.user(),
        rtvi,
        llm,
        tts,
        transport.output(),
        audio_buffer,
        transcript.assistant(),
        context_aggregator.assistant(),
    ])
    bot_session.pipeline = pipeline

    # --- Task (follows India prod create_task EXACTLY) ---
    task = PipelineTask(
        pipeline,
        params=PipelineParams(
            enable_metrics=True,
            enable_usage_metrics=True,
            allow_interruptions=True,
        ),
        observers=[
            SafeRTVIObserver(rtvi),
        ],
    )

    # --- Register tool handlers (persona-specific) ---
    # Each entry maps a persona tool name to (registered function name,
    # handler factory). Factories close over the session/transcript/task.
    handler_map = {
        "cf_image": ("cf_image", create_cf_image_handler),
        "generate_video": ("generate_video", create_video_handler),
        "generate_video_from_image": ("generate_video_from_image", create_video_from_image_handler),
        "generate_ppt": ("generate_ppt", create_ppt_handler),
        "generate_video_ad": ("generate_video_ad", create_video_ad_handler),
    }
    for tool_name in persona_tools:
        if tool_name in handler_map:
            func_name, handler_factory = handler_map[tool_name]
            llm.register_function(
                func_name,
                handler_factory(bot_session, transcript_manager, task),
                timeout_secs=120, cancel_on_interruption=False,
            )
            logger.info(f"TOOL_REGISTERED: {func_name} for persona={persona_id}")

    # --- RTVI event handlers (follows India prod) ---
    @rtvi.event_handler("on_client_ready")
    async def on_client_ready(rtvi_proc):
        # Tell the client the bot is ready as soon as RTVI handshake completes.
        await rtvi_proc.set_bot_ready()

    # --- Transcript Handler ---
    transcript.event_handler("on_transcript_update")(
        create_transcript_handler(bot_session, transcript_manager, task)
    )

    # --- Transport Event Handlers ---
    @transport.event_handler("on_first_participant_joined")
    async def _on_first_participant_joined(transport, participant):
        handler = create_first_participant_joined_handler(bot_session, task, audio_buffer, chunk_state)
        await handler(transport, participant)

    @transport.event_handler("on_participant_left")
    async def _on_participant_left(transport, participant, reason):
        handler = create_participant_left_handler(bot_session, transcript_manager, chunk_state, task)
        await handler(transport, participant, reason)

    @transport.event_handler("on_app_message")
    async def _on_app_message(transport, message, sender):
        """Handle image uploads from the frontend."""
        try:
            if isinstance(message, str):
                message = json.loads(message)

            msg_type = message.get("type")
            data = message.get("data", {})

            # Unwrap RTVI client-message format ({"t": type, "d": payload}).
            if msg_type == "client-message" and isinstance(data, dict):
                inner_type = data.get("t", "")
                inner_data = data.get("d", {})
                if inner_type:
                    msg_type = inner_type
                    data = inner_data

            if msg_type in ("user_image_upload", "image_upload"):
                mime_type = data.get("mime_type", "image/jpeg")
                image_url = data.get("image_url")
                image_id = data.get("image_id", "")

                # Prefer a URL reference; fall back to inline base64 payload.
                if image_url:
                    await handle_image_upload_url(
                        image_url, image_id, mime_type,
                        bot_session, transcript_manager, task,
                    )
                else:
                    image_b64 = data.get("image", "")
                    if image_b64:
                        await handle_image_upload_base64(
                            image_b64, mime_type,
                            bot_session, transcript_manager, task,
                        )
                    else:
                        logger.warning("Received image_upload with no image data or URL")

        except Exception as e:
            # loguru: exception() records the traceback; the stdlib-style
            # exc_info=True kwarg is not honored by loguru and silently
            # dropped the traceback before.
            logger.exception(f"Error handling app message: {e}")

    # --- No-user timeout ---
    # Background watchdog that tears the session down if nobody ever joins.
    cleanup_coro = create_cleanup_if_no_user(bot_session, task)
    bot_session.no_user_timeout_task = asyncio.create_task(cleanup_coro())

    # --- Run ---
    runner = PipelineRunner()
    await runner.run(task)


if __name__ == "__main__":
    asyncio.run(main())
