"""
Function call handlers — wired to the LLM via llm.register_function().

Each handler receives a FunctionCallParams object and uses result_callback
to return the result to the LLM context.

Handlers also push RTVIServerMessageFrame so the Android client receives
structured data (image URLs, video URLs, product cards, search links, etc.)
over the WebRTC data channel. The Android app listens for these in
VoiceClientManager.handleServerMessage().
"""

import asyncio
import time

from loguru import logger

from pipecat.processors.frameworks.rtvi import RTVIServerMessageFrame
from pipecat.services.llm_service import FunctionCallParams

from services.image_service import generate_image
from services.edit_image_service import edit_image
from services.search_service import web_search
from services.shopping_service import shopping_search
from services.video_service import generate_video


def _rtvi_msg(msg_type: str, data: dict) -> RTVIServerMessageFrame:
    """Wrap a typed payload into an RTVI server message frame.

    The RTVIProcessor downstream wraps this into {label: "rtvi-ai",
    type: "server-message", data: ...}, which the Pipecat Android SDK
    delivers to onServerMessage().
    """
    payload = {"type": msg_type, "data": data}
    return RTVIServerMessageFrame(data=payload)


# ---------------------------------------------------------------------------
# 1. Image Generation Handler (supports parallel multi-image)
# ---------------------------------------------------------------------------
async def handle_generate_image(params: FunctionCallParams):
    """Handle generate_image function call from the LLM.

    Supports generating 2-7 images in parallel with DIFFERENT prompts.
    Each prompt describes a unique angle/view/style. All Replicate calls
    run concurrently and successful URLs are sent as an array in a single
    RTVI message so the Android app can display them together.

    Args:
        params: Pipecat function-call params. Reads "prompts" (list[str]),
            the legacy single "prompt" (str), and "aspect_ratio" from the
            arguments; replies via params.result_callback.
    """
    args = params.arguments
    prompts = args.get("prompts", [])
    aspect_ratio = args.get("aspect_ratio", "1:1")
    batch_id = f"imgbatch_{int(time.time() * 1000)}"

    # Fallback: if LLM sends old format with single "prompt" instead of "prompts"
    if not prompts and args.get("prompt"):
        prompts = [args["prompt"], args["prompt"] + ", different angle and perspective"]

    # No usable prompt at all: fail fast instead of pushing a loading
    # indicator and firing an empty batch (the previous clamp turned an
    # empty list into an empty list and fell through to a generic
    # "all generations failed" error after doing no work).
    if not prompts:
        await params.result_callback(
            {
                "status": "error",
                "message": (
                    "No image prompt was provided. Ask the user what "
                    "they want to see."
                ),
            }
        )
        return

    # Clamp to 2-7 prompts. A single prompt is padded with a varied copy
    # (same suffix as the legacy fallback above) so the two images
    # actually differ, instead of duplicating the identical prompt.
    if len(prompts) < 2:
        prompts = [prompts[0], prompts[0] + ", different angle and perspective"]
    prompts = prompts[:7]

    num_images = len(prompts)
    description = prompts[0]

    logger.info(
        f"[Handler] generate_image called: {num_images} prompts, "
        f"aspect_ratio={aspect_ratio}"
    )
    for i, p in enumerate(prompts):
        logger.info(f"[Handler]   Prompt {i+1}: {p!r}")

    # Tell the client generation has started (shows loading indicator)
    await params.llm.push_frame(
        _rtvi_msg("generation_started", {
            "generation_type": "image",
            "description": description,
            "num_images": num_images,
        })
    )

    # Fire all image generations in parallel — each with a DIFFERENT prompt.
    # return_exceptions=True so one failed Replicate call doesn't sink the
    # whole batch.
    tasks = [
        generate_image(prompt=p, aspect_ratio=aspect_ratio)
        for p in prompts
    ]
    results = await asyncio.gather(*tasks, return_exceptions=True)

    # Collect successful URLs; failures are logged and skipped.
    images = []
    for i, result in enumerate(results):
        if isinstance(result, Exception):
            logger.error(f"[Handler] Image {i+1}/{num_images} failed: {result}")
            continue
        if result.get("success"):
            url = result["url"]
            images.append({
                "image_id": f"{batch_id}_{i}",
                "image_url": url,
                "s3_url": url,  # same URL under both keys for client compat
            })

    if images:
        # Send all image URLs to the Android client in one message
        await params.llm.push_frame(
            _rtvi_msg("image_generation_complete", {
                "batch_id": batch_id,
                "images": images,
                "description": description,
                "image_model": "black-forest-labs/flux-2-klein-4b",
                "count": len(images),
            })
        )

        count_msg = (
            f"All {len(images)} images are" if len(images) > 1
            else "The image is"
        )
        await params.result_callback(
            {
                "status": "success",
                "images_generated": len(images),
                "images_requested": num_images,
                "message": (
                    f"{count_msg} now visible in the user's app. "
                    "Do NOT read out any URL. Just tell the user the "
                    "images are ready."
                ),
            }
        )
    else:
        error_msg = "All image generations failed. Please try again."

        # Clear the client's loading indicator before reporting the error.
        await params.llm.push_frame(
            _rtvi_msg("generation_ended", {"error": error_msg})
        )

        await params.result_callback(
            {
                "status": "error",
                "message": error_msg,
            }
        )


# ---------------------------------------------------------------------------
# 2. Video Generation Handler
# ---------------------------------------------------------------------------
async def handle_generate_video(params: FunctionCallParams):
    """Handle generate_video function call from the LLM."""
    prompt = params.arguments.get("prompt", "")
    request_id = f"video_{int(time.time() * 1000)}"

    logger.info(f"[Handler] generate_video called: prompt={prompt!r}")

    # Notify the client: generic loading indicator, then the
    # video-specific start event.
    await params.llm.push_frame(
        _rtvi_msg("generation_started", {
            "generation_type": "video",
            "description": prompt,
        })
    )
    await params.llm.push_frame(
        _rtvi_msg("video_generation_started", {
            "request_id": request_id,
            "prompt": prompt,
        })
    )

    result = await generate_video(prompt=prompt)

    if not result.get("success"):
        # Surface the failure to the client, then report it to the LLM.
        error_msg = result.get("error", "Video generation failed.")
        await params.llm.push_frame(
            _rtvi_msg("video_generation_error", {
                "request_id": request_id,
                "error": error_msg,
            })
        )
        await params.result_callback(
            {
                "status": "error",
                "message": error_msg,
            }
        )
        return

    # Success: deliver the URL over the data channel, then confirm to the LLM.
    await params.llm.push_frame(
        _rtvi_msg("video_generation_complete", {
            "request_id": request_id,
            "video_url": result["url"],
            "prompt": prompt,
            "video_model": "wan-video/wan-2.2-t2v-fast",
            "duration": 0.0,
        })
    )
    await params.result_callback(
        {
            "status": "success",
            "message": (
                "Video generated successfully. The video is now visible "
                "in the user's app. Do NOT read out any URL. Just tell "
                "the user the video is ready."
            ),
        }
    )


# ---------------------------------------------------------------------------
# 3. Web Search Handler (general info — Gemini grounded search)
# ---------------------------------------------------------------------------
async def handle_web_search(params: FunctionCallParams):
    """Handle web_search function call from the LLM.

    Returns the text summary to the LLM first (so TTS can start right
    away), then builds the C1 visual card and pushes structured source
    links plus the card to the Android app via RTVI. The card is
    generated after the voice callback, so it never delays the spoken
    answer.
    """
    query = params.arguments.get("query", "")

    logger.info(f"[Handler] web_search called: query={query!r}")

    # Show the search loading indicator on the client.
    await params.llm.push_frame(
        _rtvi_msg("generation_started", {
            "generation_type": "search",
            "description": query,
        })
    )

    result = await web_search(query=query)

    if not result.get("success"):
        failure = result.get("error", "Search failed.")
        # Clear the loader even on failure.
        await params.llm.push_frame(
            _rtvi_msg("generation_ended", {
                "generation_type": "search",
                "error": failure,
            })
        )
        await params.result_callback(
            {
                "status": "error",
                "message": failure,
            }
        )
        return

    summary = result.get("summary", "")
    sources = result.get("sources", [])

    # Answer the LLM immediately for TTS — don't wait for the C1 card.
    await params.result_callback(
        {
            "status": "success",
            "search_results": summary,
            "message": (
                "Search complete. Source links and a visual card are "
                "shown in the user's app. Do NOT read out any URLs. "
                "Present the information in natural spoken language."
            ),
        }
    )

    # Best-effort C1 visual card; any failure degrades to "no card".
    c1_dsl = ""
    try:
        from services.c1_service import generate_visual_card

        c1_result = await generate_visual_card(
            query=query, summary=summary, sources=sources
        )
        if c1_result.get("success"):
            c1_dsl = c1_result.get("c1_dsl", "")
            logger.info(f"[Handler] C1 visual card generated ({len(c1_dsl)} chars)")
        else:
            logger.warning(f"[Handler] C1 card failed: {c1_result.get('error')}")
    except Exception as e:
        logger.warning(f"[Handler] C1 card generation skipped: {e}")

    # Structured results for the Android UI (sources capped at 6).
    await params.llm.push_frame(
        _rtvi_msg("search_results", {
            "query": query,
            "summary": summary,
            "sources": sources[:6],
            "c1_dsl": c1_dsl,
        })
    )

    # Dismiss the loading indicator.
    await params.llm.push_frame(
        _rtvi_msg("generation_ended", {"generation_type": "search"})
    )


# ---------------------------------------------------------------------------
# 4. Shopping Search Handler (SerpAPI Google Shopping)
# ---------------------------------------------------------------------------
async def handle_shopping_search(params: FunctionCallParams):
    """Handle shopping_search function call from the LLM.

    Sends structured product cards to the Android app via RTVI, and returns
    a spoken summary to the LLM for TTS.

    Args:
        params: Pipecat function-call params; reads "query" from the
            arguments and replies via params.result_callback.
    """
    args = params.arguments
    query = args.get("query", "")

    logger.info(f"[Handler] shopping_search called: query={query!r}")

    # Tell the client shopping search has started (shows loading indicator)
    await params.llm.push_frame(
        _rtvi_msg("generation_started", {
            "generation_type": "shopping",
            "description": query,
        })
    )

    result = await shopping_search(query=query)

    if result.get("success"):
        products = result.get("products", [])

        if products:
            # Send product cards to Android app
            await params.llm.push_frame(
                _rtvi_msg("shopping_results", {
                    "query": query,
                    "products": products,
                    "count": len(products),
                })
            )

        # Clear the loader
        await params.llm.push_frame(
            _rtvi_msg("generation_ended", {"generation_type": "shopping"})
        )

        await params.result_callback(
            {
                "status": "success",
                # .get() instead of a bare index: a success payload without
                # "summary" must not raise KeyError after the frames above
                # were already pushed (matches the .get() style used for
                # every other result access in this module).
                "summary": result.get("summary", ""),
                "product_count": len(products),
                "message": (
                    "Shopping results are now displayed as product cards in "
                    "the user's app with images, prices, and buy links. "
                    "Do NOT read out any URLs or links. Just summarize the "
                    "top products with their names, prices, and store names "
                    "in natural spoken language."
                ),
            }
        )
    else:
        # Clear the loader on error too
        await params.llm.push_frame(
            _rtvi_msg("generation_ended", {
                "generation_type": "shopping",
                "error": result.get("error", "Shopping search failed."),
            })
        )

        await params.result_callback(
            {
                "status": "error",
                "message": result.get("error", "Shopping search failed."),
            }
        )


# ---------------------------------------------------------------------------
# 5. Edit Image Handler (user-uploaded photo → direct to model)
# ---------------------------------------------------------------------------
async def handle_edit_image(params: FunctionCallParams):
    """Handle edit_image function call from the LLM.

    Pulls the user's uploaded photo out of in-process memory and forwards
    it straight to the model with the raw edit instruction — no S3 round
    trip, no prompt rewriting.
    """
    import bot as bot_module

    edit_instruction = params.arguments.get("edit_instruction", "")
    edit_id = f"edit_{int(time.time() * 1000)}"

    logger.info(f"[Handler] edit_image called: {edit_instruction!r}")

    # Guard: there is nothing to edit until a photo has been uploaded.
    uploaded = bot_module.uploaded_image
    if not uploaded:
        await params.result_callback({
            "status": "error",
            "message": (
                "No photo uploaded yet. Ask the user to upload a photo first "
                "before requesting an edit."
            ),
        })
        return

    # Show the editing loading indicator on the client.
    await params.llm.push_frame(
        _rtvi_msg("generation_started", {
            "generation_type": "image_edit",
            "description": edit_instruction,
        })
    )

    result = await edit_image(
        image_base64=uploaded["base64"],
        instruction=edit_instruction,
        mime_type=uploaded["mime_type"],
    )

    if not result.get("success"):
        error_msg = result.get("error", "Image edit failed.")

        # Dismiss the loader before reporting the failure.
        await params.llm.push_frame(
            _rtvi_msg("generation_ended", {
                "generation_type": "image_edit",
                "error": error_msg,
            })
        )
        await params.result_callback({
            "status": "error",
            "message": error_msg,
        })
        return

    # Push the edited image URL straight to the frontend.
    await params.llm.push_frame(
        _rtvi_msg("image_edit_complete", {
            "status": "completed",
            "edit_id": edit_id,
            "edited_image_url": result["url"],
            "edit_instruction": edit_instruction,
            "image_model": "black-forest-labs/flux-2-klein-4b",
        })
    )
    await params.result_callback({
        "status": "success",
        "message": (
            "The edited image is now visible in the user's app. "
            "Do NOT read out any URL. Just tell the user the edit is done."
        ),
    })


# ---------------------------------------------------------------------------
# 6. Video from Uploaded Image Handler (user photo → direct to Replicate I2V)
# ---------------------------------------------------------------------------
async def handle_generate_video_from_image(params: FunctionCallParams):
    """Handle generate_video_from_image function call from the LLM.

    Animates the user's uploaded photo: the in-memory image is passed to
    Replicate's video model directly as a base64 data URI.
    """
    import bot as bot_module

    prompt = params.arguments.get("prompt", "")
    request_id = f"video_{int(time.time() * 1000)}"

    logger.info(f"[Handler] generate_video_from_image called: {prompt!r}")

    # Guard: a source photo must already be in memory.
    uploaded = bot_module.uploaded_image
    if not uploaded:
        await params.result_callback({
            "status": "error",
            "message": (
                "No photo uploaded yet. Ask the user to upload a photo first "
                "before requesting a video."
            ),
        })
        return

    # Inline the image as a data URI — no intermediate upload needed.
    data_uri = f"data:{uploaded['mime_type']};base64,{uploaded['base64']}"

    # Notify the client: loading indicator, then the video-specific
    # start event tagged with the image source.
    await params.llm.push_frame(
        _rtvi_msg("generation_started", {
            "generation_type": "video",
            "description": prompt,
        })
    )
    await params.llm.push_frame(
        _rtvi_msg("video_generation_started", {
            "request_id": request_id,
            "prompt": prompt,
            "source": "uploaded_image",
        })
    )

    # Send image directly to the video model via the data URI.
    result = await generate_video(prompt=prompt, image_url=data_uri)

    if not result.get("success"):
        error_msg = result.get("error", "Video generation failed.")
        await params.llm.push_frame(
            _rtvi_msg("video_generation_error", {
                "request_id": request_id,
                "error": error_msg,
            })
        )
        await params.result_callback({
            "status": "error",
            "message": error_msg,
        })
        return

    # NOTE(review): the reported model name here differs from the
    # text-to-video handler (ltx-2-distilled vs wan-2.2-t2v-fast) —
    # confirm against video_service which model actually runs.
    await params.llm.push_frame(
        _rtvi_msg("video_generation_complete", {
            "request_id": request_id,
            "video_url": result["url"],
            "prompt": prompt,
            "video_model": "lightricks/ltx-2-distilled",
            "duration": 0.0,
            "source": "uploaded_image",
        })
    )
    await params.result_callback({
        "status": "success",
        "message": (
            "Video generated from the uploaded photo! The video is now "
            "visible in the user's app. Do NOT read out any URL. "
            "Just tell the user the video is ready."
        ),
    })


def register_all_handlers(llm):
    """Register all function handlers with the LLM service.

    Image and video generation are long-running (10-60s+), so we set
    cancel_on_interruption=False so the user can keep talking without
    cancelling the generation.
    """
    # Long-running generation handlers must survive user interruptions.
    long_running = (
        ("generate_image", handle_generate_image),
        ("generate_video", handle_generate_video),
        ("edit_image", handle_edit_image),
        ("generate_video_from_image", handle_generate_video_from_image),
    )
    for name, handler in long_running:
        llm.register_function(name, handler, cancel_on_interruption=False)

    # Search handlers are quick; the default interruption behavior is fine.
    llm.register_function("web_search", handle_web_search)
    llm.register_function("shopping_search", handle_shopping_search)

    logger.info(
        "[Handler] Function handlers registered "
        "(generate_image, generate_video, edit_image, "
        "generate_video_from_image, web_search, shopping_search)"
    )
