"""Image generation and editing service using CF Flux with R2 storage."""

import asyncio
import base64
import json
import os
import time
import uuid

import httpx
from loguru import logger

from pipecat.adapters.schemas.function_schema import FunctionSchema
from pipecat.frames.frames import OutputTransportMessageUrgentFrame
from pipecat.services.llm_service import FunctionCallParams

from app.services.r2 import upload_bytes, generate_key

from typing import TYPE_CHECKING
if TYPE_CHECKING:
    from bot.core.config import BotSessionState, TranscriptManager


# --- Workers AI Client (direct, no proxy worker) ---

# Cloudflare credentials read from the environment at import time; empty
# strings when unset (each API call validates them before issuing a request).
CF_ACCOUNT_ID = os.getenv("CF_WORKERS_ACCOUNT_ID", "")
CF_API_TOKEN = os.getenv("CF_API_TOKEN", "")
# Workers AI model used for both text-to-image generation and image editing.
CF_FLUX_MODEL = "@cf/black-forest-labs/flux-2-klein-4b"

# Lazily created shared HTTP client; see _get_cf_client().
_cf_shared_client: httpx.AsyncClient | None = None


def _get_cf_client(timeout: float = 120.0) -> httpx.AsyncClient:
    """Return the module-wide async HTTP client, creating it on first use.

    A fresh client is also created if the previous one was closed. Note that
    ``timeout`` only takes effect when a new client is constructed.
    """
    global _cf_shared_client
    needs_new = _cf_shared_client is None or _cf_shared_client.is_closed
    if needs_new:
        pool_limits = httpx.Limits(max_keepalive_connections=5, max_connections=10)
        _cf_shared_client = httpx.AsyncClient(timeout=timeout, limits=pool_limits)
    return _cf_shared_client


def _parse_image_response(response: httpx.Response) -> bytes:
    """Extract image bytes from a Workers AI response.

    The API may answer with the raw image body (an ``image/*`` content type)
    or with a JSON envelope carrying a base64-encoded image at
    ``result.image``.

    Raises:
        Exception: if the JSON envelope reports failure or has no image.
    """
    if "image" in response.headers.get("content-type", ""):
        # Raw binary image — return the body as-is.
        return response.content

    # Otherwise expect the JSON envelope with a base64-encoded result.
    payload = response.json()
    if not payload.get("success"):
        errors = payload.get("errors", [])
        raise Exception(f"Workers AI failed: {errors}")
    encoded = payload.get("result", {}).get("image")
    if not encoded:
        raise Exception("Workers AI returned no image data")
    return base64.b64decode(encoded)


async def cf_generate_image(prompt: str, width: int = 1024, height: int = 1024,
                            num_steps: int = 4) -> tuple[bytes, str, float]:
    """Generate image via Workers AI REST API. Returns (image_bytes, mime_type, api_latency)."""
    if not CF_ACCOUNT_ID or not CF_API_TOKEN:
        raise Exception("CF_API_TOKEN or CF_WORKERS_ACCOUNT_ID not set.")

    endpoint = f"https://api.cloudflare.com/client/v4/accounts/{CF_ACCOUNT_ID}/ai/run/{CF_FLUX_MODEL}"
    auth_headers = {"Authorization": f"Bearer {CF_API_TOKEN}"}

    # flux-2-klein-4b requires multipart even for text-to-image
    payload = {
        "prompt": prompt,
        "width": str(width),
        "height": str(height),
        "num_steps": str(num_steps),
    }

    started = time.time()
    response = await _get_cf_client().post(endpoint, headers=auth_headers, data=payload)
    api_latency = time.time() - started

    if response.status_code != 200:
        raise Exception(f"Workers AI error {response.status_code}: {response.text[:300]}")

    image_bytes = _parse_image_response(response)
    if not image_bytes:
        raise Exception("Workers AI returned empty image")

    # Sniff JPEG via its FF D8 magic bytes; anything else is treated as PNG.
    mime_type = "image/jpeg" if image_bytes.startswith(b'\xff\xd8') else "image/png"
    logger.info(f"CF_GENERATE_OK: latency={api_latency:.3f}s size={len(image_bytes)} type={mime_type}")
    return image_bytes, mime_type, api_latency


async def cf_edit_image(image_base64: str, prompt: str, reference_b64: str | None = None,
                        width: int = 1024, height: int = 1024, num_steps: int = 4) -> tuple[bytes, str, float]:
    """Edit image via Workers AI using flux-2-klein-4b multipart with reference images.

    Args:
        image_base64: Base64-encoded source image to edit.
        prompt: Edit instruction passed to the model.
        reference_b64: Optional base64-encoded reference image, sent as a
            second input image when provided. (Annotation fixed: default is
            ``None``, so the type is ``str | None``.)
        width: Output width in pixels.
        height: Output height in pixels.
        num_steps: Diffusion step count (sent as a string form field).

    Returns:
        Tuple of (image_bytes, mime_type, api_latency_seconds).

    Raises:
        Exception: if credentials are missing, the API returns a non-200
            status, or the response carries no image data.
    """
    if not CF_ACCOUNT_ID or not CF_API_TOKEN:
        raise Exception("CF_API_TOKEN or CF_WORKERS_ACCOUNT_ID not set.")

    url = f"https://api.cloudflare.com/client/v4/accounts/{CF_ACCOUNT_ID}/ai/run/{CF_FLUX_MODEL}"
    headers = {"Authorization": f"Bearer {CF_API_TOKEN}"}

    # Decode base64 image to bytes for multipart upload
    image_bytes_input = base64.b64decode(image_base64)

    # Build multipart form data — flux-2-klein-4b accepts input_image_0..3
    files = {
        "input_image_0": ("image.jpg", image_bytes_input, "image/jpeg"),
    }
    form_data = {
        "prompt": prompt,
        "num_steps": str(num_steps),
        "width": str(width),
        "height": str(height),
    }

    # Add reference image if provided
    if reference_b64:
        ref_bytes = base64.b64decode(reference_b64)
        files["input_image_1"] = ("reference.jpg", ref_bytes, "image/jpeg")

    client = _get_cf_client()
    api_start = time.time()
    response = await client.post(url, headers=headers, data=form_data, files=files)
    api_latency = time.time() - api_start

    if response.status_code != 200:
        error_text = response.text[:300]
        raise Exception(f"Workers AI edit error {response.status_code}: {error_text}")

    image_bytes = _parse_image_response(response)
    if len(image_bytes) == 0:
        raise Exception("Workers AI returned empty edited image")

    # Sniff JPEG via its FF D8 magic bytes; anything else is treated as PNG.
    mime_type = "image/jpeg" if image_bytes[:2] == b'\xff\xd8' else "image/png"
    logger.info(f"CF_EDIT_OK: latency={api_latency:.3f}s size={len(image_bytes)} type={mime_type}")
    return image_bytes, mime_type, api_latency


# --- Function Schema ---

# Function-call schema advertised to the LLM. Calls against this schema are
# serviced by the handler produced by create_cf_image_handler() below.
cf_image_function = FunctionSchema(
    name="cf_image",
    description=(
        "Generate or edit images. Use for ALL image tasks: "
        "1) Image generation - create new images from text descriptions "
        "2) Image edits - modify uploaded or generated images "
        "CRITICAL: Do NOT call this function again if generation/edit is already in progress."
    ),
    properties={
        "operation": {
            "type": "string", "enum": ["generate", "edit"],
            "description": "Operation type: 'generate' for text-to-image, 'edit' for image modification",
        },
        "prompt": {
            "type": "string",
            "description": "For 'generate': detailed image description. For 'edit': edit instruction.",
        },
    },
    required=["operation", "prompt"],
)


# --- Send RTVI helper ---

async def _send_rtvi(task, data: dict):
    """Send RTVI server-message to frontend."""
    message = {"label": "rtvi-ai", "type": "server-message", "data": data}
    try:
        await task.queue_frame(OutputTransportMessageUrgentFrame(message=message))
    except Exception as e:
        # Best-effort: frontend notification failures must not break the call.
        logger.warning(f"Failed to send RTVI message: {e}")


# --- Handler factory ---

def create_cf_image_handler(
    bot_session: "BotSessionState",
    transcript_manager: "TranscriptManager",
    task,
):
    """Create CF Flux image generation/editing handler.

    Returns an async handler for the ``cf_image`` function schema that
    generates or edits an image via Workers AI, uploads the result to R2,
    records it with the transcript manager, and notifies the frontend via
    RTVI server-messages.

    Args:
        bot_session: Per-session state (processing flag, image credits,
            cached generated/edited images).
        transcript_manager: Sink for generation/edit records.
        task: Pipeline task used to queue outbound RTVI frames.
    """

    async def handle_cf_image(params: FunctionCallParams):
        """Handle a single cf_image call (operation: 'generate' or 'edit')."""
        args = params.arguments
        operation = args.get("operation", "generate")
        prompt = args.get("prompt", "")
        session_id = bot_session.session_id or "unknown"

        # Re-entrancy guard: refuse a second call while one is in flight
        # (the schema also tells the model not to repeat the call).
        if bot_session.is_processing:
            await params.result_callback(json.dumps({
                "status": "busy",
                "assistant_instruction": "Image generation/edit is already in progress. Wait silently.",
            }))
            return

        # Usage limit check
        # "subscribe" => free credits exhausted; "topup" => credits for the
        # current subscription period exhausted. Any other value proceeds.
        limit_reason = bot_session.can_generate_image()
        if limit_reason == "subscribe":
            await _send_rtvi(task, {
                "type": "limit_reached",
                "data": {"media_type": "image", "reason": "subscribe"},
            })
            await params.result_callback(json.dumps({
                "status": "limit_reached",
                "assistant_instruction": "The user has used all their free image credits. Tell them to subscribe to Maya Pro for more images and videos.",
            }))
            return
        elif limit_reason == "topup":
            await _send_rtvi(task, {
                "type": "limit_reached",
                "data": {"media_type": "image", "reason": "topup"},
            })
            await params.result_callback(json.dumps({
                "status": "limit_reached",
                "assistant_instruction": "The user has used all their image credits for this subscription period. Tell them they can buy a top-up pack from the app to get more credits.",
            }))
            return

        # Mark busy before any awaited work; cleared in the finally block.
        bot_session.is_processing = True
        await _send_rtvi(task, {"type": "generation_started", "data": {"type": "image", "status": "in_progress"}})

        try:
            if operation == "generate":
                # --- Text-to-image path ---
                image_id = f"img_{int(time.time())}_{uuid.uuid4().hex[:4]}"
                logger.info(f"IMAGE_GENERATE_START: session={session_id} prompt={prompt[:60]}")

                image_bytes, mime_type, api_latency = await cf_generate_image(prompt)
                ext = ".jpg" if "jpeg" in mime_type else ".png"
                r2_key = generate_key(session_id, "images", ext)
                # upload_bytes is synchronous — run it off the event loop.
                r2_url = await asyncio.to_thread(upload_bytes, image_bytes, r2_key, mime_type)

                # Cache the image (base64) in session state so a follow-up
                # "edit" operation can pick it up.
                bot_session.set_generated_image({
                    "data": base64.b64encode(image_bytes).decode(),
                    "mime_type": mime_type,
                    "r2_url": r2_url,
                    "image_id": image_id,
                    "description": prompt,
                })

                transcript_manager.add_generation("image", {
                    "image_id": image_id, "description": prompt, "r2_url": r2_url,
                    "api_latency": round(api_latency, 3),
                })
                # Presumably decrements one image credit — see BotSessionState.
                bot_session.use_image()

                logger.info(f"IMAGE_GENERATED: session={session_id} image_id={image_id} url={r2_url} latency={api_latency:.3f}s")

                await _send_rtvi(task, {
                    "type": "image_generation_complete",
                    "data": {"image_url": r2_url, "image_id": image_id, "description": prompt},
                })

                await params.result_callback(json.dumps({
                    "status": "completed", "image_url": r2_url, "image_id": image_id,
                    "assistant_instruction": "Image is ready! Tell the user their image is generated and they can see it on screen.",
                }))

            else:  # edit
                # --- Image-editing path ---
                edit_id = f"edit_{int(time.time())}_{uuid.uuid4().hex[:4]}"
                logger.info(f"IMAGE_EDIT_START: session={session_id} prompt={prompt[:60]}")

                # Source image comes from session state (presumably the last
                # generated or uploaded image — see BotSessionState).
                image_to_edit = bot_session.get_image_to_edit()
                if not image_to_edit:
                    await _send_rtvi(task, {
                        "type": "generation_failed",
                        "data": {"error": "No image available to edit", "media_type": "image"},
                    })
                    await params.result_callback(json.dumps({
                        "status": "error",
                        "assistant_instruction": "No image available to edit. Ask the user to generate or upload an image first.",
                    }))
                    return

                image_b64 = image_to_edit.get("data")
                image_bytes, mime_type, api_latency = await cf_edit_image(image_b64, prompt)
                ext = ".jpg" if "jpeg" in mime_type else ".png"
                r2_key = generate_key(session_id, "edits", ext)
                # upload_bytes is synchronous — run it off the event loop.
                r2_url = await asyncio.to_thread(upload_bytes, image_bytes, r2_key, mime_type)

                # Cache the edited image so further edits chain on the result.
                bot_session.set_edited_image({
                    "data": base64.b64encode(image_bytes).decode(),
                    "mime_type": mime_type,
                    "r2_url": r2_url,
                    "image_id": edit_id,
                    "description": f"Edited: {prompt}",
                })

                original_url = image_to_edit.get("r2_url") or image_to_edit.get("url")
                transcript_manager.add_generation("image_edit", {
                    "edit_id": edit_id, "edit_instruction": prompt,
                    "edited_r2_url": r2_url, "original_r2_url": original_url,
                    "api_latency": round(api_latency, 3),
                })
                # Presumably decrements one image credit — see BotSessionState.
                bot_session.use_image()

                logger.info(f"IMAGE_EDITED: session={session_id} edit_id={edit_id} url={r2_url} latency={api_latency:.3f}s")

                await _send_rtvi(task, {
                    "type": "image_edit_complete",
                    "data": {"image_url": r2_url, "edit_id": edit_id, "description": f"Edited: {prompt}"},
                })

                await params.result_callback(json.dumps({
                    "status": "completed", "image_url": r2_url, "edit_id": edit_id,
                    "assistant_instruction": "Edit is done! Tell the user their edited image is ready on screen.",
                }))

        except Exception as e:
            # Any failure (API, decode, upload) is reported to the frontend
            # and summarized for the assistant; nothing propagates upward.
            logger.error(f"IMAGE_ERROR: session={session_id} error={e}", exc_info=True)
            await _send_rtvi(task, {
                "type": "generation_failed",
                "data": {"error": str(e)[:100], "media_type": "image"},
            })
            await params.result_callback(json.dumps({
                "status": "error",
                "assistant_instruction": f"Image generation failed: {str(e)[:100]}. Apologize and suggest trying again.",
            }))
        finally:
            # Always clear the busy flag — also covers the early return in
            # the edit path when no source image is available.
            bot_session.is_processing = False

    return handle_cf_image
