"""Centralized OpenAI client for the platform layer."""

from __future__ import annotations

import json
import os
import re
from pathlib import Path
from typing import Any

from openai import OpenAI
from pdf2image import convert_from_bytes

from platform.logging.platform_logger import get_platform_logger, log_structured


# Module-wide logger for the "ai" platform component.
logger = get_platform_logger("ai")
# Repository root: this file sits two directory levels below it.
_PROJECT_ROOT = Path(__file__).resolve().parents[2]
# Directory holding per-app OpenAI key files (<app_id>.key plus shared dev.key).
_KEYS_DIR = _PROJECT_ROOT / "platform" / "secrets" / "openai_keys"


def run_llm(context, app_id, prompt, model="gpt-4o-mini", options=None):
    """Execute an OpenAI chat request and return normalized output plus usage.

    Args:
        context: Request context dict (may be None). The ``tenant`` and
            ``request_id`` keys, when present, are used for usage logging.
        app_id: Application identifier used to resolve the API key.
        prompt: User prompt text.
        model: OpenAI model name.
        options: Optional dict; recognized keys include ``response_format``,
            ``file_bytes``, and ``mimetype`` (see the helper functions).

    Returns:
        Dict with ``output`` (str, or parsed JSON in JSON mode) and
        ``usage`` (token counts).
    """
    options = options or {}
    client = OpenAI(api_key=_resolve_api_key(app_id))

    request_kwargs = _build_request_kwargs(prompt=prompt, model=model, options=options)
    response = client.chat.completions.create(**request_kwargs)

    content = ""
    if response.choices and response.choices[0].message:
        content = response.choices[0].message.content or ""

    output = _normalize_output(content=content, options=options)
    usage = _extract_usage(response)

    # Bug fix: the original tenant lookup called ``(context or {}).get(...)``
    # without guarding against a truthy non-dict context, which raised
    # AttributeError. Normalize the context once and use it for both fields.
    ctx = context if isinstance(context, dict) else {}
    tenant = ctx.get("tenant")
    log_structured(
        logger,
        "ai_usage",
        tenant_id=tenant.get("tenant_id") if isinstance(tenant, dict) else None,
        app_id=app_id,
        model=model,
        tokens=usage.get("total_tokens"),
        request_id=ctx.get("request_id"),
    )

    return {
        "output": output,
        "usage": usage,
    }


def _resolve_api_key(app_id: str) -> str:
    """Resolve the OpenAI API key for *app_id*.

    Precedence (most specific source first):
        1. per-app key file   ``platform/secrets/openai_keys/<app_id>.key``
        2. per-app env var    ``OPENAI_API_KEY_<APP_ID>``
        3. shared dev key file ``dev.key``
        4. shared env vars    ``OPENAI_API_KEY_DEV`` / ``OPENAI_API_KEY``
           (seeding ``dev.key`` as a side effect on first use)

    Raises:
        RuntimeError: when no source yields a key.
    """
    # Normalize app_id into a safe upper-cased token, e.g. "my-app" -> "MY_APP".
    normalized = re.sub(r"[^A-Z0-9]+", "_", str(app_id or "").upper()).strip("_")

    api_key = (
        _read_key_file(_KEYS_DIR / f"{normalized.lower()}.key")
        # Bug fix: the app-specific env var previously ranked *below* the
        # shared dev.key file, so a seeded dev key silently shadowed it.
        # This order also avoids the dev-key seeding side effect when a
        # more specific key is available.
        or os.getenv(f"OPENAI_API_KEY_{normalized}")
        or _read_key_file(_KEYS_DIR / "dev.key")
        or _seed_dev_key_from_env()
    )
    if not api_key:
        raise RuntimeError(f"Missing OpenAI API key for app_id={app_id!r}")
    return api_key


def _read_key_file(path: Path) -> str | None:
    try:
        value = path.read_text(encoding="utf-8").strip()
    except FileNotFoundError:
        return None
    except OSError:
        return None
    return value or None


def _seed_dev_key_from_env() -> str | None:
    """Fetch the dev OpenAI key from the environment, persisting it to disk.

    Reads ``OPENAI_API_KEY_DEV`` (falling back to ``OPENAI_API_KEY``) and
    best-effort writes it to ``dev.key`` so later lookups hit the file.

    Returns:
        The stripped key, or ``None`` when no non-blank key is set.
    """
    raw = os.getenv("OPENAI_API_KEY_DEV") or os.getenv("OPENAI_API_KEY")
    api_key = (raw or "").strip()
    # Robustness fix: a whitespace-only env value previously slipped past the
    # falsy check and persisted an empty dev.key; treat it as missing.
    if not api_key:
        return None

    try:
        _KEYS_DIR.mkdir(parents=True, exist_ok=True)
        dev_key_path = _KEYS_DIR / "dev.key"
        if not dev_key_path.exists():
            dev_key_path.write_text(api_key + "\n", encoding="utf-8")
    except OSError:
        # If the file cannot be persisted, we still allow the env fallback.
        pass
    return api_key


def _build_request_kwargs(*, prompt: str, model: str, options: dict[str, Any]) -> dict[str, Any]:
    """Assemble the keyword arguments for ``chat.completions.create``."""
    message = {
        "role": "user",
        "content": _build_content(prompt=prompt, options=options),
    }
    kwargs: dict[str, Any] = {"model": model, "messages": [message]}

    # Only JSON mode is recognized; any other response_format value is ignored.
    if options.get("response_format") == "json_object":
        kwargs["response_format"] = {"type": "json_object"}
    return kwargs


def _build_content(*, prompt: str, options: dict[str, Any]) -> list[dict[str, Any]]:
    content: list[dict[str, Any]] = [{"type": "text", "text": prompt}]
    file_bytes = options.get("file_bytes")
    if file_bytes:
        file_bytes, mimetype = _prepare_visual_input(
            file_bytes=file_bytes,
            mimetype=options.get("mimetype") or "application/octet-stream",
        )
        import base64

        b64 = base64.b64encode(file_bytes).decode("utf-8")
        content.append(
            {
                "type": "image_url",
                "image_url": {"url": f"data:{mimetype};base64,{b64}"},
            }
        )
    return content


def _prepare_visual_input(*, file_bytes: bytes, mimetype: str) -> tuple[bytes, str]:
    if mimetype == "application/pdf":
        return _pdf_first_page_to_png(file_bytes), "image/png"
    return file_bytes, mimetype


def _pdf_first_page_to_png(pdf_bytes: bytes) -> bytes:
    """Rasterize only the first page of *pdf_bytes* into PNG bytes at 200 DPI.

    Raises:
        RuntimeError: if the converter produces no pages.
    """
    from io import BytesIO

    pages = convert_from_bytes(pdf_bytes, dpi=200, fmt="png", first_page=1, last_page=1)
    if not pages:
        raise RuntimeError("PDF conversion failed: no pages rendered")

    buffer = BytesIO()
    pages[0].save(buffer, format="PNG")
    return buffer.getvalue()


def _normalize_output(*, content: str, options: dict[str, Any]):
    if options.get("response_format") == "json_object":
        try:
            return json.loads(content)
        except Exception:
            return {"_raw": content}
    return content


def _extract_usage(response) -> dict[str, int]:
    usage = getattr(response, "usage", None)
    if usage is None:
        return {"input_tokens": 0, "output_tokens": 0, "total_tokens": 0}
    return {
        "input_tokens": int(getattr(usage, "prompt_tokens", 0) or 0),
        "output_tokens": int(getattr(usage, "completion_tokens", 0) or 0),
        "total_tokens": int(getattr(usage, "total_tokens", 0) or 0),
    }
