#!/usr/bin/env python3
"""
Kitzu — Dashboard Rebuild Script

Stamps fresh health data into the dashboard template.
This is the ONLY way the dashboard HTML should be generated.

How it works:
  1. Reads dashboard.template.html (the clean template with %%MARKERS%%)
  2. Reads the latest brief and unified profile from the data lake
  3. Injects the JSON data into the markers
  4. Writes dashboard.html (the served file)

Also updates the personal dashboard's briefing.json from Kitzu data.

Usage:
    python3 rebuild.py              # Rebuild everything
    python3 rebuild.py --check      # Just verify template is valid
"""

import json
import argparse
import logging
from datetime import datetime, date, timedelta
from pathlib import Path

# Module-level logger + one-time basicConfig (this script is the entry point,
# so configuring the root handler here is intentional).
log = logging.getLogger("kitzu.rebuild")
logging.basicConfig(level=logging.INFO, format="%(levelname)s | %(message)s")

# Kitzu dashboard paths — everything lives next to this script.
KITZU_ROOT = Path(__file__).resolve().parent
TEMPLATE_PATH = KITZU_ROOT / "dashboard.template.html"  # clean template with %%MARKERS%%
OUTPUT_PATH = KITZU_ROOT / "dashboard.html"  # the served file, regenerated each run
DATA_ROOT = KITZU_ROOT / "data"
UNIFIED_DIR = DATA_ROOT / "unified"  # unified profile + briefs live here
BRIEFS_DIR = UNIFIED_DIR / "briefs"  # one <ISO-date>.json brief per day
OURA_DIR = DATA_ROOT / "oura"  # one <ISO-date>.json Oura export per day

# Personal dashboard paths
CLAWD = Path.home() / "clawd"
BRIEFING_PATH = CLAWD / "_Organized" / "Data" / "briefing.json"
DASHBOARD_DATA_PATH = CLAWD / "_Organized" / "Data" / "dashboard-data.json"
PERSONAL_DASHBOARD_BUILD = CLAWD / "dashboard" / "bin" / "dashboard-build.sh"


def load_latest_brief() -> dict:
    """Return today's brief if it exists, else the newest brief on disk.

    Falls back to a minimal error payload when the briefs directory is
    empty, so callers always receive a dict with a "date" key.
    """
    today = date.today().isoformat()
    todays_brief = BRIEFS_DIR / f"{today}.json"
    if todays_brief.exists():
        return json.loads(todays_brief.read_text())

    # No brief for today — fall back to the lexicographically latest file
    # (ISO-dated filenames sort chronologically).
    candidates = sorted(BRIEFS_DIR.glob("*.json"))
    if not candidates:
        log.error("No briefs found at all!")
        return {"date": today, "sections": [], "error": "No brief data available"}

    newest = candidates[-1]
    log.warning(f"No brief for {today}, using {newest.name}")
    return json.loads(newest.read_text())


def load_profile() -> dict:
    """Load the unified profile; return an empty dict if it's missing."""
    profile_path = UNIFIED_DIR / "profile.json"
    if not profile_path.exists():
        log.error("No profile.json found!")
        return {}
    return json.loads(profile_path.read_text())


def rebuild_kitzu_dashboard():
    """Stamp fresh brief/profile JSON into the Kitzu dashboard template.

    Reads dashboard.template.html, substitutes the '"%%BRIEF_JSON%%"' and
    '"%%PROFILE_JSON%%"' placeholder strings with serialized JSON, and
    writes the result to dashboard.html.

    Returns:
        True on success; False when the template is missing, a marker is
        absent, or a substitution did not take effect.
    """
    if not TEMPLATE_PATH.exists():
        log.error(f"Template not found: {TEMPLATE_PATH}")
        return False

    template = TEMPLATE_PATH.read_text()

    # Verify markers exist before doing any data loading.
    if "%%BRIEF_JSON%%" not in template:
        log.error("%%BRIEF_JSON%% marker missing from template!")
        return False
    if "%%PROFILE_JSON%%" not in template:
        log.error("%%PROFILE_JSON%% marker missing from template!")
        return False

    brief = load_latest_brief()
    profile = load_profile()

    # Escape "</" so data containing "</script>" cannot prematurely close
    # the inline <script> element in the template. "<\/" is a valid JSON
    # string escape, so the payload parses identically on the client.
    brief_json = json.dumps(brief, ensure_ascii=True).replace("</", "<\\/")
    profile_json = json.dumps(profile, ensure_ascii=True).replace("</", "<\\/")

    # Simple, reliable string replacement. Chain off `html`, not `template`:
    # the original re-assigned from `template` on a duplicated "safety" line,
    # which would silently discard any substitution made in between.
    html = template.replace('"%%BRIEF_JSON%%"', brief_json)
    html = html.replace('"%%PROFILE_JSON%%"', profile_json)

    # Verify the replacement worked before overwriting the served file.
    if "%%BRIEF_JSON%%" in html or "%%PROFILE_JSON%%" in html:
        log.error("Markers were NOT replaced! Something went wrong.")
        return False

    OUTPUT_PATH.write_text(html)
    log.info(f"Kitzu dashboard rebuilt: {OUTPUT_PATH}")
    log.info(f"  Brief date: {brief.get('date', '?')}")
    log.info(f"  Sections: {len(brief.get('sections', []))}")
    log.info(f"  Priority actions: {len(brief.get('priority_actions', []))}")
    log.info(f"  File size: {len(html):,} bytes")
    return True


def rebuild_personal_briefing():
    """Update briefing.json for the personal dashboard using Kitzu data.

    Merges weight/body-composition, blood pressure, Oura readiness/steps/
    exercise, and sleep data into dashboard-data.json, regenerates
    briefing.json, then re-runs the personal dashboard build script.

    Returns:
        True when the sync ran to completion; False when
        dashboard-data.json is missing (nothing to merge into).
    """
    if not DASHBOARD_DATA_PATH.exists():
        log.warning("dashboard-data.json not found, skipping personal dashboard")
        return False

    dd = json.loads(DASHBOARD_DATA_PATH.read_text())
    today = date.today()
    now = datetime.now()

    # ── Sync weight & body composition from Kitzu profile ──────────
    profile = load_profile()
    vitals = profile.get("vitals", {})
    pw = vitals.get("weight", {})           # latest weigh-in record
    pbc = vitals.get("body_composition", {})
    pwh = vitals.get("weight_history", [])  # chronological weigh-ins
    pbp = vitals.get("bp_history", [])      # chronological BP readings
    ptrend = vitals.get("weight_trend_7d", {})

    if pw.get("weight_lb"):
        # Build 30-day weight series from profile history. ISO date strings
        # compare correctly with >=, so no parsing is needed.
        cutoff = (today - timedelta(days=30)).isoformat()
        series30d = [rec["weight_lb"] for rec in pwh if rec.get("date", "") >= cutoff]

        baseline = dd.get("weight", {}).get("baseline", 245.0)
        goal = dd.get("weight", {}).get("goal", 220.0)
        lost = round(baseline - pw["weight_lb"], 1)
        # Pace counts only downward movement; an "up" week means zero pace
        # (and therefore no ETA projection).
        pace = abs(ptrend.get("delta_lb", 0)) if ptrend.get("direction") == "down" else 0
        remaining = pw["weight_lb"] - goal
        eta_weeks = round(remaining / pace, 1) if pace > 0 else 0

        last7 = series30d[-7:]
        dd["weight"] = {
            "current": pw["weight_lb"],
            "avg7d": round(sum(last7) / max(len(last7), 1), 1) if series30d else pw["weight_lb"],
            "goal": goal,
            "baseline": baseline,
            "baselineDate": dd.get("weight", {}).get("baselineDate", "February 1st"),
            "deltaVsLast": round(ptrend.get("delta_lb", 0), 1),
            "pace": pace,
            "paceUnit": "lb/wk",
            "etaWeeks": eta_weeks,
            "lastWeighIn": pw.get("date", ""),
            "series30d": series30d,
            "lostSoFar": lost,
            "weeklyPace": pace,
            "projectedWeeks": eta_weeks,
            "date": pw.get("date", ""),
            "synced_at": now.isoformat(),
            "source": "iHealth API",
            "bodyFat": pw.get("body_fat_pct", 0),
            "muscleMass": pw.get("muscle_mass", 0),
            "leanMass": pbc.get("lean_mass", 0),
            "boneMass": pw.get("bone_mass", 0),
            "bodyWater": pw.get("body_water", 0),
            # protein/bmr are not in the profile; preserve existing values.
            "protein": dd.get("weight", {}).get("protein", 0),
            "visceralFat": pw.get("visceral_fat", 0),
            "bmr": dd.get("weight", {}).get("bmr", 0),
            "bmi": pw.get("bmi", 0),
        }
        log.info(f"Weight synced from profile: {pw['weight_lb']} lbs ({pw.get('date')})")

        # Body fat object — series only includes records that carry a value.
        bf_series = [rec.get("body_fat_pct", 0) for rec in pwh
                     if rec.get("date", "") >= cutoff and rec.get("body_fat_pct")]
        dd["bodyFat"] = {
            "current": pw.get("body_fat_pct", 0),
            "goal": dd.get("bodyFat", {}).get("goal", 20.0),
            "baseline": dd.get("bodyFat", {}).get("baseline", 30.0),
            "deltaVsLast": round(bf_series[-1] - bf_series[-2], 1) if len(bf_series) >= 2 else 0,
            "series30d": bf_series,
            "date": pw.get("date", ""),
        }

        # BMI
        dd["bmi"] = {"current": pw.get("bmi", 0), "date": pw.get("date", "")}

        # Body composition
        dd["bodyComposition"] = {
            "muscleMass": pw.get("muscle_mass", 0),
            "leanMass": pbc.get("lean_mass", 0),
            "boneMass": pw.get("bone_mass", 0),
            "bodyWater": pw.get("body_water", 0),
            "visceralFat": pw.get("visceral_fat", 0),
            "date": pw.get("date", ""),
        }

    # ── Sync blood pressure from profile ───────────────────────────
    if pbp:
        latest_bp = pbp[-1]
        dd["bloodPressure"] = {
            "systolic": latest_bp.get("systolic"),
            "diastolic": latest_bp.get("diastolic"),
            # Systolic >= 130 is the stage-1 hypertension cutoff used here.
            "status": "high-stage1" if latest_bp.get("systolic", 0) >= 130 else "normal",
            "date": latest_bp.get("date", ""),
            "pulse": latest_bp.get("pulse", 0),
            "time": latest_bp.get("time", ""),
            "source": "iHealth BP Monitor",
            "history": [{"systolic": r["systolic"], "diastolic": r["diastolic"],
                         "pulse": r.get("pulse", 0), "date": r["date"],
                         "time": r.get("time", "")} for r in pbp[-8:]],
        }
        log.info(f"Blood pressure synced: {latest_bp.get('systolic')}/{latest_bp.get('diastolic')} ({latest_bp.get('date')})")

    # ── Sync Oura readiness, steps, exercise from profile ──────────
    oura_data = profile.get("oura", {})

    if oura_data.get("readiness") and oura_data["readiness"].get("score"):
        dd["readiness"] = {
            "score": oura_data["readiness"]["score"],
            "date": oura_data["readiness"].get("date", ""),
        }
        log.info(f"Readiness synced: {oura_data['readiness']['score']} ({oura_data['readiness'].get('date')})")

    if oura_data.get("steps") and oura_data["steps"].get("count"):
        # Merge today's count into the existing per-day map, keep last 7 days.
        existing_daily = dd.get("steps", {}).get("daily", {})
        existing_daily[oura_data["steps"]["date"]] = oura_data["steps"]["count"]
        sorted_days = dict(sorted(existing_daily.items())[-7:])
        dd["steps"] = {"daily": sorted_days, "updated": oura_data["steps"]["date"]}
        log.info(f"Steps synced: {oura_data['steps']['count']} ({oura_data['steps']['date']})")

    exercise_feed = oura_data.get("exercise_feed", [])
    if exercise_feed:
        recent = [{"date": e["date"], "name": e.get("type", "Workout"),
                   "duration_min": e.get("duration_min", 0)} for e in exercise_feed[:10]]
        # This week = everything since Monday (weekday() == 0).
        week_start = (today - timedelta(days=today.weekday())).isoformat()
        this_week_min = sum(e.get("duration_min", 0) for e in exercise_feed if e.get("date", "") >= week_start)
        dd["exercise"] = {"recent": recent, "thisWeek": this_week_min}
        log.info(f"Exercise synced: {len(recent)} recent activities, {this_week_min} min this week")

    # ── Sync sleep from latest Kitzu Oura export ───────────────────
    today_oura = OURA_DIR / f"{today.isoformat()}.json"
    yesterday_oura = OURA_DIR / f"{(today - timedelta(days=1)).isoformat()}.json"
    oura_path = today_oura if today_oura.exists() else yesterday_oura

    if oura_path.exists():
        oura = json.loads(oura_path.read_text())
        dd["sleep"] = {
            "total_hours": oura.get("sleep_hours", 0),
            "bedtime": oura.get("bedtime", ""),
            "wake_time": oura.get("wake_time", ""),
            "source": "Oura",
            "stages": {
                "awake_min": oura.get("awake_min", 0),
                "light_min": oura.get("light_sleep_min", 0),
                "deep_min": oura.get("deep_sleep_min", 0),
                "rem_min": oura.get("rem_sleep_min", 0),
            },
            "avg_hrv": oura.get("avg_hrv", 0),
            "date": oura.get("date", ""),
        }

        # Build 14d sleep series from all Oura exports on disk; skip any
        # file that fails to parse (best-effort aggregation).
        series = {}
        for f in sorted(OURA_DIR.glob("*.json")):
            try:
                rec = json.loads(f.read_text())
                series[rec["date"]] = rec.get("sleep_hours", 0)
            except Exception:
                pass
        dd["sleep"]["series14d"] = dict(list(sorted(series.items()))[-14:])
        log.info(f"Sleep synced from Oura data dated {oura.get('date')}")
    else:
        log.warning("No Oura data for today or yesterday, keeping existing sleep data")

    # Persist ALL syncs. BUG FIX: the original only wrote the file inside
    # the Oura branch above, so weight/BP/steps/exercise updates were
    # silently discarded whenever no recent Oura export existed.
    DASHBOARD_DATA_PATH.write_text(json.dumps(dd, indent=2, ensure_ascii=False))
    log.info("dashboard-data.json updated")

    # ── Build briefing.json ────────────────────────────────────────
    hour = now.hour
    greeting = "Good morning" if hour < 12 else "Good afternoon" if hour < 17 else "Good evening"

    w = dd.get("weight", {})
    series = w.get("series30d", [])
    if len(series) >= 7 and isinstance(series[0], (int, float)):
        week_change = series[-1] - series[-7]
        trend_text = f"{'down' if week_change < 0 else 'up'} {abs(week_change):.1f} lb this week"
        trend_positive = week_change <= 0
    else:
        trend_text = "tracking"
        trend_positive = True

    briefing = {
        "generated": now.isoformat(),
        "greeting": greeting,
        # NOTE: %-d (no zero padding) is a glibc extension; fails on Windows.
        "date_display": today.strftime("%A, %B %-d"),
        "weight": {
            "current": w.get("current"),
            "trend": trend_text,
            "trend_positive": trend_positive,
            "pace": w.get("pace", 0),
            "eta_weeks": w.get("etaWeeks", 0),
        },
        "events_today": [],
        "events_upcoming": [],
        "checkpoint": None,
        "cro_price": dd.get("crypto", {}).get("cro_price"),
        "on_this_day": None,
        "focus": dd.get("focus", ""),
        "sleep": dd.get("sleep"),
        "email": None,
        "google_status": {"drive": False, "gmail": False},
        "betting": None,
    }

    BRIEFING_PATH.write_text(json.dumps(briefing, indent=2, ensure_ascii=False))
    log.info(f"briefing.json regenerated for {today}")

    # ── Rebuild personal dashboard HTML ────────────────────────────
    if PERSONAL_DASHBOARD_BUILD.exists():
        import subprocess
        result = subprocess.run(
            ["bash", str(PERSONAL_DASHBOARD_BUILD)],
            capture_output=True, text=True, cwd=str(CLAWD),
        )
        if result.returncode == 0:
            log.info("Personal dashboard HTML rebuilt")
        else:
            log.warning(f"Personal dashboard build failed: {result.stderr[:200]}")
    else:
        log.warning("Personal dashboard build script not found")

    return True


def check_template():
    """Verify the template exists and contains each marker exactly once.

    Prints a human-readable report and returns True when all checks pass.
    """
    if not TEMPLATE_PATH.exists():
        print(f"FAIL: Template not found at {TEMPLATE_PATH}")
        return False

    template = TEMPLATE_PATH.read_text()

    ok = True
    for marker in ("%%BRIEF_JSON%%", "%%PROFILE_JSON%%"):
        occurrences = template.count(marker)
        if occurrences == 1:
            print(f"  OK: {marker} — 1 instance")
        else:
            print(f"FAIL: {marker} found {occurrences} times (expected 1)")
            ok = False

    print(f"  Template size: {len(template):,} bytes")
    print(f"  Template lines: {len(template.splitlines())}")

    # A literal "2026- date in the template suggests real data was pasted in.
    if '"2026-' in template:
        print(f"  WARNING: Template contains date strings — may have leftover data")

    return ok


def main():
    """CLI entry point: parse flags, then rebuild one or both dashboards."""
    parser = argparse.ArgumentParser(description="Kitzu Dashboard Rebuild")
    parser.add_argument("--check", action="store_true", help="Verify template only")
    parser.add_argument("--kitzu-only", action="store_true", help="Only rebuild Kitzu dashboard")
    args = parser.parse_args()

    # --check short-circuits everything else.
    if args.check:
        check_template()
        return

    print(f"\n🔄 Kitzu Dashboard Rebuild — {date.today()}")
    print("=" * 42)

    # Step 1: Kitzu dashboard (must succeed before anything else runs).
    if not rebuild_kitzu_dashboard():
        print("❌ Kitzu dashboard rebuild FAILED")
        return

    # Step 2: personal dashboard, unless explicitly skipped.
    if not args.kitzu_only:
        rebuild_personal_briefing()

    print(f"\n✅ All dashboards rebuilt")
    print(f"   Kitzu: http://100.91.208.3:8080/kitzu/dashboard.html")
    print(f"   Personal: http://100.91.208.3:8080/dashboard/\n")


if __name__ == "__main__":
    main()
