// Brook Consultancy — AI Assist screen + 2-step prompt modal
// Mirrors the Ofsted EYFS app's "bring-your-own-AI" workflow

// ---------------------------------------------------------------
// 2-step prompt modal — Copy prompt → Paste response → Parse & apply
// ---------------------------------------------------------------
function PromptModal({ open, onClose, def, summary, transcript, store, onApplied }) {
  const [step, setStep] = React.useState(1);
  const [response, setResponse] = React.useState('');
  const [error, setError] = React.useState(null);
  const [copied, setCopied] = React.useState(false);
  const promptRef = React.useRef(null);

  React.useEffect(() => {
    if (open) {
      setStep(1);
      setResponse('');
      setError(null);
      setCopied(false);
    }
  }, [open, def]);

  if (!open || !def) return null;

  const prompt = def.build(summary || '', transcript || '');
  const charCount = prompt.length;

  const copy = async () => {
    try {
      await navigator.clipboard.writeText(prompt);
      setCopied(true);
      setTimeout(() => setCopied(false), 2200);
    } catch {
      // Fallback: select the text
      if (promptRef.current) {
        promptRef.current.select();
        try { document.execCommand('copy'); setCopied(true); setTimeout(() => setCopied(false), 2200); }
        catch {}
      }
    }
  };

  const parseAndApply = () => {
    setError(null);
    if (!response.trim()) {
      setError('Paste the AI response above before parsing.');
      return;
    }
    try {
      const parsed = window.extractJson(response);
      def.apply(store, parsed);
      onApplied && onApplied(def);
      onClose();
    } catch (e) {
      setError(e.message || String(e));
    }
  };

  return (
    <div className="modal-overlay" onClick={onClose}>
      <div className="modal" onClick={e => e.stopPropagation()} style={{ maxWidth: 760 }}>
        <div className="modal-header">
          <div>
            <h2>AI prompt — {def.label}</h2>
            <div className="modal-sub">
              {def.kind === 'master' ? 'Master prompt — covers every section in one go' : 'Section-specific prompt'}
            </div>
          </div>
          <button className="close-btn" onClick={onClose}>×</button>
        </div>

        {/* Stepper */}
        <div style={{ display: 'flex', alignItems: 'center', padding: '14px 24px', background: 'var(--bg-soft)', borderBottom: '1px solid var(--border)', gap: 12 }}>
          <button
            className={`step-pill${step === 1 ? ' active' : ''}${step > 1 ? ' done' : ''}`}
            onClick={() => setStep(1)}
          >
            <span className="step-num">1.</span> Copy prompt
          </button>
          <div style={{ flex: 1, height: 1, background: step > 1 ? 'var(--success)' : 'var(--border-strong)' }}></div>
          <button
            className={`step-pill${step === 2 ? ' active' : ''}`}
            onClick={() => setStep(2)}
          >
            <span className="step-num">2.</span> Paste AI response
          </button>
        </div>

        <div className="modal-body">
          {step === 1 && (
            <>
              <p style={{ fontSize: 13.5, color: 'var(--text-soft)', marginTop: 0, marginBottom: 14 }}>
                Copy this prompt and paste it into <strong>ChatGPT, Claude.ai, Gemini, Copilot</strong>, or your AI tool of choice. The prompt includes the assessment criteria + your meeting source. The AI will reply with structured JSON.
              </p>

              <textarea
                ref={promptRef}
                className="prompt-textarea"
                value={prompt}
                readOnly
                onClick={e => e.target.select()}
              />

              <div style={{ display: 'flex', alignItems: 'center', justifyContent: 'space-between', marginTop: 12 }}>
                <div style={{ fontSize: 12, color: 'var(--text-muted)' }}>
                  {charCount.toLocaleString()} characters
                </div>
                <button className="btn btn-teal" onClick={copy} style={{ display: 'inline-flex', alignItems: 'center', gap: 8 }}>
                  {copied ? <><span>✓</span> Copied to clipboard</> : <><span>📋</span> Copy prompt</>}
                </button>
              </div>

              <div style={{ background: 'var(--bg-soft)', border: '1px solid var(--border)', borderRadius: 6, padding: '12px 16px', marginTop: 18, fontSize: 13, color: 'var(--text-soft)' }}>
                <strong style={{ color: 'var(--brook-navy)' }}>Tips:</strong>
                <ul style={{ margin: '6px 0 0', paddingLeft: 20 }}>
                  <li>If the AI replies in markdown or with extra commentary, that's fine — we'll extract the JSON.</li>
                  <li>If it returns "I can't process this much text", split your transcript and run sections separately instead of using the master prompt.</li>
                  <li>Best results: Claude.ai, ChatGPT (GPT-4 / o-series), Gemini Advanced. Smaller free models may struggle with long transcripts.</li>
                </ul>
              </div>
            </>
          )}

          {step === 2 && (
            <>
              <p style={{ fontSize: 13.5, color: 'var(--text-soft)', marginTop: 0, marginBottom: 14 }}>
                Paste the AI's full response below. It can include markdown code fences or extra prose — we'll extract the JSON automatically.
              </p>

              <textarea
                className="prompt-textarea"
                value={response}
                onChange={e => { setResponse(e.target.value); setError(null); }}
                placeholder={`Paste the AI response here. Looks like:\n{ "indicators": [...], "judgement": "...", ...\n}`}
              />

              {error && (
                <div className="app-banner error" style={{ borderRadius: 6, marginTop: 12, borderBottom: 'none', border: '1px solid #f0bfb9' }}>
                  <span><strong>Could not parse:</strong> {error}</span>
                  <button className="dismiss" onClick={() => setError(null)}>×</button>
                </div>
              )}
            </>
          )}
        </div>

        <div className="modal-footer">
          <div className="left">
            {step === 1
              ? <>🔒 Prompt is built locally on your device — nothing is sent until you paste it into your AI of choice.</>
              : <>✦ Findings will be highlighted in amber for your review before they enter the report.</>}
          </div>
          <div className="actions">
            {step === 1 && <button className="btn" onClick={onClose}>Close</button>}
            {step === 1 && <button className="btn btn-primary" onClick={() => setStep(2)}>Next: paste response →</button>}
            {step === 2 && <button className="btn" onClick={() => setStep(1)}>← Back to prompt</button>}
            {step === 2 && <button className="btn btn-primary" onClick={parseAndApply}>Parse &amp; review</button>}
          </div>
        </div>
      </div>
    </div>
  );
}

// ---------------------------------------------------------------
// Audio recording card — appears at the top of AI Assist for any record
// that has an audio attachment. Lets the operator play it back in-app,
// copy a direct Drive link, or open it in Drive. The transcription still
// gets pasted back into the existing aiTranscript field via the usual flow.
// ---------------------------------------------------------------
function AudioRecordingCard({ audio }) {
  const audioRef = React.useRef(null);
  const [playing, setPlaying] = React.useState(false);
  const [currentTime, setCurrentTime] = React.useState(0);
  const [duration, setDuration] = React.useState(audio.durationSec || 0);
  const [streamUrl, setStreamUrl] = React.useState(null);
  const [loadError, setLoadError] = React.useState(null);
  const [copied, setCopied] = React.useState(false);
  const [loading, setLoading] = React.useState(false);

  const isUploading = audio.uploadStatus === 'uploading';
  const isFailed = audio.uploadStatus === 'failed';
  const isComplete = !!audio.fileId;

  const viewLink = audio.webViewLink || (audio.fileId ? `https://drive.google.com/file/d/${audio.fileId}/view` : null);
  const directLink = audio.fileId ? `https://drive.google.com/uc?export=download&id=${audio.fileId}` : null;

  // Lazy-load the audio for in-app playback. Drive needs the OAuth token.
  const loadAudio = async () => {
    if (streamUrl || !audio.fileId) return;
    setLoading(true);
    setLoadError(null);
    try {
      const settings = window.loadDriveSettings();
      if (!window.hasValidToken()) {
        await window.ensureToken(settings.clientId);
      }
      const params = new URLSearchParams({ alt: 'media', supportsAllDrives: 'true' });
      const res = await fetch(`https://www.googleapis.com/drive/v3/files/${audio.fileId}?${params}`, {
        headers: { Authorization: `Bearer ${window.getAccessToken()}` }
      });
      if (!res.ok) throw new Error(`Drive ${res.status}`);
      const blob = await res.blob();
      setStreamUrl(URL.createObjectURL(blob));
    } catch (e) {
      setLoadError(e.message || String(e));
    } finally {
      setLoading(false);
    }
  };

  React.useEffect(() => () => { if (streamUrl) URL.revokeObjectURL(streamUrl); }, [streamUrl]);

  const togglePlay = async () => {
    if (!streamUrl) { await loadAudio(); return; }
    const a = audioRef.current;
    if (!a) return;
    if (a.paused) { try { await a.play(); setPlaying(true); } catch (e) { setLoadError(e.message); } }
    else { a.pause(); setPlaying(false); }
  };

  React.useEffect(() => {
    if (streamUrl && audioRef.current) {
      audioRef.current.play().then(() => setPlaying(true)).catch(() => {});
    }
  }, [streamUrl]);

  const copyLink = async () => {
    if (!directLink) return;
    try { await navigator.clipboard.writeText(directLink); }
    catch { try { window.prompt('Copy the direct audio link:', directLink); } catch {} }
    setCopied(true);
    setTimeout(() => setCopied(false), 2200);
  };

  const seek = (e) => {
    const a = audioRef.current;
    if (!a || !duration) return;
    const rect = e.currentTarget.getBoundingClientRect();
    const ratio = Math.max(0, Math.min(1, (e.clientX - rect.left) / rect.width));
    a.currentTime = ratio * duration;
    setCurrentTime(a.currentTime);
  };

  return (
    <div className="audio-card">
      <div className="audio-card-head">
        <div>
          <div className="eyebrow" style={{ color: 'var(--brook-teal)' }}>✎ Audio recording</div>
          <h3 style={{ fontSize: 16, marginTop: 2 }}>
            {isUploading ? 'Uploading from consultant device…' :
             isFailed ? 'Audio capture — upload failed' :
             'Captured from this consultation'}
          </h3>
        </div>
        <div className="audio-card-meta">
          <span>{window.fmtDuration(duration)}</span>
          {audio.sizeBytes ? <span> · {window.fmtBytes(audio.sizeBytes)}</span> : null}
          {audio.mimeType ? <span> · {audio.mimeType.split(';')[0].replace('audio/','')}</span> : null}
        </div>
      </div>

      {isUploading && (
        <div className="audio-card-progress">
          <div className="audio-card-progress-bar"><div className="audio-card-progress-fill" style={{ width: `${audio.uploadProgress || 0}%` }} /></div>
          <div className="audio-card-progress-text">{audio.uploadProgress || 0}% — the back-office team can play this once upload completes.</div>
        </div>
      )}

      {isFailed && (
        <div className="audio-card-failed">
          <strong>Upload failed:</strong> {audio.uploadError || 'unknown error'}. The recording is held on the consultant's device and can be retried.
        </div>
      )}

      {isComplete && (
        <>
          <div className="audio-player">
            <button className="audio-play-btn" onClick={togglePlay} disabled={loading} aria-label={playing ? 'Pause' : 'Play'}>
              {loading ? '⋯' : playing ? '⏸' : '▶'}
            </button>
            <div className="audio-track" onClick={seek}>
              <div className="audio-track-fill" style={{ width: duration ? `${(currentTime / duration) * 100}%` : '0%' }} />
            </div>
            <div className="audio-time">{window.fmtDuration(currentTime)} / {window.fmtDuration(duration)}</div>
            {streamUrl && (
              <audio
                ref={audioRef}
                src={streamUrl}
                onTimeUpdate={e => setCurrentTime(e.currentTarget.currentTime)}
                onLoadedMetadata={e => { if (!duration || isFinite(e.currentTarget.duration)) setDuration(e.currentTarget.duration); }}
                onEnded={() => setPlaying(false)}
                onPause={() => setPlaying(false)}
                onPlay={() => setPlaying(true)}
              />
            )}
          </div>
          {loadError && <div className="audio-card-failed" style={{ marginTop: 8 }}>{loadError}</div>}
          <div className="audio-card-actions">
            <button className="btn" style={{ fontSize: 12, padding: '6px 12px' }} onClick={copyLink}>
              {copied ? '✓ Link copied' : '🔗 Copy direct link'}
            </button>
            {viewLink && (
              <a className="btn" style={{ fontSize: 12, padding: '6px 12px', display: 'inline-flex', alignItems: 'center', textDecoration: 'none' }} href={viewLink} target="_blank" rel="noreferrer">
                ↗ Open in Drive
              </a>
            )}
            <span className="audio-card-hint">
              Drop the link or file into your transcription tool of choice (Whisper, Otter.ai, Microsoft Word dictation, etc.) and paste the resulting transcript into the Meeting Transcript field below.
            </span>
          </div>
        </>
      )}
    </div>
  );
}

// ---------------------------------------------------------------
// AI Assist screen
// ---------------------------------------------------------------
// AI Assist screen: source-content entry (summary + transcript), the
// bring-your-own-AI prompt grid, an optional audio playback card, and the
// TV feed approval card.
//
// Props:
//   store             — app store; reads store.data, writes via store.setAiSource
//   onJumpToDashboard — navigation callback for the "View Assessment Dashboard" chip
function AiAssistView({ store, onJumpToDashboard }) {
  // Currently-open prompt definition; null means the modal is closed.
  const [openDef, setOpenDef] = React.useState(null);
  const data = store.data;
  const summary = data.aiSummary || '';
  const transcript = data.aiTranscript || '';

  // Whitespace-delimited word count; blank strings count as 0.
  const wordCount = (s) => (s.trim() ? s.trim().split(/\s+/).length : 0);
  const totalWords = wordCount(summary) + wordCount(transcript);

  // setAiSource(summary, transcript) — passing undefined presumably leaves
  // that field untouched (NOTE(review): confirm against the store implementation).
  const setSummary = (v) => store.setAiSource(v, undefined);
  const setTranscript = (v) => store.setAiSource(undefined, v);

  // Prompt definitions: one "master" covering everything + one per section.
  const defs = window.getBrookPromptDefs();
  const masterDef = defs.find(d => d.key === 'master');
  const sectionDefs = defs.filter(d => d.key !== 'master');

  // Build status counts
  const appliedCount = sectionDefs.filter(d => window.isPromptApplied(store, d)).length;

  const onApplied = (def) => {
    // Optional: jump to dashboard after applying first area/compliance/findings result
    // Keep behaviour subtle — just leave the user on the AI Assist screen so they can keep going
  };

  // Replace both source fields with the bundled worked example, confirming
  // first if the operator already has content in either field.
  const loadDemo = () => {
    if (summary || transcript) {
      if (!confirm('Replace the current source content with the worked example?')) return;
    }
    // Demo blob layout: "<summary> TRANSCRIPT EXCERPTS: <transcript>"; the
    // transcript half may be absent, hence the (parts[1] || '') fallback.
    const parts = window.BROOK_DEMO_TRANSCRIPT.split('TRANSCRIPT EXCERPTS:');
    store.setAiSource(parts[0].trim(), (parts[1] || '').trim());
  };

  return (
    <div>
      <div className="section-header">
        <div className="eyebrow">AI Assist</div>
        <h1>AI Assist — analyse meeting notes</h1>
        <div className="meta" style={{ marginTop: 12 }}>
          <span className="chip">{totalWords.toLocaleString()} source words</span>
          <span className="chip">{appliedCount} / {sectionDefs.length} sections analysed</span>
          {data._autofill_used && (
            <button className="chip" style={{ cursor: 'pointer', borderColor: 'var(--brook-teal)', color: 'var(--brook-teal)' }} onClick={onJumpToDashboard}>
              View Assessment Dashboard →
            </button>
          )}
        </div>
      </div>

      <p style={{ color: 'var(--text-soft)', fontSize: 14, marginTop: -8, marginBottom: 18, maxWidth: 720 }}>
        Paste the meeting summary and/or full transcript of your initial consultation. Then for each section, copy the auto-built prompt → paste it into <strong>your AI tool of choice</strong> (ChatGPT, Claude.ai, Gemini, Copilot...) → paste the response back here. <strong>You review every suggestion before anything is saved.</strong>
      </p>

      {/* Privacy reminder */}
      <div className="privacy-banner">
        <span className="privacy-icon">⚠</span>
        <div>
          <strong>Privacy reminder.</strong> Your transcript will leave this device when you paste it into an AI tool. Before pasting, replace personal names with initials and remove identifying details. Choose an AI tool whose privacy policy you're comfortable with for client data.
        </div>
      </div>

      {/* Audio recording card — visible for any record that has audio */}
      {data && data.audio && (data.audio.fileId || data.audio.uploadStatus) && (
        <AudioRecordingCard audio={data.audio} />
      )}

      {/* Source content card */}
      <div className="ai-source-card">
        <div className="ai-source-row">
          <label className="autocomplete-label">
            <span>Meeting summary <span style={{ color: 'var(--text-muted)', fontWeight: 400 }}>(optional)</span></span>
            <span className="opt">{wordCount(summary).toLocaleString()} words</span>
          </label>
          <textarea
            className="autocomplete-textarea"
            style={{ minHeight: 140 }}
            value={summary}
            onChange={e => setSummary(e.target.value)}
            placeholder="Paste the executive summary, meeting notes, or call recap..."
          />
        </div>

        <div className="ai-source-row" style={{ marginTop: 14 }}>
          <label className="autocomplete-label">
            <span>Meeting transcript</span>
            <span className="opt">{wordCount(transcript).toLocaleString()} words</span>
          </label>
          <textarea
            className="autocomplete-textarea"
            style={{ minHeight: 220 }}
            value={transcript}
            onChange={e => setTranscript(e.target.value)}
            placeholder="Paste the verbatim meeting transcript here..."
          />
        </div>

        <div style={{ display: 'flex', justifyContent: 'space-between', alignItems: 'center', marginTop: 10 }}>
          <span style={{ fontSize: 12, color: 'var(--text-muted)' }}>{totalWords.toLocaleString()} total words · saved automatically to this device</span>
          <button className="demo-link" onClick={loadDemo}>Load worked example (Westbrook Care Home)</button>
        </div>
      </div>

      {/* Generate prompts grid */}
      <div className="ai-prompts-card">
        <div className="ai-prompts-header">
          <div>
            <h3 style={{ fontSize: 16, marginBottom: 2, color: 'var(--brook-navy)' }}>Generate prompts</h3>
            <div style={{ fontSize: 12, color: 'var(--text-muted)' }}>
              Click a section → copy the prompt → paste into your AI → paste the JSON response back.
            </div>
          </div>
          <button
            className="btn btn-primary"
            disabled={!summary && !transcript}
            onClick={() => setOpenDef(masterDef)}
          >
            ✦ Master prompt — all sections
          </button>
        </div>

        <p style={{ fontSize: 12.5, color: 'var(--text-soft)', padding: '0 18px', marginTop: 14, marginBottom: 12 }}>
          <strong>How it works:</strong> click any section below → copy the prompt → paste it into your AI tool → paste the AI's JSON response back. The "Master prompt" generates every section in one go (use with a capable tool like ChatGPT-4, Claude, or Gemini Advanced).
        </p>

        <div className="prompt-grid">
          {sectionDefs.map(def => {
            const applied = window.isPromptApplied(store, def);
            const ranAt = window.getPromptRanAt(store, def);
            return (
              <div key={def.key} className={`prompt-card${applied ? ' applied' : ''}`}>
                <div className="prompt-card-body">
                  <div className="prompt-card-name">{def.label}</div>
                  <div className="prompt-card-status">
                    {applied
                      ? <span className="applied-badge">✓ Applied{ranAt ? ` · ${new Date(ranAt).toLocaleDateString('en-GB', { day: 'numeric', month: 'short' })}` : ''}</span>
                      : <span style={{ color: 'var(--text-muted)' }}>Not yet generated</span>}
                  </div>
                </div>
                <button
                  className="btn"
                  disabled={!summary && !transcript}
                  onClick={() => setOpenDef(def)}
                  style={{ fontSize: 12.5, padding: '6px 14px' }}
                >
                  {applied ? 'Re-do' : 'Open prompt'}
                </button>
              </div>
            );
          })}
        </div>

        <div style={{ padding: '14px 18px', borderTop: '1px solid var(--border)', fontSize: 11.5, color: 'var(--text-muted)', background: 'var(--bg-soft)' }}>
          🔒 Bring your own AI: copy the prompt → paste into ChatGPT / Claude / Gemini → paste the response back. Free, private, works on any device.
        </div>
      </div>

      {/* TV Feed Approval — only shown when the assessment dashboard has data (operator workflow) */}
      <TVFeedApprovalCard store={store} />

      {openDef && (
        <PromptModal
          open={!!openDef}
          onClose={() => setOpenDef(null)}
          def={openDef}
          summary={summary}
          transcript={transcript}
          store={store}
          onApplied={onApplied}
        />
      )}
    </div>
  );
}

// ---------------------------------------------------------------
// TV Feed Approval card
//
// Auto-shown once the assessment dashboard has at least one applied area.
// Operator picks which evaluation area / recommended service to feature; the
// app deterministically generates an anonymised one-line summary. Soft
// validation: flags org name, postcode, contact name, or digit runs of 3+
// but lets the operator override with a confirmation.
// ---------------------------------------------------------------
// TV Feed Approval card: lets the operator assemble an anonymised one-line
// insight (area + service + abstracted sector/region), soft-validates it for
// identifying details, and pushes it to the shared-Drive TV folder.
//
// Props:
//   store — app store; reads store.data (aiResults, fields, tvApprovals),
//           writes via store.updateActive, identifies pushes by store.deviceId
function TVFeedApprovalCard({ store }) {
  const cardRef = React.useRef(null);
  const data = store.data;
  const ai = data?.aiResults || {};

  // Apply event listener — auto-scroll into view when an AI prompt just landed
  React.useEffect(() => {
    if (!cardRef.current) return;
    // Find the most-recently applied area; if any area has a ranAt within the
    // last 5 seconds, scroll the TV card into view.
    let mostRecent = 0;
    Object.values(ai).forEach(v => {
      const t = v && v.ranAt ? Date.parse(v.ranAt) : 0;
      if (t > mostRecent) mostRecent = t;
    });
    if (mostRecent && (Date.now() - mostRecent) < 5_000) {
      // Small delay so the card has rendered/expanded before scrolling.
      setTimeout(() => {
        try { cardRef.current.scrollIntoView({ behavior: 'smooth', block: 'center' }); } catch {}
      }, 250);
    }
  }, [data?.aiResults]);

  // Build the picker options: every area in aiResults that has data
  const areaDefs = window.BROOK_SCHEMA?.evaluationAreas || [];
  const populatedAreas = areaDefs.filter(a => ai[a.id] && (ai[a.id].suggestedGrade || ai[a.id].judgement));

  const services = (data?.fields?.recommended_services || []);

  // Card is rendered only once some AI output exists: a graded area, any
  // "areas for development" finding, or an executive-summary assessment.
  const hasAnyAi = populatedAreas.length > 0 || (ai.findings?.afd || []).length > 0 || (ai.executive_summary?.overall_assessment);

  // ----- Picker state -----
  const [areaId, setAreaId] = React.useState('');
  const [service, setService] = React.useState('');
  const [sector, setSector] = React.useState(data?.fields?.org_type || '');
  const [region, setRegion] = React.useState(data?.fields?.la_region || '');
  const [line, setLine] = React.useState('');
  // touched = operator hand-edited the line; suppresses auto-regeneration.
  const [touched, setTouched] = React.useState(false);
  const [approving, setApproving] = React.useState(false);
  const [approved, setApproved] = React.useState(null);
  const [error, setError] = React.useState(null);

  // Default selections once the dashboard populates
  React.useEffect(() => {
    // Only pick a default once, and only when there is something to pick.
    if (areaId || populatedAreas.length === 0) return;
    // Default to the lowest-graded area (most newsworthy)
    const order = ['critical', 'needs_improvement', 'adequate', 'strong', 'exceptional', 'insufficient_evidence'];
    const sorted = [...populatedAreas].sort((a, b) =>
      order.indexOf(ai[a.id]?.suggestedGrade || 'insufficient_evidence') -
      order.indexOf(ai[b.id]?.suggestedGrade || 'insufficient_evidence')
    );
    setAreaId(sorted[0]?.id || populatedAreas[0]?.id || '');
  }, [populatedAreas.length]);

  // Default the service picker to the first recommended service, once known.
  React.useEffect(() => {
    if (!service && services.length) setService(services[0]);
  }, [services.length]);

  // Backfill sector/region from record fields if they arrive after mount;
  // never overwrite a value the operator has already entered.
  React.useEffect(() => { if (!sector) setSector(data?.fields?.org_type || ''); }, [data?.fields?.org_type]);
  React.useEffect(() => { if (!region) setRegion(data?.fields?.la_region || ''); }, [data?.fields?.la_region]);

  // Deterministic line builder — runs whenever picker changes (unless user has edited the textarea)
  React.useEffect(() => {
    if (touched) return;
    const area = areaDefs.find(a => a.id === areaId);
    if (!area || !sector) { setLine(''); return; }
    const headline = area.name;
    // sector is truthy here (guard above), so this fallback is effectively
    // unreachable; kept as written for safety.
    const sectorLabel = sector || 'organisation';
    const regionPart = region ? ` in ${region}` : '';
    const servicePart = service ? ` Recommended focus: ${service}.` : '';
    setLine(`${sectorLabel}${regionPart} — top finding: ${headline}.${servicePart}`);
  }, [areaId, service, sector, region, touched]);

  // Soft validation
  const orgName = data?.fields?.org_name || '';
  const clientName = data?.fields?.client_contact_name || '';
  // UK postcode shape (e.g. "M1 1AA"); case-insensitive.
  const postcodeRe = /\b[A-Z]{1,2}\d{1,2}[A-Z]?\s*\d[A-Z]{2}\b/i;
  // Any run of 3+ digits — specific figures are too identifying for a TV line.
  const longDigitsRe = /\d{3,}/;

  // Returns a list of { phrase, reason } warnings for identifying content in
  // the line. Advisory only — the operator can override at approve time.
  const warnings = React.useMemo(() => {
    if (!line) return [];
    const out = [];
    if (orgName && line.toLowerCase().includes(orgName.toLowerCase())) {
      out.push({ phrase: orgName, reason: 'Contains the organisation name. Abstract to “a small care home” or similar.' });
    }
    if (clientName) {
      // Only match first names of 3+ chars to avoid false positives on
      // short names/initials appearing inside other words.
      const firstName = clientName.split(/\s+/)[0];
      if (firstName && firstName.length >= 3 && line.toLowerCase().includes(firstName.toLowerCase())) {
        out.push({ phrase: firstName, reason: 'Contains a contact name. Remove names from public-facing copy.' });
      }
    }
    const pc = line.match(postcodeRe);
    if (pc) out.push({ phrase: pc[0], reason: 'Looks like a postcode. Abstract to region only.' });
    const digits = line.match(longDigitsRe);
    if (digits) out.push({ phrase: digits[0], reason: 'Specific number — consider rephrasing (e.g. “a six-figure budget”).' });
    return out;
  }, [line, orgName, clientName]);

  // Validate (with operator override), upload the line to the TV folder on
  // the shared Drive, and log the approval on the record.
  const approve = async () => {
    setError(null);
    if (!line.trim()) { setError('Write or pick a line first.'); return; }

    if (warnings.length > 0) {
      const ok = confirm(`${warnings.length} validation warning${warnings.length === 1 ? '' : 's'}:\n\n${warnings.map(w => `• "${w.phrase}" — ${w.reason}`).join('\n')}\n\nPush to TV anyway?`);
      if (!ok) return;
    }

    setApproving(true);
    try {
      const settings = window.loadDriveSettings();
      if (!settings.clientId || !settings.sharedDriveId) {
        throw new Error('Drive sync is not configured. Open Sync Settings to set it up before pushing to TV.');
      }
      if (!window.hasValidToken()) {
        await window.ensureToken(settings.clientId);
      }
      // Resolve (or create) Main folder → TV folder, then upload the line.
      // NOTE(review): findOrCreateFolder appears to take (name, parentId,
      // driveId) — confirm against the Drive helper module.
      const main = await window.findOrCreateFolder(window.BROOK_MAIN_FOLDER, settings.sharedDriveId, settings.sharedDriveId);
      const tv = await window.findOrCreateFolder(window.BROOK_TV_FOLDER, main, settings.sharedDriveId);
      await window.uploadTVLine(tv, line.trim(), store.deviceId, settings.sharedDriveId);

      // Log on the record
      store.updateActive(d => ({
        ...d,
        tvApprovals: [...(d.tvApprovals || []), {
          line: line.trim(),
          approvedAt: new Date().toISOString(),
          approvedBy: store.deviceId
        }]
      }));
      setApproved(line.trim());
    } catch (e) {
      setError(e.message || String(e));
    } finally {
      setApproving(false);
    }
  };

  // Hide the card entirely until the dashboard has AI output. (Hooks above
  // must run unconditionally, so this early return sits after them.)
  if (!hasAnyAi) return null;

  const previousApprovals = data?.tvApprovals || [];

  return (
    <div ref={cardRef} className="tv-approval-card">
      <div className="tv-approval-header">
        <div>
          <div className="eyebrow" style={{ color: 'var(--brook-amber)' }}>✦ TV FEED APPROVAL</div>
          <h3>Push an anonymised insight to the stand TVs</h3>
        </div>
        {previousApprovals.length > 0 && (
          <span className="tv-approval-count">{previousApprovals.length} pushed from this session</span>
        )}
      </div>
      <p className="tv-approval-help">
        Pick the area and service to feature. The line is built deterministically — review and edit before pushing. Once approved, it appears on the TV displays within ~20 seconds.
      </p>

      <div className="tv-approval-pickers">
        <div className="field">
          <label className="field-label">Headline area</label>
          <select className="select" value={areaId} onChange={e => { setAreaId(e.target.value); setTouched(false); }}>
            <option value="">— Choose —</option>
            {populatedAreas.map(a => {
              const g = window.getGradeStyle ? window.getGradeStyle(ai[a.id]?.suggestedGrade).label : '';
              return <option key={a.id} value={a.id}>{a.name}{g ? ` (${g})` : ''}</option>;
            })}
          </select>
        </div>
        <div className="field">
          <label className="field-label">Recommended focus</label>
          <select className="select" value={service} onChange={e => { setService(e.target.value); setTouched(false); }}>
            <option value="">— None —</option>
            {(window.BROOK_SCHEMA?.brookServices || []).map(s => <option key={s} value={s}>{s}</option>)}
          </select>
        </div>
        <div className="field">
          <label className="field-label">Sector (abstracted)</label>
          <input className="input" value={sector} onChange={e => { setSector(e.target.value); setTouched(false); }} placeholder="e.g. Care home, Nonprofit, Faith-based school" />
        </div>
        <div className="field">
          <label className="field-label">Region (broad)</label>
          <input className="input" value={region} onChange={e => { setRegion(e.target.value); setTouched(false); }} placeholder="e.g. Greater Manchester" />
        </div>
      </div>

      <div className="field" style={{ marginTop: 8 }}>
        <label className="field-label">TV line preview <span className="muted-help">— edit freely</span></label>
        <textarea
          className="textarea"
          rows={2}
          value={line}
          onChange={e => { setLine(e.target.value); setTouched(true); }}
          placeholder="Sector in region — top finding: …. Recommended focus: …"
          maxLength={220}
          style={warnings.length > 0 ? { borderColor: 'var(--warning)' } : {}}
        />
        <div className="tv-approval-meta">
          <span>{line.length} / 220 chars</span>
          {touched && <button className="demo-link" onClick={() => setTouched(false)}>Reset to auto-generated</button>}
        </div>
      </div>

      {warnings.length > 0 && (
        <div className="tv-approval-warn">
          <strong>⚠ {warnings.length} validation warning{warnings.length === 1 ? '' : 's'}</strong>
          <ul>
            {warnings.map((w, i) => <li key={i}><code>{w.phrase}</code> — {w.reason}</li>)}
          </ul>
          <div className="tv-approval-warn-sub">You can still push, but the soft validation flagged these. The TV is a public display — keep it abstract.</div>
        </div>
      )}

      {error && (
        <div className="app-banner error" style={{ borderRadius: 6, marginTop: 10, border: '1px solid #f0bfb9' }}>
          <span><strong>•</strong> {error}</span>
        </div>
      )}

      {approved && (
        <div className="app-banner success" style={{ borderRadius: 6, marginTop: 10, border: '1px solid #b9dec5' }}>
          <span><strong>✓ Pushed to TV:</strong> “{approved}”</span>
        </div>
      )}

      <div className="tv-approval-actions">
        <button className="btn btn-amber" disabled={approving || !line.trim()} onClick={approve}>
          {approving ? 'Pushing…' : '✦ Approve & push to TV'}
        </button>
      </div>

      {previousApprovals.length > 0 && (
        <details className="tv-approval-history">
          <summary>Previously pushed from this session ({previousApprovals.length})</summary>
          <ul>
            {previousApprovals.slice().reverse().map((a, i) => (
              <li key={i}>
                <span className="tv-approval-history-time">{new Date(a.approvedAt).toLocaleTimeString([], { hour: '2-digit', minute: '2-digit' })}</span>
                {a.line}
              </li>
            ))}
          </ul>
        </details>
      )}
    </div>
  );
}

// Expose the components globally for the script-tag (no-bundler) setup.
window.AiAssistView = AiAssistView;
window.PromptModal = PromptModal;
window.TVFeedApprovalCard = TVFeedApprovalCard;
