// shared.jsx — Icons, VoiceField, helpers exposed on window

const I = {
  Mic: (p) => <svg viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="1.75" strokeLinecap="round" strokeLinejoin="round" {...p}><rect x="9" y="2" width="6" height="12" rx="3"/><path d="M5 10v2a7 7 0 0 0 14 0v-2"/><path d="M12 19v3"/></svg>,
  Stop: (p) => <svg viewBox="0 0 24 24" fill="currentColor" {...p}><rect x="7" y="7" width="10" height="10" rx="2"/></svg>,
  ArrowRight: (p) => <svg viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="1.75" strokeLinecap="round" strokeLinejoin="round" {...p}><path d="M5 12h14"/><path d="m13 5 7 7-7 7"/></svg>,
  ArrowLeft: (p) => <svg viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="1.75" strokeLinecap="round" strokeLinejoin="round" {...p}><path d="M19 12H5"/><path d="m11 5-7 7 7 7"/></svg>,
  Check: (p) => <svg viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="2.25" strokeLinecap="round" strokeLinejoin="round" {...p}><path d="M20 6 9 17l-5-5"/></svg>,
  Clock: (p) => <svg viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="1.75" strokeLinecap="round" strokeLinejoin="round" {...p}><circle cx="12" cy="12" r="10"/><path d="M12 6v6l4 2"/></svg>,
  Chat: (p) => <svg viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="1.75" strokeLinecap="round" strokeLinejoin="round" {...p}><path d="M21 15a2 2 0 0 1-2 2H7l-4 4V5a2 2 0 0 1 2-2h14a2 2 0 0 1 2 2z"/></svg>,
  Mail: (p) => <svg viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="1.75" strokeLinecap="round" strokeLinejoin="round" {...p}><rect width="20" height="16" x="2" y="4" rx="2"/><path d="m22 7-10 6L2 7"/></svg>,
  Plus: (p) => <svg viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="1.75" strokeLinecap="round" strokeLinejoin="round" {...p}><path d="M12 5v14M5 12h14"/></svg>,
  Play: (p) => <svg viewBox="0 0 24 24" fill="currentColor" {...p}><path d="M8 5v14l11-7z"/></svg>,
  Read: (p) => <svg viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="1.75" strokeLinecap="round" strokeLinejoin="round" {...p}><path d="M2 4h7a3 3 0 0 1 3 3v13a2 2 0 0 0-2-2H2zM22 4h-7a3 3 0 0 0-3 3v13a2 2 0 0 1 2-2h8z"/></svg>,
  Build: (p) => <svg viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="1.75" strokeLinecap="round" strokeLinejoin="round" {...p}><path d="M14.7 6.3a1 1 0 0 0 0 1.4l1.6 1.6a1 1 0 0 0 1.4 0l3.77-3.77a6 6 0 0 1-7.94 7.94l-6.91 6.91a2.12 2.12 0 0 1-3-3l6.91-6.91a6 6 0 0 1 7.94-7.94l-3.76 3.76z"/></svg>,
  Calendar: (p) => <svg viewBox="0 0 24 24" fill="none" stroke="currentColor" strokeWidth="1.75" strokeLinecap="round" strokeLinejoin="round" {...p}><rect width="18" height="18" x="3" y="4" rx="2"/><path d="M16 2v4M8 2v4M3 10h18"/></svg>,
};
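
// All icons size to their container, inherit `currentColor`, and pass extra SVG
// props through via spread — e.g. <I.Mic width={16} height={16}/>.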

// Plausible German transcription samples (used to mock the Whisper API)
const TRANSCRIPT_SAMPLES = {
  pain: "Ehrlich gesagt der ganze Reporting-Kram am Monatsende. Wir ziehen Daten aus drei verschiedenen Tools, kopieren das in Sheets, und am Ende sitzt jemand zwei Tage dran nur um das aufzubereiten. Das nervt eigentlich alle bei uns.",
  usecases: "Erstens, die wöchentlichen Outbound-Sequenzen — wir personalisieren die noch händisch pro Lead, das frisst Stunden. Zweitens, Lead-Recherche vor Calls. Wir würden gern automatisch das Unternehmen, die letzten LinkedIn-Posts und Pressemitteilungen zusammenfassen lassen. Und drittens vielleicht das Intake-Formular nach dem Erstgespräch — da landen aktuell viele Infos verstreut in Slack.",
  attempts: "Wir haben Make ein paar Wochen ausprobiert für die Lead-Recherche. Die Flows waren erstmal okay, aber sobald wir Edge-Cases hatten — fehlende LinkedIn-Profile, komische Domains — sind die Automationen still gestorben und niemand hat's gemerkt. Reporting wollten wir mit Looker Studio lösen, aber die Datenpipeline davor ist halt das eigentliche Problem, nicht die Visualisierung.",
  inspiration: "Bei Lemlist hatte ich neulich eine Demo gesehen, wo deren Outbound vollständig auf Signalen läuft — also Job-Wechsel, Funding, Tool-Stack — und das Sales-Team kriegt morgens eine Liste mit fertigen Erstkontakten. Sowas in der Richtung, aber halt für unseren Stack.",
  success: "Wenn wir nach dem Call wissen, ob das überhaupt für uns passt, und einen ehrlichen Vorschlag haben mit was es ungefähr kostet und in welchem Zeitraum man da Ergebnisse sieht. Kein Pitch, lieber direkt konkret werden.",
  bonus: "Budget liegt grob im niedrig fünfstelligen Bereich pro Monat, wir wollen idealerweise im nächsten Quartal starten. Mein Co-Founder Max wird beim Call dabei sein, der ist eher technisch.",
};

const fakeTranscript = (key) => TRANSCRIPT_SAMPLES[key] || "Hier ist eine Beispiel-Transkription deiner Sprachnachricht — du kannst den Text nachträglich beliebig editieren.";

// ─── VoiceField ───────────────────────────────────────────────────────────
// Pick a MIME type the browser actually supports for MediaRecorder.
// Safari needs mp4, Chrome/Firefox prefer webm/opus.
const pickMime = () => {
  if (typeof MediaRecorder === 'undefined') return null;
  const candidates = ['audio/webm;codecs=opus', 'audio/webm', 'audio/mp4', 'audio/ogg'];
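  // '' (still falsy) means none of the candidates matched — the caller then
  // constructs MediaRecorder without a mimeType and lets the browser pick.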
  return candidates.find(m => MediaRecorder.isTypeSupported(m)) || '';
};

// Words/phrases Whisper hallucinates on near-silent audio — drop the result when a
// short transcript matches one of them. The primary defense is the in-browser
// silence detector; this list is a safety net.
const WHISPER_HALLUCINATIONS = [
  'untertitel der amara.org-community',
  'untertitel von amara.org',
  'untertitelung im auftrag des zdf',
  'vielen dank fürs zuschauen',
  'untertitel im auftrag',
  'mehr informationen auf www.',
  'thank you.',
  'thanks for watching',
  '.',
];

const isHallucination = (text) => {
  const t = text.trim().toLowerCase();
  if (!t) return true;
  // Trivial patterns like '.' must match exactly — a plain includes() would drop
  // every short transcript that merely ends in a period.
  if (t.length < 60 && WHISPER_HALLUCINATIONS.some(h => (h.length <= 2 ? t === h : t.includes(h)))) return true;
  // Short transcripts that are basically just a URL.
  if (t.length < 80 && /^[^a-zäöüß]*(www\.|http|amara\.org|hansgrohe)/i.test(t)) return true;
  return false;
};
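
// Examples:
//   isHallucination('Untertitel der Amara.org-Community') → true  (known filler phrase)
//   isHallucination('Wir nutzen Slack.')                  → false (legitimate short answer)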

const transcribeBlob = async (blob) => {
  const form = new FormData();
  const ext = (blob.type.includes('mp4') ? 'mp4' : blob.type.includes('ogg') ? 'ogg' : 'webm');
  form.append('file', blob, `recording.${ext}`);
  form.append('model', 'whisper-1');
  form.append('language', 'de');
  form.append('response_format', 'json');
  form.append('temperature', '0');
  // Anti-hallucination prompt: gives Whisper a non-empty German context, so it's
  // less likely to fall back to its training-data phrases on quiet audio.
  form.append('prompt', 'Sprachnotiz auf Deutsch zu Geschäftsprozessen, Tools und Automatisierung.');
  console.log('[voice] uploading', { size: blob.size, type: blob.type, ext });
  const res = await fetch('/api/transcribe', { method: 'POST', body: form });
  const bodyText = await res.text();
  if (!res.ok) {
    console.error('[voice] transcribe HTTP', res.status, bodyText);
    throw new Error(`transcribe HTTP ${res.status}: ${bodyText.slice(0, 200)}`);
  }
  let data;
  try { data = JSON.parse(bodyText); }
  catch (e) { throw new Error('Non-JSON response: ' + bodyText.slice(0, 200)); }
  if (typeof data.text !== 'string') throw new Error('No text in response: ' + JSON.stringify(data).slice(0, 200));
  console.log('[voice] transcription ok', { len: data.text.length });
  return data.text.trim();
};
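
/*
 * transcribeBlob assumes a same-origin /api/transcribe endpoint that forwards the
 * multipart body to OpenAI's /v1/audio/transcriptions and returns its JSON
 * unchanged, keeping the API key server-side. A minimal sketch of such a proxy —
 * assuming Node 18+ (global fetch) and an Express app with no body-parsing
 * middleware on this route, so the multipart stream passes through untouched:
 *
 *   app.post('/api/transcribe', async (req, res) => {
 *     const upstream = await fetch('https://api.openai.com/v1/audio/transcriptions', {
 *       method: 'POST',
 *       headers: {
 *         Authorization: `Bearer ${process.env.OPENAI_API_KEY}`,
 *         'content-type': req.headers['content-type'], // keep the multipart boundary
 *       },
 *       body: req,      // Node streams are async iterables; undici accepts them…
 *       duplex: 'half', // …but requires duplex:'half' for streamed request bodies
 *     });
 *     res.status(upstream.status).type('application/json').send(await upstream.text());
 *   });
 */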

const BAR_COUNT = 28;
const SILENCE_PEAK_THRESHOLD = 12; // 0–255 from analyser; below = essentially silent

const VoiceField = ({ value, onChange, placeholder, multiline = true, rows = 5, autoFocus = false, hint, sampleKey = 'pain' }) => {
  const [recording, setRecording] = React.useState(false);
  const [transcribing, setTranscribing] = React.useState(false);
  const [seconds, setSeconds] = React.useState(0);
  const [error, setError] = React.useState(null);
  const [levels, setLevels] = React.useState(() => new Array(BAR_COUNT).fill(0));
  const inputRef = React.useRef(null);
  const timerRef = React.useRef(null);
  const recorderRef = React.useRef(null);
  const chunksRef = React.useRef([]);
  const streamRef = React.useRef(null);
  const audioCtxRef = React.useRef(null);
  const analyserRef = React.useRef(null);
  const rafRef = React.useRef(null);
  const peakRef = React.useRef(0);

  React.useEffect(() => {
    if (autoFocus && inputRef.current) inputRef.current.focus();
  }, [autoFocus]);

  React.useEffect(() => {
    if (recording) {
      setSeconds(0);
      timerRef.current = setInterval(() => setSeconds(s => s + 1), 1000);
    } else {
      clearInterval(timerRef.current);
    }
    return () => clearInterval(timerRef.current);
  }, [recording]);

  React.useEffect(() => () => {
    // Cleanup on unmount: stop any live mic stream + analyser so the indicator turns off.
    if (rafRef.current) cancelAnimationFrame(rafRef.current);
    if (streamRef.current) streamRef.current.getTracks().forEach(t => t.stop());
    if (audioCtxRef.current && audioCtxRef.current.state !== 'closed') audioCtxRef.current.close().catch(() => {});
  }, []);

  const teardownAnalyser = () => {
    if (rafRef.current) { cancelAnimationFrame(rafRef.current); rafRef.current = null; }
    if (audioCtxRef.current && audioCtxRef.current.state !== 'closed') {
      audioCtxRef.current.close().catch(() => {});
    }
    audioCtxRef.current = null;
    analyserRef.current = null;
    setLevels(new Array(BAR_COUNT).fill(0));
  };

  const fmt = (s) => `${Math.floor(s/60)}:${(s%60).toString().padStart(2,'0')}`;
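  // e.g. fmt(65) → '1:05'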

  const insertText = (t) => {
    const next = (value || '').trim() ? `${value.trim()} ${t}` : t;
    onChange(next);
    setTimeout(() => inputRef.current && inputRef.current.focus(), 50);
  };

  const start = async () => {
    setError(null);
    if (!navigator.mediaDevices?.getUserMedia || typeof MediaRecorder === 'undefined') {
      setError('Mikrofon wird in diesem Browser nicht unterstützt. Bitte tippen.');
      return;
    }
    try {
      const stream = await navigator.mediaDevices.getUserMedia({
        audio: { echoCancellation: true, noiseSuppression: true, autoGainControl: true },
      });
      streamRef.current = stream;

      // Live analyser — drives the waveform + tracks peak volume to detect silence.
      const AC = window.AudioContext || window.webkitAudioContext;
      const audioCtx = new AC();
      const source = audioCtx.createMediaStreamSource(stream);
      const analyser = audioCtx.createAnalyser();
      analyser.fftSize = 64; // 32 frequency bins, we use 28
      analyser.smoothingTimeConstant = 0.55;
      source.connect(analyser);
      audioCtxRef.current = audioCtx;
      analyserRef.current = analyser;
      peakRef.current = 0;

      const buf = new Uint8Array(analyser.frequencyBinCount);
      const tick = () => {
        if (!analyserRef.current) return;
        analyser.getByteFrequencyData(buf);
        const next = new Array(BAR_COUNT);
        for (let i = 0; i < BAR_COUNT; i++) next[i] = buf[i] || 0;
        setLevels(next);
        let max = 0;
        for (let i = 0; i < buf.length; i++) if (buf[i] > max) max = buf[i];
        if (max > peakRef.current) peakRef.current = max;
        rafRef.current = requestAnimationFrame(tick);
      };
      tick();

      const mime = pickMime();
      const rec = mime ? new MediaRecorder(stream, { mimeType: mime }) : new MediaRecorder(stream);
      chunksRef.current = [];
      rec.ondataavailable = (e) => { if (e.data && e.data.size > 0) chunksRef.current.push(e.data); };
      rec.onstop = async () => {
        const blob = new Blob(chunksRef.current, { type: rec.mimeType || 'audio/webm' });
        const peak = peakRef.current;
        stream.getTracks().forEach(t => t.stop());
        streamRef.current = null;
        teardownAnalyser();
        console.log('[voice] recording stopped', { size: blob.size, type: blob.type, peak });

        if (blob.size === 0) {
          setTranscribing(false);
          setError('Aufnahme leer. Mikrofon klemmt? Bitte erneut versuchen.');
          return;
        }
        if (peak < SILENCE_PEAK_THRESHOLD) {
          setTranscribing(false);
          setError('Mikrofon hat (fast) keinen Ton aufgenommen. Falsches Eingabegerät? In den Systemeinstellungen prüfen.');
          return;
        }

        try {
          const text = await transcribeBlob(blob);
          if (!text) throw new Error('Leeres Transkript');
          if (isHallucination(text)) {
            console.warn('[voice] dropped likely hallucination', text);
            setError('Whisper hat keinen klaren Inhalt erkannt. Versuch es nochmal etwas lauter und länger.');
          } else {
            insertText(text);
          }
        } catch (err) {
          console.warn('[voice] transcription failed, using mock fallback', err);
          setError(`Transkription fehlgeschlagen (${err.message || err}). Demo-Text eingefügt.`);
          insertText(fakeTranscript(sampleKey));
        } finally {
          setTranscribing(false);
        }
      };
      recorderRef.current = rec;
      rec.start();
      setRecording(true);
    } catch (err) {
      console.warn('[voice] mic permission / start failed', err);
      // Release the mic if getUserMedia succeeded but a later step threw, so the
      // browser's recording indicator turns off.
      if (streamRef.current) { streamRef.current.getTracks().forEach(t => t.stop()); streamRef.current = null; }
      teardownAnalyser();
      setError('Mikrofonzugriff erforderlich. Oder einfach tippen.');
    }
  };

  const stop = () => {
    setRecording(false);
    setTranscribing(true);
    try {
      recorderRef.current?.stop();
    } catch (err) {
      console.warn('[voice] stop failed', err);
      setTranscribing(false);
    }
  };

  const toggle = () => recording ? stop() : start();

  const InputEl = multiline ? 'textarea' : 'input';

  return (
    <div className={`vfield ${recording ? 'recording' : ''}`}>
      <InputEl
        ref={inputRef}
        value={value || ''}
        onChange={(e) => onChange(e.target.value)}
        placeholder={recording ? '' : placeholder}
        rows={multiline ? rows : undefined}
        disabled={transcribing}
        style={transcribing ? { opacity: .55 } : {}}
      />
      <div className="vfield-tools">
        <button
          type="button"
          className={`mic-btn ${recording ? 'active' : ''}`}
          onClick={toggle}
          title={recording ? 'Aufnahme stoppen' : 'Sprachnachricht aufnehmen'}
        >
          {recording ? <I.Stop/> : <I.Mic/>}
        </button>
      </div>
      {recording && (
        <div className="recording-bar">
          <span className="rec-dot"/>
          <span style={{minWidth: 86}}>Nimmt auf · {fmt(seconds)}</span>
          <div className="rec-waveform">
            {levels.map((lvl, i) => {
              const h = 2 + Math.round((lvl / 255) * 16); // 2px..18px
              return (
                <div
                  key={i}
                  className="bar bar-live"
                  style={{ height: h, animation: 'none', transition: 'height 60ms linear' }}
                />
              );
            })}
          </div>
          <button type="button" onClick={stop} style={{
            background:'transparent', border:'none', cursor:'pointer',
            font:'600 13px/1 var(--bw-font-primary)', color:'var(--bw-pulse-600)',
            padding:'6px 10px', borderRadius: 6
          }}>Stopp</button>
        </div>
      )}
      {transcribing && (
        <div className="transcribing">
          <span className="spinner"/>
          <span>Wird transkribiert…</span>
        </div>
      )}
      {hint && !recording && !transcribing && !error && (
        <div style={{ padding: '0 20px 14px', font: '400 13px/1.4 var(--bw-font-primary)', color: 'var(--bw-carbon-500)' }}>
          {hint}
        </div>
      )}
      {error && !recording && (
        <div style={{ padding: '0 20px 14px', font: '500 13px/1.4 var(--bw-font-primary)', color: 'var(--bw-pulse-600)' }}>
          {error}
        </div>
      )}
    </div>
  );
};
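
// Example usage — a hypothetical parent component. VoiceField is fully controlled
// via value/onChange, so transcribed and typed text land in the same state:
//
//   const [pain, setPain] = React.useState('');
//   <VoiceField
//     value={pain}
//     onChange={setPain}
//     placeholder="Was kostet euch gerade am meisten Zeit?"
//     sampleKey="pain"
//   />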

// ─── Topbar ────────────────────────────────────────────────────────────────
const Topbar = ({ step, totalSteps, name, callDateLabel, minimal }) => (
  <header className="topbar">
    <div className="brand">
      <img src="assets/logo-bakedwith.png" alt="bakedwith"/>
    </div>
    {!minimal && (name || callDateLabel) && (
      <div className="meta">
        {callDateLabel && (
          <span className="meta-time">
            <span className="dot"/>
            Termin · {callDateLabel}
          </span>
        )}
        {callDateLabel && name && <span className="sep">·</span>}
        {name && <span className="name">{name}</span>}
      </div>
    )}
  </header>
);

// ─── Helpers ───────────────────────────────────────────────────────────────
// Format an ISO-8601 timestamp as "Donnerstag, 30. April · 14:30" (de-DE).
const formatCallDate = (iso) => {
  if (!iso) return '';
  const d = new Date(iso);
  if (Number.isNaN(d.getTime())) return iso;
  const date = d.toLocaleDateString('de-DE', { weekday: 'long', day: 'numeric', month: 'long' });
  const time = d.toLocaleTimeString('de-DE', { hour: '2-digit', minute: '2-digit' });
  return `${date} · ${time}`;
};
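
// e.g. formatCallDate('2026-04-30T14:30') → 'Donnerstag, 30. April · 14:30'
// (exact strings depend on the runtime's de-DE locale data).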

const readBriefingFromUrl = () => {
  const params = new URLSearchParams(window.location.search);
  const id = params.get('id') || (crypto.randomUUID ? crypto.randomUUID() : `local-${Date.now()}`);
  const rawDate = params.get('d');
  return {
    id,
    name: params.get('n') || 'Freund:in',
    email: params.get('e') || null,
    callDateIso: rawDate || null,
    callDateLabel: rawDate ? formatCallDate(rawDate) : 'Termin folgt',
  };
};
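
// Example (hypothetical values):
//   /briefing?id=abc123&n=Lena&e=lena%40example.com&d=2026-04-30T14%3A30
//   → { id: 'abc123', name: 'Lena', email: 'lena@example.com',
//       callDateIso: '2026-04-30T14:30', callDateLabel: 'Donnerstag, 30. April · 14:30' }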

Object.assign(window, { I, VoiceField, Topbar, fakeTranscript, formatCallDate, readBriefingFromUrl });
