import React, { useState, useEffect, useRef } from 'react';
import { Mic, Loader2, Brain } from 'lucide-react';
import { transcribeWithWhisper } from '../utils/whisper';
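// Push-to-talk microphone button: records audio with MediaRecorder,
// transcribes it via the Whisper helper, and reports the resulting text
// through the onTranscript callback. className lets callers extend styling.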
export function MicButton({ onTranscript, className = '' }) {
  const [state, setState] = useState('idle'); // idle | recording | transcribing | processing
  const [error, setError] = useState(null);
  const mediaRecorderRef = useRef(null);
  const streamRef = useRef(null);
  const chunksRef = useRef([]);
  const lastTapRef = useRef(0);
  // Start recording
  const startRecording = async () => {
    try {
      console.log('Starting recording...');
      setError(null);
      chunksRef.current = [];
      const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
      streamRef.current = stream;
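      // Prefer audio/webm; Safari's MediaRecorder doesn't support it, so fall back to audio/mp4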
      const mimeType = MediaRecorder.isTypeSupported('audio/webm') ? 'audio/webm' : 'audio/mp4';
      const recorder = new MediaRecorder(stream, { mimeType });
      mediaRecorderRef.current = recorder;
      recorder.ondataavailable = (e) => {
        if (e.data.size > 0) {
          chunksRef.current.push(e.data);
        }
      };
      recorder.onstop = async () => {
        console.log('Recording stopped, creating blob...');
        const blob = new Blob(chunksRef.current, { type: mimeType });
        // Clean up stream
        if (streamRef.current) {
          streamRef.current.getTracks().forEach(track => track.stop());
          streamRef.current = null;
        }
        // Start transcribing
        setState('transcribing');
        // Check if we're in an enhancement mode
        const whisperMode = window.localStorage.getItem('whisperMode') || 'default';
        const enhancementModes = ['prompt', 'vibe', 'instructions', 'architect'];
        const isEnhancementMode = enhancementModes.includes(whisperMode);
        // Enhancement modes take longer, so switch the UI to a distinct
        // "processing" state if transcription is still running after 2 seconds
        let processingTimer;
        if (isEnhancementMode) {
          processingTimer = setTimeout(() => {
            setState('processing');
          }, 2000);
        }
        try {
          const text = await transcribeWithWhisper(blob);
          if (text && onTranscript) {
            onTranscript(text);
          }
        } catch (err) {
          console.error('Transcription error:', err);
          setError(err.message);
        } finally {
          if (processingTimer) {
            clearTimeout(processingTimer);
          }
          setState('idle');
        }
      };
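      // start() without a timeslice buffers the whole recording and fires a
      // single ondataavailable event when the recorder stops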
      recorder.start();
      setState('recording');
      console.log('Recording started successfully');
    } catch (err) {
      console.error('Failed to start recording:', err);
      // getUserMedia rejects with NotAllowedError on permission denial;
      // surface other failures (no device, unsupported mimeType) as-is
      setError(err.name === 'NotAllowedError' ? 'Microphone access denied' : err.message);
      setState('idle');
    }
  };
  // Stop recording
  const stopRecording = () => {
    console.log('Stopping recording...');
    if (mediaRecorderRef.current && mediaRecorderRef.current.state === 'recording') {
      mediaRecorderRef.current.stop();
      // Don't set component state here - the onstop handler does it
    } else {
      // If the recorder isn't actually recording, force cleanup
      console.log('Recorder not in recording state, forcing cleanup');
      if (streamRef.current) {
        streamRef.current.getTracks().forEach(track => track.stop());
        streamRef.current = null;
      }
      setState('idle');
    }
  };
  // Handle button click
  const handleClick = (e) => {
    // Prevent double firing on mobile
    if (e) {
      e.preventDefault();
      e.stopPropagation();
    }
    // Debounce the mobile double-tap / ghost-click issue
    const now = Date.now();
    if (now - lastTapRef.current < 300) {
      console.log('Ignoring rapid tap');
      return;
    }
    lastTapRef.current = now;
    console.log('Button clicked, current state:', state);
    if (state === 'idle') {
      startRecording();
    } else if (state === 'recording') {
      stopRecording();
    }
    // Do nothing while transcribing or processing
  };
  // Clean up on unmount: stop any in-flight recording and release the mic
  useEffect(() => {
    return () => {
      if (mediaRecorderRef.current && mediaRecorderRef.current.state === 'recording') {
        mediaRecorderRef.current.stop();
      }
      if (streamRef.current) {
        streamRef.current.getTracks().forEach(track => track.stop());
      }
    };
  }, []);
  // Button appearance based on state.
  // NOTE: the source file is truncated partway through this function; the
  // returned shape ({ icon, label, classes }) and the Tailwind classes below
  // are a reconstruction from the imported icons (Mic, Loader2, Brain) and
  // the state machine above, not the original code.
  const getButtonAppearance = () => {
    switch (state) {
      case 'recording':
        return {
          icon: <Mic className="w-5 h-5" />,
          label: 'Stop recording',
          classes: 'bg-red-500 hover:bg-red-600 animate-pulse',
        };
      case 'transcribing':
        return {
          icon: <Loader2 className="w-5 h-5 animate-spin" />,
          label: 'Transcribing...',
          classes: 'bg-blue-500 cursor-wait',
        };
      case 'processing':
        return {
          icon: <Brain className="w-5 h-5 animate-pulse" />,
          label: 'Processing...',
          classes: 'bg-purple-500 cursor-wait',
        };
      default:
        return {
          icon: <Mic className="w-5 h-5" />,
          label: 'Start recording',
          classes: 'bg-gray-600 hover:bg-gray-700',
        };
    }
  };
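  // Everything below is likewise a hedged reconstruction of the truncated
  // render: a single round button driven by getButtonAppearance(), disabled
  // while transcription is in flight, with any error surfaced through the
  // title attribute. Markup and classes are illustrative, not the original.
  const { icon, label, classes } = getButtonAppearance();
  const busy = state === 'transcribing' || state === 'processing';
  return (
    <button
      type="button"
      onClick={handleClick}
      disabled={busy}
      title={error || label}
      aria-label={label}
      className={`flex items-center justify-center rounded-full p-3 text-white transition-colors ${classes} ${className}`}
    >
      {icon}
    </button>
  );
}

// Usage (hypothetical):
//   <MicButton onTranscript={(text) => console.log('Transcript:', text)} />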