diff --git a/server/index.js b/server/index.js
index 235f143b..b2130dc8 100755
--- a/server/index.js
+++ b/server/index.js
@@ -1984,155 +1984,6 @@ function handleShellConnection(ws) {
console.error('[ERROR] Shell WebSocket error:', error);
});
}
-// Audio transcription endpoint
-app.post('/api/transcribe', authenticateToken, async (req, res) => {
- try {
- const multer = (await import('multer')).default;
- const upload = multer({ storage: multer.memoryStorage() });
-
- // Handle multipart form data
- upload.single('audio')(req, res, async (err) => {
- if (err) {
- return res.status(400).json({ error: 'Failed to process audio file' });
- }
-
- if (!req.file) {
- return res.status(400).json({ error: 'No audio file provided' });
- }
-
- const apiKey = process.env.OPENAI_API_KEY;
- if (!apiKey) {
- return res.status(500).json({ error: 'OpenAI API key not configured. Please set OPENAI_API_KEY in server environment.' });
- }
-
- try {
- // Create form data for OpenAI
- const FormData = (await import('form-data')).default;
- const formData = new FormData();
- formData.append('file', req.file.buffer, {
- filename: req.file.originalname,
- contentType: req.file.mimetype
- });
- formData.append('model', 'whisper-1');
- formData.append('response_format', 'json');
- formData.append('language', 'en');
-
- // Make request to OpenAI
- const response = await fetch('https://api.openai.com/v1/audio/transcriptions', {
- method: 'POST',
- headers: {
- 'Authorization': `Bearer ${apiKey}`,
- ...formData.getHeaders()
- },
- body: formData
- });
-
- if (!response.ok) {
- const errorData = await response.json().catch(() => ({}));
- throw new Error(errorData.error?.message || `Whisper API error: ${response.status}`);
- }
-
- const data = await response.json();
- let transcribedText = data.text || '';
-
- // Check if enhancement mode is enabled
- const mode = req.body.mode || 'default';
-
- // If no transcribed text, return empty
- if (!transcribedText) {
- return res.json({ text: '' });
- }
-
- // If default mode, return transcribed text without enhancement
- if (mode === 'default') {
- return res.json({ text: transcribedText });
- }
-
- // Handle different enhancement modes
- try {
- const OpenAI = (await import('openai')).default;
- const openai = new OpenAI({ apiKey });
-
- let prompt, systemMessage, temperature = 0.7, maxTokens = 800;
-
- switch (mode) {
- case 'prompt':
- systemMessage = 'You are an expert prompt engineer who creates clear, detailed, and effective prompts.';
- prompt = `You are an expert prompt engineer. Transform the following rough instruction into a clear, detailed, and context-aware AI prompt.
-
-Your enhanced prompt should:
-1. Be specific and unambiguous
-2. Include relevant context and constraints
-3. Specify the desired output format
-4. Use clear, actionable language
-5. Include examples where helpful
-6. Consider edge cases and potential ambiguities
-
-Transform this rough instruction into a well-crafted prompt:
-"${transcribedText}"
-
-Enhanced prompt:`;
- break;
-
- case 'vibe':
- case 'instructions':
- case 'architect':
- systemMessage = 'You are a helpful assistant that formats ideas into clear, actionable instructions for AI agents.';
- temperature = 0.5; // Lower temperature for more controlled output
- prompt = `Transform the following idea into clear, well-structured instructions that an AI agent can easily understand and execute.
-
-IMPORTANT RULES:
-- Format as clear, step-by-step instructions
-- Add reasonable implementation details based on common patterns
-- Only include details directly related to what was asked
-- Do NOT add features or functionality not mentioned
-- Keep the original intent and scope intact
-- Use clear, actionable language an agent can follow
-
-Transform this idea into agent-friendly instructions:
-"${transcribedText}"
-
-Agent instructions:`;
- break;
-
- default:
- // No enhancement needed
- break;
- }
-
- // Only make GPT call if we have a prompt
- if (prompt) {
- const completion = await openai.chat.completions.create({
- model: 'gpt-4o-mini',
- messages: [
- { role: 'system', content: systemMessage },
- { role: 'user', content: prompt }
- ],
- temperature: temperature,
- max_tokens: maxTokens
- });
-
- transcribedText = completion.choices[0].message.content || transcribedText;
- }
-
- } catch (gptError) {
- console.error('GPT processing error:', gptError);
- // Fall back to original transcription if GPT fails
- }
-
- res.json({ text: transcribedText });
-
- } catch (error) {
- console.error('Transcription error:', error);
- res.status(500).json({ error: error.message });
- }
- });
- } catch (error) {
- console.error('Endpoint error:', error);
- res.status(500).json({ error: 'Internal server error' });
- }
-});
-
// Image upload endpoint
app.post('/api/projects/:projectName/upload-images', authenticateToken, async (req, res) => {
try {
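
Note on migration: the removed POST /api/transcribe endpoint accepted a multipart form with an `audio` file plus an optional `mode` field ('default', 'prompt', 'vibe', 'instructions', or 'architect') and responded with `{ text }`. A minimal client-side sketch of that contract, for anyone still calling it externally — the `fetch` wrapper and token handling here are illustrative assumptions, not code from this repo:

    // Hypothetical standalone client for the removed /api/transcribe contract.
    async function transcribeAudio(audio: Blob, token: string, mode = 'default'): Promise<string> {
      const form = new FormData();
      form.append('audio', new File([audio], `recording_${Date.now()}.webm`, { type: audio.type }));
      form.append('mode', mode); // non-'default' modes triggered GPT post-processing server-side
      const response = await fetch('/api/transcribe', {
        method: 'POST',
        headers: { Authorization: `Bearer ${token}` }, // browser sets the multipart Content-Type
        body: form,
      });
      if (!response.ok) {
        const data = (await response.json().catch(() => ({}))) as { error?: string };
        throw new Error(data.error || `Transcription error: ${response.status}`);
      }
      const data = (await response.json()) as { text?: string };
      return data.text || '';
    }
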
diff --git a/src/components/chat/hooks/useChatComposerState.ts b/src/components/chat/hooks/useChatComposerState.ts
index 6e84982d..858faff9 100644
--- a/src/components/chat/hooks/useChatComposerState.ts
+++ b/src/components/chat/hooks/useChatComposerState.ts
@@ -878,30 +878,6 @@ export function useChatComposerState({
});
}, [canAbortSession, currentSessionId, pendingViewSessionRef, provider, selectedSession?.id, sendMessage]);
- const handleTranscript = useCallback((text: string) => {
- if (!text.trim()) {
- return;
- }
-
- setInput((previousInput) => {
- const newInput = previousInput.trim() ? `${previousInput} ${text}` : text;
- inputValueRef.current = newInput;
-
- setTimeout(() => {
- if (!textareaRef.current) {
- return;
- }
-
- textareaRef.current.style.height = 'auto';
- textareaRef.current.style.height = `${textareaRef.current.scrollHeight}px`;
- const lineHeight = parseInt(window.getComputedStyle(textareaRef.current).lineHeight);
- setIsTextareaExpanded(textareaRef.current.scrollHeight > lineHeight * 2);
- }, 0);
-
- return newInput;
- });
- }, []);
-
const handleGrantToolPermission = useCallback(
(suggestion: { entry: string; toolName: string }) => {
if (!suggestion || provider !== 'claude') {
@@ -994,7 +970,6 @@ export function useChatComposerState({
syncInputOverlayScroll,
handleClearInput,
handleAbortSession,
- handleTranscript,
handlePermissionDecision,
handleGrantToolPermission,
handleInputFocusChange,
diff --git a/src/components/chat/view/ChatInterface.tsx b/src/components/chat/view/ChatInterface.tsx
index cb78222c..19483f64 100644
--- a/src/components/chat/view/ChatInterface.tsx
+++ b/src/components/chat/view/ChatInterface.tsx
@@ -165,7 +165,6 @@ function ChatInterface({
syncInputOverlayScroll,
handleClearInput,
handleAbortSession,
- handleTranscript,
handlePermissionDecision,
handleGrantToolPermission,
handleInputFocusChange,
@@ -407,7 +406,6 @@ function ChatInterface({
})}
isTextareaExpanded={isTextareaExpanded}
sendByCtrlEnter={sendByCtrlEnter}
- onTranscript={handleTranscript}
/>
diff --git a/src/components/chat/view/subcomponents/ChatComposer.tsx b/src/components/chat/view/subcomponents/ChatComposer.tsx
index 2bf8eb50..e6da236d 100644
--- a/src/components/chat/view/subcomponents/ChatComposer.tsx
+++ b/src/components/chat/view/subcomponents/ChatComposer.tsx
@@ -11,7 +11,6 @@ import type {
SetStateAction,
TouchEvent,
} from 'react';
-import MicButton from '../../../mic-button/view/MicButton';
import type { PendingPermissionRequest, PermissionMode, Provider } from '../../types/types';
import CommandMenu from './CommandMenu';
import ClaudeStatus from './ClaudeStatus';
@@ -91,7 +90,6 @@ interface ChatComposerProps {
placeholder: string;
isTextareaExpanded: boolean;
sendByCtrlEnter?: boolean;
- onTranscript: (text: string) => void;
}
export default function ChatComposer({
@@ -148,7 +146,6 @@ export default function ChatComposer({
placeholder,
isTextareaExpanded,
sendByCtrlEnter,
- onTranscript,
}: ChatComposerProps) {
const { t } = useTranslation('chat');
const textareaRect = textareaRef.current?.getBoundingClientRect();
@@ -321,10 +318,6 @@ export default function ChatComposer({
)}
- <MicButton
- onTranscript={(transcript) => setCommitMessage(transcript)}
- mode="default"
- className="p-1.5"
- />
diff --git a/src/components/mic-button/constants/constants.ts b/src/components/mic-button/constants/constants.ts
deleted file mode 100644
index 3bfbe62d..00000000
--- a/src/components/mic-button/constants/constants.ts
+++ /dev/null
@@ -1,45 +0,0 @@
-import type { MicButtonState } from '../types/types';
-
-export const MIC_BUTTON_STATES = {
- IDLE: 'idle',
- RECORDING: 'recording',
- TRANSCRIBING: 'transcribing',
- PROCESSING: 'processing',
-} as const;
-
-export const MIC_TAP_DEBOUNCE_MS = 300;
-export const PROCESSING_STATE_DELAY_MS = 2000;
-
-export const DEFAULT_WHISPER_MODE = 'default';
-
-// Modes that use post-transcription enhancement on the backend.
-export const ENHANCEMENT_WHISPER_MODES = new Set([
- 'prompt',
- 'vibe',
- 'instructions',
- 'architect',
-]);
-
-export const BUTTON_BACKGROUND_BY_STATE: Record<MicButtonState, string> = {
- idle: '#374151',
- recording: '#ef4444',
- transcribing: '#3b82f6',
- processing: '#a855f7',
-};
-
-export const MIC_ERROR_BY_NAME = {
- NotAllowedError: 'Microphone access denied. Please allow microphone permissions.',
- NotFoundError: 'No microphone found. Please check your audio devices.',
- NotSupportedError: 'Microphone not supported by this browser.',
- NotReadableError: 'Microphone is being used by another application.',
-} as const;
-
-export const MIC_NOT_AVAILABLE_ERROR =
- 'Microphone access not available. Please use HTTPS or a supported browser.';
-
-export const MIC_NOT_SUPPORTED_ERROR =
- 'Microphone not supported. Please use HTTPS or a modern browser.';
-
-export const MIC_SECURE_CONTEXT_ERROR =
- 'Microphone requires HTTPS. Please use a secure connection.';
-
diff --git a/src/components/mic-button/data/whisper.ts b/src/components/mic-button/data/whisper.ts
deleted file mode 100644
index be204d32..00000000
--- a/src/components/mic-button/data/whisper.ts
+++ /dev/null
@@ -1,52 +0,0 @@
-import { api } from '../../../utils/api';
-
-type WhisperStatus = 'transcribing';
-
-type WhisperResponse = {
- text?: string;
- error?: string;
-};
-
-export async function transcribeWithWhisper(
- audioBlob: Blob,
- onStatusChange?: (status: WhisperStatus) => void,
-): Promise<string> {
- const formData = new FormData();
- const fileName = `recording_${Date.now()}.webm`;
- const file = new File([audioBlob], fileName, { type: audioBlob.type });
-
- formData.append('audio', file);
-
- const whisperMode = window.localStorage.getItem('whisperMode') || 'default';
- formData.append('mode', whisperMode);
-
- try {
- // Keep existing status callback behavior.
- if (onStatusChange) {
- onStatusChange('transcribing');
- }
-
- const response = (await api.transcribe(formData)) as Response;
-
- if (!response.ok) {
- const errorData = (await response.json().catch(() => ({}))) as WhisperResponse;
- throw new Error(
- errorData.error ||
- `Transcription error: ${response.status} ${response.statusText}`,
- );
- }
-
- const data = (await response.json()) as WhisperResponse;
- return data.text || '';
- } catch (error) {
- if (
- error instanceof Error
- && error.name === 'TypeError'
- && error.message.includes('fetch')
- ) {
- throw new Error('Cannot connect to server. Please ensure the backend is running.');
- }
- throw error;
- }
-}
-
diff --git a/src/components/mic-button/hooks/useMicButtonController.ts b/src/components/mic-button/hooks/useMicButtonController.ts
deleted file mode 100644
index dfddec78..00000000
--- a/src/components/mic-button/hooks/useMicButtonController.ts
+++ /dev/null
@@ -1,204 +0,0 @@
-import { useEffect, useRef, useState } from 'react';
-import type { MouseEvent } from 'react';
-import { transcribeWithWhisper } from '../data/whisper';
-import {
- DEFAULT_WHISPER_MODE,
- ENHANCEMENT_WHISPER_MODES,
- MIC_BUTTON_STATES,
- MIC_ERROR_BY_NAME,
- MIC_NOT_AVAILABLE_ERROR,
- MIC_NOT_SUPPORTED_ERROR,
- MIC_SECURE_CONTEXT_ERROR,
- MIC_TAP_DEBOUNCE_MS,
- PROCESSING_STATE_DELAY_MS,
-} from '../constants/constants';
-import type { MicButtonState } from '../types/types';
-
-type UseMicButtonControllerArgs = {
- onTranscript?: (transcript: string) => void;
-};
-
-type UseMicButtonControllerResult = {
- state: MicButtonState;
- error: string | null;
- isSupported: boolean;
- handleButtonClick: (event?: MouseEvent) => void;
-};
-
-const getRecordingErrorMessage = (error: unknown): string => {
- if (error instanceof Error && error.message.includes('HTTPS')) {
- return error.message;
- }
-
- if (error instanceof DOMException) {
- return MIC_ERROR_BY_NAME[error.name as keyof typeof MIC_ERROR_BY_NAME] || 'Microphone access failed';
- }
-
- return 'Microphone access failed';
-};
-
-const getRecorderMimeType = (): string => (
- MediaRecorder.isTypeSupported('audio/webm') ? 'audio/webm' : 'audio/mp4'
-);
-
-export function useMicButtonController({
- onTranscript,
-}: UseMicButtonControllerArgs): UseMicButtonControllerResult {
- const [state, setState] = useState<MicButtonState>(MIC_BUTTON_STATES.IDLE);
- const [error, setError] = useState<string | null>(null);
- const [isSupported, setIsSupported] = useState(true);
-
- const mediaRecorderRef = useRef<MediaRecorder | null>(null);
- const streamRef = useRef<MediaStream | null>(null);
- const chunksRef = useRef<Blob[]>([]);
- const lastTapRef = useRef(0);
- const processingTimerRef = useRef<number | null>(null);
-
- const clearProcessingTimer = (): void => {
- if (processingTimerRef.current !== null) {
- window.clearTimeout(processingTimerRef.current);
- processingTimerRef.current = null;
- }
- };
-
- const stopStreamTracks = (): void => {
- if (!streamRef.current) {
- return;
- }
-
- streamRef.current.getTracks().forEach((track) => track.stop());
- streamRef.current = null;
- };
-
- const handleStopRecording = async (mimeType: string): Promise<void> => {
- const audioBlob = new Blob(chunksRef.current, { type: mimeType });
-
- // Release the microphone immediately once recording ends.
- stopStreamTracks();
- setState(MIC_BUTTON_STATES.TRANSCRIBING);
-
- const whisperMode = window.localStorage.getItem('whisperMode') || DEFAULT_WHISPER_MODE;
- const shouldShowProcessingState = ENHANCEMENT_WHISPER_MODES.has(whisperMode);
-
- if (shouldShowProcessingState) {
- processingTimerRef.current = window.setTimeout(() => {
- setState(MIC_BUTTON_STATES.PROCESSING);
- }, PROCESSING_STATE_DELAY_MS);
- }
-
- try {
- const transcript = await transcribeWithWhisper(audioBlob);
- if (transcript && onTranscript) {
- onTranscript(transcript);
- }
- } catch (transcriptionError) {
- const message = transcriptionError instanceof Error ? transcriptionError.message : 'Transcription error';
- setError(message);
- } finally {
- clearProcessingTimer();
- setState(MIC_BUTTON_STATES.IDLE);
- }
- };
-
- const startRecording = async (): Promise<void> => {
- try {
- setError(null);
- chunksRef.current = [];
-
- if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
- throw new Error(MIC_NOT_AVAILABLE_ERROR);
- }
-
- const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
- streamRef.current = stream;
-
- const mimeType = getRecorderMimeType();
- const recorder = new MediaRecorder(stream, { mimeType });
- mediaRecorderRef.current = recorder;
-
- recorder.ondataavailable = (event: BlobEvent) => {
- if (event.data.size > 0) {
- chunksRef.current.push(event.data);
- }
- };
-
- recorder.onstop = () => {
- void handleStopRecording(mimeType);
- };
-
- recorder.start();
- setState(MIC_BUTTON_STATES.RECORDING);
- } catch (recordingError) {
- stopStreamTracks();
- setError(getRecordingErrorMessage(recordingError));
- setState(MIC_BUTTON_STATES.IDLE);
- }
- };
-
- const stopRecording = (): void => {
- if (mediaRecorderRef.current && mediaRecorderRef.current.state === 'recording') {
- mediaRecorderRef.current.stop();
- return;
- }
-
- stopStreamTracks();
- setState(MIC_BUTTON_STATES.IDLE);
- };
-
- const handleButtonClick = (event?: MouseEvent): void => {
- if (event) {
- event.preventDefault();
- event.stopPropagation();
- }
-
- if (!isSupported) {
- return;
- }
-
- // Mobile tap handling can trigger duplicate click events in quick succession.
- const now = Date.now();
- if (now - lastTapRef.current < MIC_TAP_DEBOUNCE_MS) {
- return;
- }
- lastTapRef.current = now;
-
- if (state === MIC_BUTTON_STATES.IDLE) {
- void startRecording();
- return;
- }
-
- if (state === MIC_BUTTON_STATES.RECORDING) {
- stopRecording();
- }
- };
-
- useEffect(() => {
- // getUserMedia needs both browser support and a secure context.
- if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
- setIsSupported(false);
- setError(MIC_NOT_SUPPORTED_ERROR);
- return;
- }
-
- if (location.protocol !== 'https:' && location.hostname !== 'localhost') {
- setIsSupported(false);
- setError(MIC_SECURE_CONTEXT_ERROR);
- return;
- }
-
- setIsSupported(true);
- setError(null);
- }, []);
-
- useEffect(() => () => {
- clearProcessingTimer();
- stopStreamTracks();
- }, []);
-
- return {
- state,
- error,
- isSupported,
- handleButtonClick,
- };
-}
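
The deleted controller wrapped the full MediaRecorder lifecycle: acquire the stream, buffer chunks, stop the tracks as soon as recording ends, then hand the blob to transcription. A condensed sketch of that flow, should it need to be reintroduced — the `transcribe` parameter stands in for the removed transcribeWithWhisper; everything else is standard browser API:

    // Start capturing audio; the returned stop() finalizes the recording,
    // releases the microphone, and resolves with the transcript.
    async function startRecording(
      transcribe: (blob: Blob) => Promise<string>,
    ): Promise<{ stop: () => Promise<string> }> {
      const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
      const mimeType = MediaRecorder.isTypeSupported('audio/webm') ? 'audio/webm' : 'audio/mp4';
      const recorder = new MediaRecorder(stream, { mimeType });
      const chunks: Blob[] = [];
      recorder.ondataavailable = (event) => {
        if (event.data.size > 0) chunks.push(event.data);
      };
      recorder.start();
      return {
        stop: () =>
          new Promise<string>((resolve, reject) => {
            recorder.onstop = () => {
              stream.getTracks().forEach((track) => track.stop()); // release the mic immediately
              transcribe(new Blob(chunks, { type: mimeType })).then(resolve, reject);
            };
            recorder.stop();
          }),
      };
    }
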
diff --git a/src/components/mic-button/types/types.ts b/src/components/mic-button/types/types.ts
deleted file mode 100644
index c0469822..00000000
--- a/src/components/mic-button/types/types.ts
+++ /dev/null
@@ -1,2 +0,0 @@
-export type MicButtonState = 'idle' | 'recording' | 'transcribing' | 'processing';
-
diff --git a/src/components/mic-button/view/MicButton.tsx b/src/components/mic-button/view/MicButton.tsx
deleted file mode 100644
index 72926cef..00000000
--- a/src/components/mic-button/view/MicButton.tsx
+++ /dev/null
@@ -1,32 +0,0 @@
-import { useMicButtonController } from '../hooks/useMicButtonController';
-import MicButtonView from './MicButtonView';
-
-type MicButtonProps = {
- onTranscript?: (transcript: string) => void;
- className?: string;
- mode?: string;
-};
-
-export default function MicButton({
- onTranscript,
- className = '',
- mode: _mode,
-}: MicButtonProps) {
- const { state, error, isSupported, handleButtonClick } = useMicButtonController({
- onTranscript,
- });
-
- // Keep `mode` in the public props for backwards compatibility.
- void _mode;
-
- return (
- <MicButtonView state={state} error={error} isSupported={isSupported} className={className} onButtonClick={handleButtonClick} />
- );
-}
-
diff --git a/src/components/mic-button/view/MicButtonView.tsx b/src/components/mic-button/view/MicButtonView.tsx
deleted file mode 100644
index 7471ca5c..00000000
--- a/src/components/mic-button/view/MicButtonView.tsx
+++ /dev/null
@@ -1,86 +0,0 @@
-import { Brain, Loader2, Mic } from 'lucide-react';
-import type { MouseEvent, ReactElement } from 'react';
-import { BUTTON_BACKGROUND_BY_STATE, MIC_BUTTON_STATES } from '../constants/constants';
-import type { MicButtonState } from '../types/types';
-
-type MicButtonViewProps = {
- state: MicButtonState;
- error: string | null;
- isSupported: boolean;
- className: string;
- onButtonClick: (event?: MouseEvent) => void;
-};
-
-const getButtonIcon = (state: MicButtonState, isSupported: boolean): ReactElement => {
- if (!isSupported) {
- return <Mic />;
- }
-
- if (state === MIC_BUTTON_STATES.TRANSCRIBING) {
- return <Loader2 />;
- }
-
- if (state === MIC_BUTTON_STATES.PROCESSING) {
- return <Brain />;
- }
-
- if (state === MIC_BUTTON_STATES.RECORDING) {
- return <Mic />;
- }
-
- return <Mic />;
-};
-
-export default function MicButtonView({
- state,
- error,
- isSupported,
- className,
- onButtonClick,
-}: MicButtonViewProps) {
- const isDisabled = !isSupported || state === MIC_BUTTON_STATES.TRANSCRIBING || state === MIC_BUTTON_STATES.PROCESSING;
- const icon = getButtonIcon(state, isSupported);
-
- return (
- <div className={className}>
- <button
- type="button"
- onClick={onButtonClick}
- disabled={isDisabled}
- style={{ backgroundColor: BUTTON_BACKGROUND_BY_STATE[state] }}
- >
- {icon}
- </button>
-
- {error && (
- <div>
- {error}
- </div>
- )}
-
- {state === MIC_BUTTON_STATES.RECORDING && (
- <span /> /* recording indicator */
- )}
-
- {state === MIC_BUTTON_STATES.PROCESSING && (
- <span /> /* enhancement-processing indicator */
- )}
- </div>
- );
-}
diff --git a/src/components/quick-settings-panel/constants.ts b/src/components/quick-settings-panel/constants.ts
index 5f1a8e21..15c15458 100644
--- a/src/components/quick-settings-panel/constants.ts
+++ b/src/components/quick-settings-panel/constants.ts
@@ -2,21 +2,12 @@ import {
ArrowDown,
Brain,
Eye,
- FileText,
Languages,
Maximize2,
- Mic,
- Sparkles,
} from 'lucide-react';
-import type {
- PreferenceToggleItem,
- WhisperMode,
- WhisperOption,
-} from './types';
+import type { PreferenceToggleItem } from './types';
export const HANDLE_POSITION_STORAGE_KEY = 'quickSettingsHandlePosition';
-export const WHISPER_MODE_STORAGE_KEY = 'whisperMode';
-export const WHISPER_MODE_CHANGED_EVENT = 'whisperModeChanged';
export const DEFAULT_HANDLE_POSITION = 50;
export const HANDLE_POSITION_MIN = 10;
@@ -64,30 +55,3 @@ export const INPUT_SETTING_TOGGLES: PreferenceToggleItem[] = [
icon: Languages,
},
];
-
-export const WHISPER_OPTIONS: WhisperOption[] = [
- {
- value: 'default',
- titleKey: 'quickSettings.whisper.modes.default',
- descriptionKey: 'quickSettings.whisper.modes.defaultDescription',
- icon: Mic,
- },
- {
- value: 'prompt',
- titleKey: 'quickSettings.whisper.modes.prompt',
- descriptionKey: 'quickSettings.whisper.modes.promptDescription',
- icon: Sparkles,
- },
- {
- value: 'vibe',
- titleKey: 'quickSettings.whisper.modes.vibe',
- descriptionKey: 'quickSettings.whisper.modes.vibeDescription',
- icon: FileText,
- },
-];
-
-export const VIBE_MODE_ALIASES: WhisperMode[] = [
- 'vibe',
- 'instructions',
- 'architect',
-];
diff --git a/src/components/quick-settings-panel/hooks/useWhisperMode.ts b/src/components/quick-settings-panel/hooks/useWhisperMode.ts
deleted file mode 100644
index eeda67ce..00000000
--- a/src/components/quick-settings-panel/hooks/useWhisperMode.ts
+++ /dev/null
@@ -1,59 +0,0 @@
-import { useCallback, useState } from 'react';
-import {
- VIBE_MODE_ALIASES,
- WHISPER_MODE_CHANGED_EVENT,
- WHISPER_MODE_STORAGE_KEY,
-} from '../constants';
-import type { WhisperMode, WhisperOptionValue } from '../types';
-
-const ALL_VALID_MODES: WhisperMode[] = [
- 'default',
- 'prompt',
- 'vibe',
- 'instructions',
- 'architect',
-];
-
-const isWhisperMode = (value: string): value is WhisperMode => (
- ALL_VALID_MODES.includes(value as WhisperMode)
-);
-
-const readStoredMode = (): WhisperMode => {
- if (typeof window === 'undefined') {
- return 'default';
- }
-
- const storedValue = localStorage.getItem(WHISPER_MODE_STORAGE_KEY);
- if (!storedValue) {
- return 'default';
- }
-
- return isWhisperMode(storedValue) ? storedValue : 'default';
-};
-
-export function useWhisperMode() {
- const [whisperMode, setWhisperModeState] = useState(readStoredMode);
-
- const setWhisperMode = useCallback((value: WhisperOptionValue) => {
- setWhisperModeState(value);
- localStorage.setItem(WHISPER_MODE_STORAGE_KEY, value);
- window.dispatchEvent(new Event(WHISPER_MODE_CHANGED_EVENT));
- }, []);
-
- const isOptionSelected = useCallback(
- (value: WhisperOptionValue) => {
- if (value === 'vibe') {
- return VIBE_MODE_ALIASES.includes(whisperMode);
- }
-
- return whisperMode === value;
- },
- [whisperMode],
- );
-
- return {
- whisperMode,
- setWhisperMode,
- isOptionSelected,
- };
-}
diff --git a/src/components/quick-settings-panel/types.ts b/src/components/quick-settings-panel/types.ts
index 4a12fc01..16002694 100644
--- a/src/components/quick-settings-panel/types.ts
+++ b/src/components/quick-settings-panel/types.ts
@@ -16,20 +16,4 @@ export type PreferenceToggleItem = {
icon: LucideIcon;
};
-export type WhisperMode =
- | 'default'
- | 'prompt'
- | 'vibe'
- | 'instructions'
- | 'architect';
-
-export type WhisperOptionValue = 'default' | 'prompt' | 'vibe';
-
-export type WhisperOption = {
- value: WhisperOptionValue;
- titleKey: string;
- descriptionKey: string;
- icon: LucideIcon;
-};
-
export type QuickSettingsHandleStyle = CSSProperties;
diff --git a/src/components/quick-settings-panel/view/QuickSettingsContent.tsx b/src/components/quick-settings-panel/view/QuickSettingsContent.tsx
index 60d19912..8d805fe9 100644
--- a/src/components/quick-settings-panel/view/QuickSettingsContent.tsx
+++ b/src/components/quick-settings-panel/view/QuickSettingsContent.tsx
@@ -15,7 +15,6 @@ import type {
} from '../types';
import QuickSettingsSection from './QuickSettingsSection';
import QuickSettingsToggleRow from './QuickSettingsToggleRow';
-import QuickSettingsWhisperSection from './QuickSettingsWhisperSection';
type QuickSettingsContentProps = {
isDarkMode: boolean;
@@ -73,8 +72,6 @@ export default function QuickSettingsContent({
{t('quickSettings.sendByCtrlEnterDescription')}
-
- <QuickSettingsWhisperSection />
);
}
diff --git a/src/components/quick-settings-panel/view/QuickSettingsWhisperSection.tsx b/src/components/quick-settings-panel/view/QuickSettingsWhisperSection.tsx
deleted file mode 100644
index 9eb5f744..00000000
--- a/src/components/quick-settings-panel/view/QuickSettingsWhisperSection.tsx
+++ /dev/null
@@ -1,44 +0,0 @@
-import { useTranslation } from 'react-i18next';
-import { TOGGLE_ROW_CLASS, WHISPER_OPTIONS } from '../constants';
-import { useWhisperMode } from '../hooks/useWhisperMode';
-import QuickSettingsSection from './QuickSettingsSection';
-
-export default function QuickSettingsWhisperSection() {
- const { t } = useTranslation('settings');
- const { setWhisperMode, isOptionSelected } = useWhisperMode();
-
- return (
- // This section stays hidden intentionally until dictation modes are reintroduced.
- <QuickSettingsSection title={t('quickSettings.whisperDictation')} className="hidden">
- {WHISPER_OPTIONS.map(({ value, icon: Icon, titleKey, descriptionKey }) => (
- <button
- key={value}
- type="button"
- className={TOGGLE_ROW_CLASS}
- aria-pressed={isOptionSelected(value)}
- onClick={() => setWhisperMode(value)}
- >
- <Icon />
- <span>{t(titleKey)}</span>
- <span>{t(descriptionKey)}</span>
- </button>
- ))}
- </QuickSettingsSection>
- );
-}
diff --git a/src/i18n/locales/de/settings.json b/src/i18n/locales/de/settings.json
index 25c289dd..95707593 100644
--- a/src/i18n/locales/de/settings.json
+++ b/src/i18n/locales/de/settings.json
@@ -55,8 +55,7 @@
"appearance": "Darstellung",
"toolDisplay": "Werkzeuganzeige",
"viewOptions": "Anzeigeoptionen",
- "inputSettings": "Eingabeeinstellungen",
- "whisperDictation": "Whisper-Diktat"
+ "inputSettings": "Eingabeeinstellungen"
},
"darkMode": "Darkmode",
"autoExpandTools": "Werkzeuge automatisch erweitern",
@@ -71,16 +70,6 @@
"openPanel": "Einstellungspanel öffnen",
"draggingStatus": "Wird gezogen...",
"toggleAndMove": "Klicken zum Umschalten, ziehen zum Verschieben"
- },
- "whisper": {
- "modes": {
- "default": "Standardmodus",
- "defaultDescription": "Direkte Transkription deiner Sprache",
- "prompt": "Prompt-Verbesserung",
- "promptDescription": "Rohe Ideen in klare, detaillierte KI-Prompts umwandeln",
- "vibe": "Vibe-Modus",
- "vibeDescription": "Ideen als klare Agentenanweisungen mit Details formatieren"
- }
}
},
"terminalShortcuts": {
diff --git a/src/i18n/locales/en/settings.json b/src/i18n/locales/en/settings.json
index 8596e045..c8a0ba12 100644
--- a/src/i18n/locales/en/settings.json
+++ b/src/i18n/locales/en/settings.json
@@ -55,8 +55,7 @@
"appearance": "Appearance",
"toolDisplay": "Tool Display",
"viewOptions": "View Options",
- "inputSettings": "Input Settings",
- "whisperDictation": "Whisper Dictation"
+ "inputSettings": "Input Settings"
},
"darkMode": "Dark Mode",
"autoExpandTools": "Auto-expand tools",
@@ -71,16 +70,6 @@
"openPanel": "Open settings panel",
"draggingStatus": "Dragging...",
"toggleAndMove": "Click to toggle, drag to move"
- },
- "whisper": {
- "modes": {
- "default": "Default Mode",
- "defaultDescription": "Direct transcription of your speech",
- "prompt": "Prompt Enhancement",
- "promptDescription": "Transform rough ideas into clear, detailed AI prompts",
- "vibe": "Vibe Mode",
- "vibeDescription": "Format ideas as clear agent instructions with details"
- }
}
},
"terminalShortcuts": {
@@ -498,4 +487,4 @@
"tab": "tab",
"runningStatus": "running"
}
-}
\ No newline at end of file
+}
diff --git a/src/i18n/locales/ja/settings.json b/src/i18n/locales/ja/settings.json
index 60b454ca..1ea3671c 100644
--- a/src/i18n/locales/ja/settings.json
+++ b/src/i18n/locales/ja/settings.json
@@ -55,8 +55,7 @@
"appearance": "外観",
"toolDisplay": "ツール表示",
"viewOptions": "表示オプション",
- "inputSettings": "入力設定",
- "whisperDictation": "Whisper音声入力"
+ "inputSettings": "入力設定"
},
"darkMode": "ダークモード",
"autoExpandTools": "ツールを自動展開",
@@ -71,16 +70,6 @@
"openPanel": "設定パネルを開く",
"draggingStatus": "ドラッグ中...",
"toggleAndMove": "クリックで切替、ドラッグで移動"
- },
- "whisper": {
- "modes": {
- "default": "標準モード",
- "defaultDescription": "音声をそのまま文字起こしします",
- "prompt": "プロンプト強化",
- "promptDescription": "ラフなアイデアを明確で詳細なAIプロンプトに変換します",
- "vibe": "バイブモード",
- "vibeDescription": "アイデアを明確なエージェント指示に整形します"
- }
}
},
"terminalShortcuts": {
@@ -492,4 +481,4 @@
"tab": "タブ",
"runningStatus": "実行中"
}
-}
\ No newline at end of file
+}
diff --git a/src/i18n/locales/ko/settings.json b/src/i18n/locales/ko/settings.json
index b8a1f450..789209f7 100644
--- a/src/i18n/locales/ko/settings.json
+++ b/src/i18n/locales/ko/settings.json
@@ -55,8 +55,7 @@
"appearance": "외관",
"toolDisplay": "도구 표시",
"viewOptions": "보기 옵션",
- "inputSettings": "입력 설정",
- "whisperDictation": "Whisper 음성 인식"
+ "inputSettings": "입력 설정"
},
"darkMode": "다크 모드",
"autoExpandTools": "도구 자동 펼치기",
@@ -71,16 +70,6 @@
"openPanel": "설정 패널 열기",
"draggingStatus": "드래그 중...",
"toggleAndMove": "클릭하여 토글, 드래그하여 이동"
- },
- "whisper": {
- "modes": {
- "default": "기본 모드",
- "defaultDescription": "음성을 그대로 텍스트로 변환",
- "prompt": "프롬프트 향상",
- "promptDescription": "거친 아이디어를 명확하고 상세한 AI 프롬프트로 변환",
- "vibe": "Vibe 모드",
- "vibeDescription": "아이디어를 상세한 에이전트 지침 형식으로 변환"
- }
}
},
"terminalShortcuts": {
@@ -492,4 +481,4 @@
"tab": "탭",
"runningStatus": "실행 중"
}
-}
\ No newline at end of file
+}
diff --git a/src/i18n/locales/ru/settings.json b/src/i18n/locales/ru/settings.json
index f8991ab0..5b448f35 100644
--- a/src/i18n/locales/ru/settings.json
+++ b/src/i18n/locales/ru/settings.json
@@ -55,8 +55,7 @@
"appearance": "Внешний вид",
"toolDisplay": "Отображение инструментов",
"viewOptions": "Параметры просмотра",
- "inputSettings": "Настройки ввода",
- "whisperDictation": "Диктовка Whisper"
+ "inputSettings": "Настройки ввода"
},
"darkMode": "Темная тема",
"autoExpandTools": "Автоматически разворачивать инструменты",
@@ -71,16 +70,6 @@
"openPanel": "Открыть панель настроек",
"draggingStatus": "Перетаскивание...",
"toggleAndMove": "Нажмите для переключения, перетащите для перемещения"
- },
- "whisper": {
- "modes": {
- "default": "Режим по умолчанию",
- "defaultDescription": "Прямая транскрипция вашей речи",
- "prompt": "Улучшение запроса",
- "promptDescription": "Преобразование грубых идей в четкие, детальные AI-запросы",
- "vibe": "Режим Vibe",
- "vibeDescription": "Форматирование идей как четких инструкций агента с деталями"
- }
}
},
"terminalShortcuts": {
@@ -471,4 +460,4 @@
"tab": "вкладка",
"runningStatus": "запущен"
}
-}
\ No newline at end of file
+}
diff --git a/src/i18n/locales/zh-CN/settings.json b/src/i18n/locales/zh-CN/settings.json
index d9f2b2cd..83bb293d 100644
--- a/src/i18n/locales/zh-CN/settings.json
+++ b/src/i18n/locales/zh-CN/settings.json
@@ -55,8 +55,7 @@
"appearance": "外观",
"toolDisplay": "工具显示",
"viewOptions": "视图选项",
- "inputSettings": "输入设置",
- "whisperDictation": "Whisper 听写"
+ "inputSettings": "输入设置"
},
"darkMode": "深色模式",
"autoExpandTools": "自动展开工具",
@@ -71,16 +70,6 @@
"openPanel": "打开设置面板",
"draggingStatus": "正在拖拽...",
"toggleAndMove": "点击切换,拖拽移动"
- },
- "whisper": {
- "modes": {
- "default": "默认模式",
- "defaultDescription": "直接转录您的语音",
- "prompt": "提示词增强",
- "promptDescription": "将粗略的想法转化为清晰、详细的 AI 提示词",
- "vibe": "Vibe 模式",
- "vibeDescription": "将想法格式化为带有详细说明的清晰智能体指令"
- }
}
},
"terminalShortcuts": {
@@ -492,4 +481,4 @@
"tab": "标签",
"runningStatus": "运行中"
}
-}
\ No newline at end of file
+}
diff --git a/src/utils/api.js b/src/utils/api.js
index 7c14a677..438cab82 100644
--- a/src/utils/api.js
+++ b/src/utils/api.js
@@ -147,13 +147,6 @@ export const api = {
headers: {}, // Let browser set Content-Type for FormData
}),
- transcribe: (formData) =>
- authenticatedFetch('/api/transcribe', {
- method: 'POST',
- body: formData,
- headers: {}, // Let browser set Content-Type for FormData
- }),
-
// TaskMaster endpoints
taskmaster: {
// Initialize TaskMaster in a project