refactor: remove unused whisper transcribe logic (#637)

This commit is contained in:
Haile
2026-04-10 16:34:34 +03:00
committed by GitHub
parent 2207d05c1c
commit 590dd42649
23 changed files with 12 additions and 855 deletions

View File

@@ -878,30 +878,6 @@ export function useChatComposerState({
});
}, [canAbortSession, currentSessionId, pendingViewSessionRef, provider, selectedSession?.id, sendMessage]);
const handleTranscript = useCallback((text: string) => {
if (!text.trim()) {
return;
}
setInput((previousInput) => {
const newInput = previousInput.trim() ? `${previousInput} ${text}` : text;
inputValueRef.current = newInput;
setTimeout(() => {
if (!textareaRef.current) {
return;
}
textareaRef.current.style.height = 'auto';
textareaRef.current.style.height = `${textareaRef.current.scrollHeight}px`;
const lineHeight = parseInt(window.getComputedStyle(textareaRef.current).lineHeight);
setIsTextareaExpanded(textareaRef.current.scrollHeight > lineHeight * 2);
}, 0);
return newInput;
});
}, []);
const handleGrantToolPermission = useCallback(
(suggestion: { entry: string; toolName: string }) => {
if (!suggestion || provider !== 'claude') {
@@ -994,7 +970,6 @@ export function useChatComposerState({
syncInputOverlayScroll,
handleClearInput,
handleAbortSession,
handleTranscript,
handlePermissionDecision,
handleGrantToolPermission,
handleInputFocusChange,

View File

@@ -165,7 +165,6 @@ function ChatInterface({
syncInputOverlayScroll,
handleClearInput,
handleAbortSession,
handleTranscript,
handlePermissionDecision,
handleGrantToolPermission,
handleInputFocusChange,
@@ -407,7 +406,6 @@ function ChatInterface({
})}
isTextareaExpanded={isTextareaExpanded}
sendByCtrlEnter={sendByCtrlEnter}
onTranscript={handleTranscript}
/>
</div>

View File

@@ -11,7 +11,6 @@ import type {
SetStateAction,
TouchEvent,
} from 'react';
import MicButton from '../../../mic-button/view/MicButton';
import type { PendingPermissionRequest, PermissionMode, Provider } from '../../types/types';
import CommandMenu from './CommandMenu';
import ClaudeStatus from './ClaudeStatus';
@@ -91,7 +90,6 @@ interface ChatComposerProps {
placeholder: string;
isTextareaExpanded: boolean;
sendByCtrlEnter?: boolean;
onTranscript: (text: string) => void;
}
export default function ChatComposer({
@@ -148,7 +146,6 @@ export default function ChatComposer({
placeholder,
isTextareaExpanded,
sendByCtrlEnter,
onTranscript,
}: ChatComposerProps) {
const { t } = useTranslation('chat');
const textareaRect = textareaRef.current?.getBoundingClientRect();
@@ -321,10 +318,6 @@ export default function ChatComposer({
</svg>
</button>
<div className="absolute right-16 top-1/2 -translate-y-1/2 transform sm:right-16" style={{ display: 'none' }}>
<MicButton onTranscript={onTranscript} className="h-10 w-10 sm:h-10 sm:w-10" />
</div>
<button
type="submit"
disabled={!input.trim() || isLoading}

View File

@@ -1,6 +1,5 @@
import { Check, ChevronDown, GitCommit, RefreshCw, Sparkles } from 'lucide-react';
import { useState } from 'react';
import MicButton from '../../../mic-button/view/MicButton';
import type { ConfirmationRequest } from '../../types/types';
// Persists commit messages across unmount/remount, keyed by project path
@@ -147,13 +146,6 @@ export default function CommitComposer({
<Sparkles className="h-4 w-4" />
)}
</button>
<div style={{ display: 'none' }}>
<MicButton
onTranscript={(transcript) => setCommitMessage(transcript)}
mode="default"
className="p-1.5"
/>
</div>
</div>
</div>

View File

@@ -1,45 +0,0 @@
import type { MicButtonState } from '../types/types';
// Finite set of UI lifecycle states for the mic button.
export const MIC_BUTTON_STATES = {
IDLE: 'idle',
RECORDING: 'recording',
TRANSCRIBING: 'transcribing',
PROCESSING: 'processing',
} as const;
// Ignore duplicate taps fired within this window (guards against double click events).
export const MIC_TAP_DEBOUNCE_MS = 300;
// Delay before the UI switches from 'transcribing' to 'processing' for enhancement modes.
export const PROCESSING_STATE_DELAY_MS = 2000;
// Whisper mode used when localStorage has no stored preference.
export const DEFAULT_WHISPER_MODE = 'default';
// Modes that use post-transcription enhancement on the backend.
export const ENHANCEMENT_WHISPER_MODES = new Set([
'prompt',
'vibe',
'instructions',
'architect',
]);
// Inline background color applied to the button for each state.
export const BUTTON_BACKGROUND_BY_STATE: Record<MicButtonState, string> = {
idle: '#374151',
recording: '#ef4444',
transcribing: '#3b82f6',
processing: '#a855f7',
};
// User-facing messages keyed by getUserMedia DOMException name.
export const MIC_ERROR_BY_NAME = {
NotAllowedError: 'Microphone access denied. Please allow microphone permissions.',
NotFoundError: 'No microphone found. Please check your audio devices.',
NotSupportedError: 'Microphone not supported by this browser.',
NotReadableError: 'Microphone is being used by another application.',
} as const;
// Shown when mediaDevices/getUserMedia is entirely unavailable.
export const MIC_NOT_AVAILABLE_ERROR =
'Microphone access not available. Please use HTTPS or a supported browser.';
// Shown when the environment lacks recording support.
export const MIC_NOT_SUPPORTED_ERROR =
'Microphone not supported. Please use HTTPS or a modern browser.';
// Shown when the page is served over an insecure (non-HTTPS, non-localhost) origin.
export const MIC_SECURE_CONTEXT_ERROR =
'Microphone requires HTTPS. Please use a secure connection.';

View File

@@ -1,52 +0,0 @@
import { api } from '../../../utils/api';
// Status values reported through the optional onStatusChange callback.
type WhisperStatus = 'transcribing';

// Shape of the JSON body returned by the transcription endpoint.
type WhisperResponse = {
  text?: string;
  error?: string;
};

/**
 * Uploads a recorded audio blob to the backend transcription endpoint and
 * resolves with the recognized text ('' when the server returns no text).
 *
 * The whisper mode is read from localStorage ('default' when unset) and sent
 * alongside the audio so the backend can apply mode-specific handling.
 *
 * @param audioBlob recorded audio to transcribe
 * @param onStatusChange optional callback invoked with 'transcribing' before the upload
 * @throws Error with a server-provided or synthesized message on failure
 */
export async function transcribeWithWhisper(
  audioBlob: Blob,
  onStatusChange?: (status: WhisperStatus) => void,
): Promise<string> {
  const payload = new FormData();
  payload.append(
    'audio',
    new File([audioBlob], `recording_${Date.now()}.webm`, { type: audioBlob.type }),
  );
  payload.append('mode', window.localStorage.getItem('whisperMode') || 'default');
  try {
    // Keep existing status callback behavior.
    onStatusChange?.('transcribing');
    const response = (await api.transcribe(payload)) as Response;
    if (!response.ok) {
      // Prefer the server's error message; fall back to a status-based one.
      const errorBody = (await response.json().catch(() => ({}))) as WhisperResponse;
      throw new Error(
        errorBody.error ||
        `Transcription error: ${response.status} ${response.statusText}`,
      );
    }
    const body = (await response.json()) as WhisperResponse;
    return body.text || '';
  } catch (error) {
    // fetch() signals network failure as a TypeError; translate it for users.
    const isNetworkFailure =
      error instanceof Error
      && error.name === 'TypeError'
      && error.message.includes('fetch');
    if (isNetworkFailure) {
      throw new Error('Cannot connect to server. Please ensure the backend is running.');
    }
    throw error;
  }
}

View File

@@ -1,204 +0,0 @@
import { useEffect, useRef, useState } from 'react';
import type { MouseEvent } from 'react';
import { transcribeWithWhisper } from '../data/whisper';
import {
DEFAULT_WHISPER_MODE,
ENHANCEMENT_WHISPER_MODES,
MIC_BUTTON_STATES,
MIC_ERROR_BY_NAME,
MIC_NOT_AVAILABLE_ERROR,
MIC_NOT_SUPPORTED_ERROR,
MIC_SECURE_CONTEXT_ERROR,
MIC_TAP_DEBOUNCE_MS,
PROCESSING_STATE_DELAY_MS,
} from '../constants/constants';
import type { MicButtonState } from '../types/types';
type UseMicButtonControllerArgs = {
onTranscript?: (transcript: string) => void;
};
type UseMicButtonControllerResult = {
state: MicButtonState;
error: string | null;
isSupported: boolean;
handleButtonClick: (event?: MouseEvent<HTMLButtonElement>) => void;
};
// Maps an arbitrary thrown value to a user-facing microphone error string.
const getRecordingErrorMessage = (error: unknown): string => {
  const fallback = 'Microphone access failed';
  // Secure-context errors carry their own user-facing text; pass it through.
  if (error instanceof Error && error.message.includes('HTTPS')) {
    return error.message;
  }
  if (!(error instanceof DOMException)) {
    return fallback;
  }
  return MIC_ERROR_BY_NAME[error.name as keyof typeof MIC_ERROR_BY_NAME] || fallback;
};
// Picks the recorder container: webm when supported, otherwise mp4.
const getRecorderMimeType = (): string => {
  if (MediaRecorder.isTypeSupported('audio/webm')) {
    return 'audio/webm';
  }
  return 'audio/mp4';
};
/**
 * Controller hook for the mic button: owns the recording lifecycle
 * (idle -> recording -> transcribing [-> processing] -> idle), environment
 * support detection, and error reporting.
 *
 * Returns the current state, the latest error message (or null), whether
 * recording is supported here, and the click handler that toggles recording.
 */
export function useMicButtonController({
onTranscript,
}: UseMicButtonControllerArgs): UseMicButtonControllerResult {
const [state, setState] = useState<MicButtonState>(MIC_BUTTON_STATES.IDLE);
const [error, setError] = useState<string | null>(null);
const [isSupported, setIsSupported] = useState(true);
// Live recorder/stream handles kept in refs so event callbacks see current values.
const mediaRecorderRef = useRef<MediaRecorder | null>(null);
const streamRef = useRef<MediaStream | null>(null);
// Audio chunks accumulated between recorder start() and stop().
const chunksRef = useRef<BlobPart[]>([]);
// Timestamp of the last accepted tap, used for click debouncing.
const lastTapRef = useRef(0);
// Pending timeout id for the delayed switch to the 'processing' state.
const processingTimerRef = useRef<number | null>(null);
// Cancels the delayed 'processing' state transition, if scheduled.
const clearProcessingTimer = (): void => {
if (processingTimerRef.current !== null) {
window.clearTimeout(processingTimerRef.current);
processingTimerRef.current = null;
}
};
// Stops all tracks and drops the stream so the OS mic indicator turns off.
const stopStreamTracks = (): void => {
if (!streamRef.current) {
return;
}
streamRef.current.getTracks().forEach((track) => track.stop());
streamRef.current = null;
};
// Runs after the recorder fires 'stop': assembles the blob, transcribes it,
// and forwards any transcript to the caller.
const handleStopRecording = async (mimeType: string): Promise<void> => {
const audioBlob = new Blob(chunksRef.current, { type: mimeType });
// Release the microphone immediately once recording ends.
stopStreamTracks();
setState(MIC_BUTTON_STATES.TRANSCRIBING);
const whisperMode = window.localStorage.getItem('whisperMode') || DEFAULT_WHISPER_MODE;
const shouldShowProcessingState = ENHANCEMENT_WHISPER_MODES.has(whisperMode);
if (shouldShowProcessingState) {
// Enhancement modes take longer; surface a distinct 'processing' state
// only if transcription is still running after the delay.
processingTimerRef.current = window.setTimeout(() => {
setState(MIC_BUTTON_STATES.PROCESSING);
}, PROCESSING_STATE_DELAY_MS);
}
try {
const transcript = await transcribeWithWhisper(audioBlob);
if (transcript && onTranscript) {
onTranscript(transcript);
}
} catch (transcriptionError) {
const message = transcriptionError instanceof Error ? transcriptionError.message : 'Transcription error';
setError(message);
} finally {
clearProcessingTimer();
setState(MIC_BUTTON_STATES.IDLE);
}
};
// Requests microphone access and starts a MediaRecorder session.
const startRecording = async (): Promise<void> => {
try {
setError(null);
chunksRef.current = [];
if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
throw new Error(MIC_NOT_AVAILABLE_ERROR);
}
const stream = await navigator.mediaDevices.getUserMedia({ audio: true });
streamRef.current = stream;
const mimeType = getRecorderMimeType();
const recorder = new MediaRecorder(stream, { mimeType });
mediaRecorderRef.current = recorder;
recorder.ondataavailable = (event: BlobEvent) => {
if (event.data.size > 0) {
chunksRef.current.push(event.data);
}
};
recorder.onstop = () => {
void handleStopRecording(mimeType);
};
recorder.start();
setState(MIC_BUTTON_STATES.RECORDING);
} catch (recordingError) {
// Permission denied / no device / insecure context: clean up and report.
stopStreamTracks();
setError(getRecordingErrorMessage(recordingError));
setState(MIC_BUTTON_STATES.IDLE);
}
};
// Stops an active recording; if the recorder is not recording, just
// releases the stream and resets the state.
const stopRecording = (): void => {
if (mediaRecorderRef.current && mediaRecorderRef.current.state === 'recording') {
mediaRecorderRef.current.stop();
return;
}
stopStreamTracks();
setState(MIC_BUTTON_STATES.IDLE);
};
// Click handler: toggles recording; clicks during transcribing/processing
// are ignored because neither IDLE nor RECORDING matches.
const handleButtonClick = (event?: MouseEvent<HTMLButtonElement>): void => {
if (event) {
event.preventDefault();
event.stopPropagation();
}
if (!isSupported) {
return;
}
// Mobile tap handling can trigger duplicate click events in quick succession.
const now = Date.now();
if (now - lastTapRef.current < MIC_TAP_DEBOUNCE_MS) {
return;
}
lastTapRef.current = now;
if (state === MIC_BUTTON_STATES.IDLE) {
void startRecording();
return;
}
if (state === MIC_BUTTON_STATES.RECORDING) {
stopRecording();
}
};
useEffect(() => {
// getUserMedia needs both browser support and a secure context.
if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
setIsSupported(false);
setError(MIC_NOT_SUPPORTED_ERROR);
return;
}
if (location.protocol !== 'https:' && location.hostname !== 'localhost') {
setIsSupported(false);
setError(MIC_SECURE_CONTEXT_ERROR);
return;
}
setIsSupported(true);
setError(null);
}, []);
// Unmount cleanup: cancel pending timers and release the microphone.
useEffect(() => () => {
clearProcessingTimer();
stopStreamTracks();
}, []);
return {
state,
error,
isSupported,
handleButtonClick,
};
}

View File

@@ -1,2 +0,0 @@
// Lifecycle states rendered by the mic button UI.
export type MicButtonState = 'idle' | 'recording' | 'transcribing' | 'processing';

View File

@@ -1,32 +0,0 @@
import { useMicButtonController } from '../hooks/useMicButtonController';
import MicButtonView from './MicButtonView';
type MicButtonProps = {
onTranscript?: (transcript: string) => void;
className?: string;
mode?: string;
};
export default function MicButton({
onTranscript,
className = '',
mode: _mode,
}: MicButtonProps) {
const { state, error, isSupported, handleButtonClick } = useMicButtonController({
onTranscript,
});
// Keep `mode` in the public props for backwards compatibility.
void _mode;
return (
<MicButtonView
state={state}
error={error}
isSupported={isSupported}
className={className}
onButtonClick={handleButtonClick}
/>
);
}

View File

@@ -1,86 +0,0 @@
import { Brain, Loader2, Mic } from 'lucide-react';
import type { MouseEvent, ReactElement } from 'react';
import { BUTTON_BACKGROUND_BY_STATE, MIC_BUTTON_STATES } from '../constants/constants';
import type { MicButtonState } from '../types/types';
// Props for the presentational mic button view.
type MicButtonViewProps = {
state: MicButtonState;
error: string | null;
isSupported: boolean;
className: string;
onButtonClick: (event?: MouseEvent<HTMLButtonElement>) => void;
};
// Selects the icon for the current mic state; unsupported environments get a plain mic.
const getButtonIcon = (state: MicButtonState, isSupported: boolean): ReactElement => {
  if (!isSupported) {
    return <Mic className="h-5 w-5" />;
  }
  switch (state) {
    case MIC_BUTTON_STATES.TRANSCRIBING:
      return <Loader2 className="h-5 w-5 animate-spin" />;
    case MIC_BUTTON_STATES.PROCESSING:
      return <Brain className="h-5 w-5 animate-pulse" />;
    case MIC_BUTTON_STATES.RECORDING:
      return <Mic className="h-5 w-5 text-white" />;
    default:
      return <Mic className="h-5 w-5" />;
  }
};
/**
 * Presentational mic button: renders the state-colored button with the
 * current icon, an inline error tooltip, and pulse rings while
 * recording/processing.
 */
export default function MicButtonView({
state,
error,
isSupported,
className,
onButtonClick,
}: MicButtonViewProps) {
// Clicks are ignored while a transcription/enhancement request is in flight.
const isDisabled = !isSupported || state === MIC_BUTTON_STATES.TRANSCRIBING || state === MIC_BUTTON_STATES.PROCESSING;
const icon = getButtonIcon(state, isSupported);
return (
<div className="relative">
<button
type="button"
style={{ backgroundColor: BUTTON_BACKGROUND_BY_STATE[state] }}
className={`
touch-action-manipulation flex h-12
w-12 items-center justify-center
rounded-full text-white transition-all
duration-200 focus:outline-none focus:ring-2 focus:ring-blue-500
focus:ring-offset-2
dark:ring-offset-gray-800
${isDisabled ? 'cursor-not-allowed opacity-75' : 'cursor-pointer'}
${state === MIC_BUTTON_STATES.RECORDING ? 'animate-pulse' : ''}
hover:opacity-90
${className}
`}
onClick={onButtonClick}
disabled={isDisabled}
>
{icon}
</button>
{/* Error tooltip anchored below the button. */}
{error && (
<div
className="animate-fade-in absolute left-1/2 top-full z-10 mt-2
-translate-x-1/2 transform whitespace-nowrap rounded bg-red-500 px-2 py-1 text-xs
text-white"
>
{error}
</div>
)}
{/* Animated ring feedback: red while recording, purple while processing. */}
{state === MIC_BUTTON_STATES.RECORDING && (
<div className="pointer-events-none absolute -inset-1 animate-ping rounded-full border-2 border-red-500" />
)}
{state === MIC_BUTTON_STATES.PROCESSING && (
<div className="pointer-events-none absolute -inset-1 animate-ping rounded-full border-2 border-purple-500" />
)}
</div>
);
}

View File

@@ -2,21 +2,12 @@ import {
ArrowDown,
Brain,
Eye,
FileText,
Languages,
Maximize2,
Mic,
Sparkles,
} from 'lucide-react';
import type {
PreferenceToggleItem,
WhisperMode,
WhisperOption,
} from './types';
import type { PreferenceToggleItem } from './types';
export const HANDLE_POSITION_STORAGE_KEY = 'quickSettingsHandlePosition';
export const WHISPER_MODE_STORAGE_KEY = 'whisperMode';
export const WHISPER_MODE_CHANGED_EVENT = 'whisperModeChanged';
export const DEFAULT_HANDLE_POSITION = 50;
export const HANDLE_POSITION_MIN = 10;
@@ -64,30 +55,3 @@ export const INPUT_SETTING_TOGGLES: PreferenceToggleItem[] = [
icon: Languages,
},
];
// Options rendered by the whisper dictation quick-settings section.
export const WHISPER_OPTIONS: WhisperOption[] = [
{
value: 'default',
titleKey: 'quickSettings.whisper.modes.default',
descriptionKey: 'quickSettings.whisper.modes.defaultDescription',
icon: Mic,
},
{
value: 'prompt',
titleKey: 'quickSettings.whisper.modes.prompt',
descriptionKey: 'quickSettings.whisper.modes.promptDescription',
icon: Sparkles,
},
{
value: 'vibe',
titleKey: 'quickSettings.whisper.modes.vibe',
descriptionKey: 'quickSettings.whisper.modes.vibeDescription',
icon: FileText,
},
];
// Stored modes that should render as the 'vibe' option being selected.
export const VIBE_MODE_ALIASES: WhisperMode[] = [
'vibe',
'instructions',
'architect',
];

View File

@@ -1,59 +0,0 @@
import { useCallback, useState } from 'react';
import {
VIBE_MODE_ALIASES,
WHISPER_MODE_CHANGED_EVENT,
WHISPER_MODE_STORAGE_KEY,
} from '../constants';
import type { WhisperMode, WhisperOptionValue } from '../types';
// Every persistable whisper mode, including the legacy aliases of 'vibe'.
const ALL_VALID_MODES: WhisperMode[] = [
  'default',
  'prompt',
  'vibe',
  'instructions',
  'architect',
];

// Type guard: narrows an arbitrary string to WhisperMode.
const isWhisperMode = (value: string): value is WhisperMode =>
  ALL_VALID_MODES.includes(value as WhisperMode);

// Reads the persisted mode; falls back to 'default' on SSR, a missing key,
// or an unrecognized stored value.
const readStoredMode = (): WhisperMode => {
  if (typeof window === 'undefined') {
    return 'default';
  }
  const storedValue = localStorage.getItem(WHISPER_MODE_STORAGE_KEY);
  if (storedValue && isWhisperMode(storedValue)) {
    return storedValue;
  }
  return 'default';
};
/**
 * React hook exposing the persisted whisper dictation mode.
 *
 * Setting a mode writes it to localStorage and dispatches the change event
 * so other listeners (e.g. the transcription flow) can react.
 */
export function useWhisperMode() {
  const [whisperMode, setWhisperModeState] = useState<WhisperMode>(readStoredMode);

  const setWhisperMode = useCallback((value: WhisperOptionValue) => {
    setWhisperModeState(value);
    localStorage.setItem(WHISPER_MODE_STORAGE_KEY, value);
    window.dispatchEvent(new Event(WHISPER_MODE_CHANGED_EVENT));
  }, []);

  // 'vibe' is considered selected for any of its legacy alias modes too.
  const isOptionSelected = useCallback(
    (value: WhisperOptionValue) => (
      value === 'vibe'
        ? VIBE_MODE_ALIASES.includes(whisperMode)
        : whisperMode === value
    ),
    [whisperMode],
  );

  return { whisperMode, setWhisperMode, isOptionSelected };
}

View File

@@ -16,20 +16,4 @@ export type PreferenceToggleItem = {
icon: LucideIcon;
};
export type WhisperMode =
| 'default'
| 'prompt'
| 'vibe'
| 'instructions'
| 'architect';
export type WhisperOptionValue = 'default' | 'prompt' | 'vibe';
export type WhisperOption = {
value: WhisperOptionValue;
titleKey: string;
descriptionKey: string;
icon: LucideIcon;
};
export type QuickSettingsHandleStyle = CSSProperties;

View File

@@ -15,7 +15,6 @@ import type {
} from '../types';
import QuickSettingsSection from './QuickSettingsSection';
import QuickSettingsToggleRow from './QuickSettingsToggleRow';
import QuickSettingsWhisperSection from './QuickSettingsWhisperSection';
type QuickSettingsContentProps = {
isDarkMode: boolean;
@@ -73,8 +72,6 @@ export default function QuickSettingsContent({
{t('quickSettings.sendByCtrlEnterDescription')}
</p>
</QuickSettingsSection>
<QuickSettingsWhisperSection />
</div>
);
}

View File

@@ -1,44 +0,0 @@
import { useTranslation } from 'react-i18next';
import { TOGGLE_ROW_CLASS, WHISPER_OPTIONS } from '../constants';
import { useWhisperMode } from '../hooks/useWhisperMode';
import QuickSettingsSection from './QuickSettingsSection';
/**
 * Quick-settings section listing whisper dictation modes as radio options;
 * selection is persisted via useWhisperMode. Rendered with `hidden` until
 * dictation modes are reintroduced.
 */
export default function QuickSettingsWhisperSection() {
const { t } = useTranslation('settings');
const { setWhisperMode, isOptionSelected } = useWhisperMode();
return (
// This section stays hidden intentionally until dictation modes are reintroduced.
<QuickSettingsSection
title={t('quickSettings.sections.whisperDictation')}
className="hidden"
>
<div className="space-y-2">
{WHISPER_OPTIONS.map(({ value, icon: Icon, titleKey, descriptionKey }) => (
<label
key={value}
className={`${TOGGLE_ROW_CLASS} flex items-start`}
>
<input
type="radio"
name="whisperMode"
value={value}
checked={isOptionSelected(value)}
onChange={() => setWhisperMode(value)}
className="mt-0.5 h-4 w-4 border-gray-300 text-blue-600 focus:ring-blue-500 dark:border-gray-600 dark:bg-gray-800 dark:text-blue-500 dark:checked:bg-blue-600 dark:focus:ring-blue-400"
/>
<div className="ml-3 flex-1">
<span className="flex items-center gap-2 text-sm font-medium text-gray-900 dark:text-white">
<Icon className="h-4 w-4 text-gray-600 dark:text-gray-400" />
{t(titleKey)}
</span>
<p className="mt-1 text-xs text-gray-500 dark:text-gray-400">
{t(descriptionKey)}
</p>
</div>
</label>
))}
</div>
</QuickSettingsSection>
);
}

View File

@@ -55,8 +55,7 @@
"appearance": "Darstellung",
"toolDisplay": "Werkzeuganzeige",
"viewOptions": "Anzeigeoptionen",
"inputSettings": "Eingabeeinstellungen",
"whisperDictation": "Whisper-Diktat"
"inputSettings": "Eingabeeinstellungen"
},
"darkMode": "Darkmode",
"autoExpandTools": "Werkzeuge automatisch erweitern",
@@ -71,16 +70,6 @@
"openPanel": "Einstellungspanel öffnen",
"draggingStatus": "Wird gezogen...",
"toggleAndMove": "Klicken zum Umschalten, ziehen zum Verschieben"
},
"whisper": {
"modes": {
"default": "Standardmodus",
"defaultDescription": "Direkte Transkription deiner Sprache",
"prompt": "Prompt-Verbesserung",
"promptDescription": "Rohe Ideen in klare, detaillierte KI-Prompts umwandeln",
"vibe": "Vibe-Modus",
"vibeDescription": "Ideen als klare Agentenanweisungen mit Details formatieren"
}
}
},
"terminalShortcuts": {

View File

@@ -55,8 +55,7 @@
"appearance": "Appearance",
"toolDisplay": "Tool Display",
"viewOptions": "View Options",
"inputSettings": "Input Settings",
"whisperDictation": "Whisper Dictation"
"inputSettings": "Input Settings"
},
"darkMode": "Dark Mode",
"autoExpandTools": "Auto-expand tools",
@@ -71,16 +70,6 @@
"openPanel": "Open settings panel",
"draggingStatus": "Dragging...",
"toggleAndMove": "Click to toggle, drag to move"
},
"whisper": {
"modes": {
"default": "Default Mode",
"defaultDescription": "Direct transcription of your speech",
"prompt": "Prompt Enhancement",
"promptDescription": "Transform rough ideas into clear, detailed AI prompts",
"vibe": "Vibe Mode",
"vibeDescription": "Format ideas as clear agent instructions with details"
}
}
},
"terminalShortcuts": {
@@ -498,4 +487,4 @@
"tab": "tab",
"runningStatus": "running"
}
}
}

View File

@@ -55,8 +55,7 @@
"appearance": "外観",
"toolDisplay": "ツール表示",
"viewOptions": "表示オプション",
"inputSettings": "入力設定",
"whisperDictation": "Whisper音声入力"
"inputSettings": "入力設定"
},
"darkMode": "ダークモード",
"autoExpandTools": "ツールを自動展開",
@@ -71,16 +70,6 @@
"openPanel": "設定パネルを開く",
"draggingStatus": "ドラッグ中...",
"toggleAndMove": "クリックで切替、ドラッグで移動"
},
"whisper": {
"modes": {
"default": "標準モード",
"defaultDescription": "音声をそのまま文字起こしします",
"prompt": "プロンプト強化",
"promptDescription": "ラフなアイデアを明確で詳細なAIプロンプトに変換します",
"vibe": "バイブモード",
"vibeDescription": "アイデアを明確なエージェント指示に整形します"
}
}
},
"terminalShortcuts": {
@@ -492,4 +481,4 @@
"tab": "タブ",
"runningStatus": "実行中"
}
}
}

View File

@@ -55,8 +55,7 @@
"appearance": "외관",
"toolDisplay": "도구 표시",
"viewOptions": "보기 옵션",
"inputSettings": "입력 설정",
"whisperDictation": "Whisper 음성 인식"
"inputSettings": "입력 설정"
},
"darkMode": "다크 모드",
"autoExpandTools": "도구 자동 펼치기",
@@ -71,16 +70,6 @@
"openPanel": "설정 패널 열기",
"draggingStatus": "드래그 중...",
"toggleAndMove": "클릭하여 토글, 드래그하여 이동"
},
"whisper": {
"modes": {
"default": "기본 모드",
"defaultDescription": "음성을 그대로 텍스트로 변환",
"prompt": "프롬프트 향상",
"promptDescription": "거친 아이디어를 명확하고 상세한 AI 프롬프트로 변환",
"vibe": "Vibe 모드",
"vibeDescription": "아이디어를 상세한 에이전트 지침 형식으로 변환"
}
}
},
"terminalShortcuts": {
@@ -492,4 +481,4 @@
"tab": "탭",
"runningStatus": "실행 중"
}
}
}

View File

@@ -55,8 +55,7 @@
"appearance": "Внешний вид",
"toolDisplay": "Отображение инструментов",
"viewOptions": "Параметры просмотра",
"inputSettings": "Настройки ввода",
"whisperDictation": "Диктовка Whisper"
"inputSettings": "Настройки ввода"
},
"darkMode": "Темная тема",
"autoExpandTools": "Автоматически разворачивать инструменты",
@@ -71,16 +70,6 @@
"openPanel": "Открыть панель настроек",
"draggingStatus": "Перетаскивание...",
"toggleAndMove": "Нажмите для переключения, перетащите для перемещения"
},
"whisper": {
"modes": {
"default": "Режим по умолчанию",
"defaultDescription": "Прямая транскрипция вашей речи",
"prompt": "Улучшение запроса",
"promptDescription": "Преобразование грубых идей в четкие, детальные AI-запросы",
"vibe": "Режим Vibe",
"vibeDescription": "Форматирование идей как четких инструкций агента с деталями"
}
}
},
"terminalShortcuts": {
@@ -472,4 +461,4 @@
"tab": "вкладка",
"runningStatus": "запущен"
}
}
}

View File

@@ -55,8 +55,7 @@
"appearance": "外观",
"toolDisplay": "工具显示",
"viewOptions": "视图选项",
"inputSettings": "输入设置",
"whisperDictation": "Whisper 听写"
"inputSettings": "输入设置"
},
"darkMode": "深色模式",
"autoExpandTools": "自动展开工具",
@@ -71,16 +70,6 @@
"openPanel": "打开设置面板",
"draggingStatus": "正在拖拽...",
"toggleAndMove": "点击切换,拖拽移动"
},
"whisper": {
"modes": {
"default": "默认模式",
"defaultDescription": "直接转录您的语音",
"prompt": "提示词增强",
"promptDescription": "将粗略的想法转化为清晰、详细的 AI 提示词",
"vibe": "Vibe 模式",
"vibeDescription": "将想法格式化为带有详细说明的清晰智能体指令"
}
}
},
"terminalShortcuts": {
@@ -492,4 +481,4 @@
"tab": "标签",
"runningStatus": "运行中"
}
}
}

View File

@@ -147,13 +147,6 @@ export const api = {
headers: {}, // Let browser set Content-Type for FormData
}),
transcribe: (formData) =>
authenticatedFetch('/api/transcribe', {
method: 'POST',
body: formData,
headers: {}, // Let browser set Content-Type for FormData
}),
// TaskMaster endpoints
taskmaster: {
// Initialize TaskMaster in a project