mirror of
https://github.com/siteboon/claudecodeui.git
synced 2025-12-10 14:59:46 +00:00
Update package dependencies, add Git API routes, and implement audio transcription functionality. Introduce new components for Git management, enhance chat interface with microphone support, and improve UI elements for better user experience.
This commit is contained in:
156
server/index.js
156
server/index.js
@@ -28,8 +28,11 @@ const fs = require('fs').promises;
|
||||
const { spawn } = require('child_process');
|
||||
const os = require('os');
|
||||
const pty = require('node-pty');
|
||||
const fetch = require('node-fetch');
|
||||
|
||||
const { getProjects, getSessions, getSessionMessages, renameProject, deleteSession, deleteProject, addProjectManually } = require('./projects');
|
||||
const { spawnClaude, abortClaudeSession } = require('./claude-cli');
|
||||
const gitRoutes = require('./routes/git');
|
||||
|
||||
// File system watcher for projects folder
|
||||
let projectsWatcher = null;
|
||||
@@ -144,6 +147,9 @@ app.use(cors());
|
||||
app.use(express.json());
|
||||
app.use(express.static(path.join(__dirname, '../dist')));
|
||||
|
||||
// Git API Routes
|
||||
app.use('/api/git', gitRoutes);
|
||||
|
||||
// API Routes
|
||||
app.get('/api/config', (req, res) => {
|
||||
// Always use the server's actual IP and port for WebSocket connections
|
||||
@@ -651,6 +657,156 @@ function handleShellConnection(ws) {
|
||||
console.error('❌ Shell WebSocket error:', error);
|
||||
});
|
||||
}
|
||||
// Audio transcription endpoint.
// Accepts a multipart upload (field name "audio"), transcribes it with
// OpenAI Whisper, and optionally post-processes the result with GPT based
// on the "mode" form field ('default' | 'prompt' | 'vibe' | 'instructions'
// | 'architect'). Responds with { text } on success or { error } on failure.
// Requires OPENAI_API_KEY in the server environment.

// Built once at module load — the original re-required multer and rebuilt
// the middleware on every request.
const multer = require('multer');
const transcribeUpload = multer({ storage: multer.memoryStorage() });

// Maps an enhancement mode to the GPT request parameters for it.
// Returns null when the mode needs no GPT post-processing.
function buildEnhancementRequest(mode, transcribedText) {
  switch (mode) {
    case 'prompt':
      return {
        systemMessage: 'You are an expert prompt engineer who creates clear, detailed, and effective prompts.',
        temperature: 0.7,
        maxTokens: 800,
        prompt: `You are an expert prompt engineer. Transform the following rough instruction into a clear, detailed, and context-aware AI prompt.

Your enhanced prompt should:
1. Be specific and unambiguous
2. Include relevant context and constraints
3. Specify the desired output format
4. Use clear, actionable language
5. Include examples where helpful
6. Consider edge cases and potential ambiguities

Transform this rough instruction into a well-crafted prompt:
"${transcribedText}"

Enhanced prompt:`
      };

    case 'vibe':
    case 'instructions':
    case 'architect':
      return {
        systemMessage: 'You are a helpful assistant that formats ideas into clear, actionable instructions for AI agents.',
        temperature: 0.5, // Lower temperature for more controlled output
        maxTokens: 800,
        prompt: `Transform the following idea into clear, well-structured instructions that an AI agent can easily understand and execute.

IMPORTANT RULES:
- Format as clear, step-by-step instructions
- Add reasonable implementation details based on common patterns
- Only include details directly related to what was asked
- Do NOT add features or functionality not mentioned
- Keep the original intent and scope intact
- Use clear, actionable language an agent can follow

Transform this idea into agent-friendly instructions:
"${transcribedText}"

Agent instructions:`
      };

    default:
      // 'default' and unknown modes: return the raw transcription untouched.
      return null;
  }
}

app.post('/api/transcribe', (req, res) => {
  // Parse the multipart body; the async callback runs after multer finishes.
  transcribeUpload.single('audio')(req, res, async (err) => {
    if (err) {
      return res.status(400).json({ error: 'Failed to process audio file' });
    }

    if (!req.file) {
      return res.status(400).json({ error: 'No audio file provided' });
    }

    const apiKey = process.env.OPENAI_API_KEY;
    if (!apiKey) {
      return res.status(500).json({ error: 'OpenAI API key not configured. Please set OPENAI_API_KEY in server environment.' });
    }

    try {
      // Build the multipart request for the Whisper API.
      const FormData = require('form-data');
      const formData = new FormData();
      formData.append('file', req.file.buffer, {
        filename: req.file.originalname,
        contentType: req.file.mimetype
      });
      formData.append('model', 'whisper-1');
      formData.append('response_format', 'json');
      formData.append('language', 'en');

      // Uses the module-level `fetch` (node-fetch) — the original re-required
      // node-fetch here, shadowing the top-level binding.
      const response = await fetch('https://api.openai.com/v1/audio/transcriptions', {
        method: 'POST',
        headers: {
          'Authorization': `Bearer ${apiKey}`,
          ...formData.getHeaders()
        },
        body: formData
      });

      if (!response.ok) {
        const errorData = await response.json().catch(() => ({}));
        throw new Error(errorData.error?.message || `Whisper API error: ${response.status}`);
      }

      const data = await response.json();
      let transcribedText = data.text || '';

      // Text fields of the multipart form are available after multer runs.
      const mode = req.body.mode || 'default';

      // Nothing was transcribed — return empty without enhancement.
      if (!transcribedText) {
        return res.json({ text: '' });
      }

      // Default mode: return the transcription without GPT enhancement.
      if (mode === 'default') {
        return res.json({ text: transcribedText });
      }

      // Enhancement is best-effort: on any GPT failure, fall back to the
      // raw transcription rather than failing the request.
      try {
        const OpenAI = require('openai');
        const openai = new OpenAI({ apiKey });

        const enhancement = buildEnhancementRequest(mode, transcribedText);
        if (enhancement) {
          const completion = await openai.chat.completions.create({
            model: 'gpt-4o-mini',
            messages: [
              { role: 'system', content: enhancement.systemMessage },
              { role: 'user', content: enhancement.prompt }
            ],
            temperature: enhancement.temperature,
            max_tokens: enhancement.maxTokens
          });

          // Guard against an empty choices array — the original indexed
          // choices[0].message.content unconditionally.
          transcribedText = completion.choices[0]?.message?.content || transcribedText;
        }
      } catch (gptError) {
        console.error('GPT processing error:', gptError);
        // Fall back to original transcription if GPT fails
      }

      res.json({ text: transcribedText });

    } catch (error) {
      console.error('Transcription error:', error);
      res.status(500).json({ error: error.message });
    }
  });
});
|
||||
|
||||
|
||||
// Serve React app for all other routes
|
||||
app.get('*', (req, res) => {
|
||||
|
||||
Reference in New Issue
Block a user