mirror of
https://github.com/siteboon/claudecodeui.git
synced 2026-04-17 19:11:33 +00:00
Merge branch 'main' into feature/backend-ts-support-andunification-of-auth-settings-on-frontend
This commit is contained in:
361
server/cli.js
361
server/cli.js
@@ -7,6 +7,7 @@
|
||||
* Commands:
|
||||
* (no args) - Start the server (default)
|
||||
* start - Start the server
|
||||
* sandbox - Manage Docker sandbox environments
|
||||
* status - Show configuration and data locations
|
||||
* help - Show help information
|
||||
* version - Show version information
|
||||
@@ -154,6 +155,7 @@ Usage:
|
||||
|
||||
Commands:
|
||||
start Start the CloudCLI server (default)
|
||||
sandbox Manage Docker sandbox environments
|
||||
status Show configuration and data locations
|
||||
update Update to the latest version
|
||||
help Show this help information
|
||||
@@ -168,8 +170,7 @@ Options:
|
||||
Examples:
|
||||
$ cloudcli # Start with defaults
|
||||
$ cloudcli --port 8080 # Start on port 8080
|
||||
$ cloudcli -p 3000 # Short form for port
|
||||
$ cloudcli start --port 4000 # Explicit start command
|
||||
$ cloudcli sandbox ~/my-project # Run in a Docker sandbox
|
||||
$ cloudcli status # Show configuration
|
||||
|
||||
Environment Variables:
|
||||
@@ -248,6 +249,353 @@ async function updatePackage() {
|
||||
}
|
||||
}
|
||||
|
||||
// ── Sandbox command ─────────────────────────────────────────
|
||||
|
||||
// Container image for each supported agent. Used as the default
// `--template` when the user does not pass one explicitly
// (see parseSandboxArgs).
const SANDBOX_TEMPLATES = {
  claude: 'docker.io/cloudcliai/sandbox:claude-code',
  codex: 'docker.io/cloudcliai/sandbox:codex',
  gemini: 'docker.io/cloudcliai/sandbox:gemini',
};

// Name of the sbx secret each agent requires; checked before `create`
// via `sbx secret ls` in sandboxCommand.
const SANDBOX_SECRETS = {
  claude: 'anthropic',
  codex: 'openai',
  gemini: 'google',
};
|
||||
|
||||
/**
 * Parse `cloudcli sandbox ...` arguments into a normalized options object.
 *
 * Recognized shapes:
 *   cloudcli sandbox <workspace> [flags]   -> subcommand 'create'
 *   cloudcli sandbox <subcommand> [name]   -> ls/stop/start/rm/logs/help
 *
 * @param {string[]} args - argv after the `sandbox` keyword.
 * @returns {{subcommand: string, workspace: ?string, agent: string,
 *            name: ?string, port: number, template: string, env: string[]}}
 */
function parseSandboxArgs(args) {
  const result = {
    subcommand: null,
    workspace: null,
    agent: 'claude',
    name: null,
    port: 3001,
    template: null,
    env: [],
  };

  const subcommands = ['ls', 'stop', 'start', 'rm', 'logs', 'help'];

  for (let i = 0; i < args.length; i++) {
    const arg = args[i];

    if (i === 0 && subcommands.includes(arg)) {
      result.subcommand = arg;
    } else if (arg === '--agent' || arg === '-a') {
      // Keep the default when the flag is the last token (missing value).
      result.agent = args[++i] ?? result.agent;
    } else if (arg === '--name' || arg === '-n') {
      result.name = args[++i] ?? null;
    } else if (arg === '--port') {
      // Ignore a missing or non-numeric value instead of propagating NaN
      // into the later port-forwarding step.
      const port = Number.parseInt(args[++i], 10);
      if (Number.isInteger(port)) {
        result.port = port;
      }
    } else if (arg === '--template' || arg === '-t') {
      result.template = args[++i] ?? null;
    } else if (arg === '--env' || arg === '-e') {
      const pair = args[++i];
      if (pair !== undefined) {
        result.env.push(pair);
      }
    } else if (!arg.startsWith('-')) {
      if (!result.subcommand) {
        result.workspace = arg;
      } else {
        result.name = arg; // for stop/start/rm/logs <name>
      }
    }
  }

  // Default subcommand based on what we got
  if (!result.subcommand) {
    result.subcommand = 'create';
  }

  // Derive name from workspace path if not set (basename of the resolved
  // workspace directory, with a leading ~ expanded to the home directory).
  if (!result.name && result.workspace) {
    result.name = path.basename(path.resolve(result.workspace.replace(/^~/, os.homedir())));
  }

  // Default template from agent
  if (!result.template) {
    result.template = SANDBOX_TEMPLATES[result.agent] || SANDBOX_TEMPLATES.claude;
  }

  return result;
}
|
||||
|
||||
/**
 * Print usage information for the `cloudcli sandbox` command group.
 * Pure console output — takes no arguments and returns nothing.
 */
function showSandboxHelp() {
  console.log(`
${c.bright('CloudCLI Sandbox')} — Run CloudCLI inside Docker Sandboxes

Usage:
  cloudcli sandbox <workspace>          Create and start a sandbox
  cloudcli sandbox <subcommand> [name]  Manage sandboxes

Subcommands:
  ${c.bright('(default)')}   Create a sandbox and start the web UI
  ${c.bright('ls')}          List all sandboxes
  ${c.bright('start')}       Restart a stopped sandbox and re-launch the web UI
  ${c.bright('stop')}        Stop a sandbox (preserves state)
  ${c.bright('rm')}          Remove a sandbox
  ${c.bright('logs')}        Show CloudCLI server logs
  ${c.bright('help')}        Show this help

Options:
  -a, --agent <agent>     Agent to use: claude, codex, gemini (default: claude)
  -n, --name <name>       Sandbox name (default: derived from workspace folder)
  -t, --template <image>  Custom template image
  -e, --env <KEY=VALUE>   Set environment variable (repeatable)
  --port <port>           Host port for the web UI (default: 3001)

Examples:
  $ cloudcli sandbox ~/my-project
  $ cloudcli sandbox ~/my-project --agent codex --port 8080
  $ cloudcli sandbox ~/my-project --env SERVER_PORT=8080 --env HOST=0.0.0.0
  $ cloudcli sandbox ls
  $ cloudcli sandbox stop my-project
  $ cloudcli sandbox start my-project
  $ cloudcli sandbox rm my-project

Prerequisites:
  1. Install sbx CLI: https://docs.docker.com/ai/sandboxes/get-started/
  2. Authenticate and store your API key:
       sbx login
       sbx secret set -g anthropic   # for Claude
       sbx secret set -g openai      # for Codex
       sbx secret set -g google      # for Gemini

Advanced usage:
  For branch mode, multiple workspaces, memory limits, network policies,
  or passing prompts to the agent, use sbx directly with the template:

    sbx run --template docker.io/cloudcliai/sandbox:claude-code claude ~/my-project --branch my-feature
    sbx run --template docker.io/cloudcliai/sandbox:claude-code claude ~/project ~/libs:ro --memory 8g

Full Docker Sandboxes docs: https://docs.docker.com/ai/sandboxes/usage/
`);
}
|
||||
|
||||
/**
 * Entry point for `cloudcli sandbox ...`.
 *
 * Dispatches the sandbox subcommands (ls/stop/rm/logs/start/create) on top
 * of the `sbx` CLI. All sbx invocations go through execFileSync with an
 * argv array (no shell), and the sandbox name is validated against
 * /^[\w-]+$/ before it is ever placed on a command line.
 *
 * @param {string[]} args - CLI args after the `sandbox` keyword.
 */
async function sandboxCommand(args) {
  const { execFileSync, spawn: spawnProcess } = await import('child_process');

  // Safe execution — uses execFileSync (no shell) to prevent injection
  const sbx = (subcmd, opts = {}) => {
    const result = execFileSync('sbx', subcmd, {
      encoding: 'utf8',
      stdio: opts.inherit ? 'inherit' : 'pipe',
    });
    return result || '';
  };

  // Start the CloudCLI web server inside the sandbox (fixed argv string,
  // no interpolation; `&` detaches it from the exec session).
  const launchServer = (name) => {
    console.log(`${c.info('▶')} Launching CloudCLI web server...`);
    sbx(['exec', name, 'bash', '-c', 'cloudcli start --port 3001 &']);
  };

  // Publish hostPort -> 3001 inside the sandbox. If the host port is taken,
  // retry once on hostPort + 1. Returns the port that was actually bound;
  // exits the process when both candidate ports are in use.
  // (Shared by the `start` and `create` cases, which previously duplicated
  // this logic verbatim.)
  const forwardPort = (name, hostPort) => {
    console.log(`${c.info('▶')} Forwarding port ${hostPort} → 3001...`);
    try {
      sbx(['ports', name, '--publish', `${hostPort}:3001`]);
      return hostPort;
    } catch (e) {
      const msg = e.stdout || e.stderr || e.message || '';
      if (!msg.includes('address already in use')) {
        throw e; // unrelated failure — surface it
      }
      const altPort = hostPort + 1;
      console.log(`${c.warn('⚠')} Port ${hostPort} in use, trying ${altPort}...`);
      try {
        sbx(['ports', name, '--publish', `${altPort}:3001`]);
        return altPort;
      } catch {
        console.error(`${c.error('❌')} Ports ${hostPort} and ${altPort} both in use. Use --port to specify a free port.`);
        process.exit(1);
      }
    }
  };

  const opts = parseSandboxArgs(args);

  if (opts.subcommand === 'help') {
    showSandboxHelp();
    return;
  }

  // Validate name (alphanumeric, hyphens, underscores only) — the name is
  // later passed to sbx, so keep it strictly safe.
  if (opts.name && !/^[\w-]+$/.test(opts.name)) {
    console.error(`\n${c.error('❌')} Invalid sandbox name: ${opts.name}`);
    console.log(`  Names may only contain letters, numbers, hyphens, and underscores.\n`);
    process.exit(1);
  }

  // Check sbx is installed
  try {
    sbx(['version']);
  } catch {
    console.error(`\n${c.error('❌')} ${c.bright('sbx')} CLI not found.\n`);
    console.log(`  Install it from: ${c.info('https://docs.docker.com/ai/sandboxes/get-started/')}`);
    console.log(`  Then run: ${c.bright('sbx login')}`);
    console.log(`  And store your API key: ${c.bright('sbx secret set -g anthropic')}\n`);
    process.exit(1);
  }

  switch (opts.subcommand) {

    case 'ls':
      sbx(['ls'], { inherit: true });
      break;

    case 'stop':
      if (!opts.name) {
        console.error(`\n${c.error('❌')} Sandbox name required: cloudcli sandbox stop <name>\n`);
        process.exit(1);
      }
      sbx(['stop', opts.name], { inherit: true });
      break;

    case 'rm':
      if (!opts.name) {
        console.error(`\n${c.error('❌')} Sandbox name required: cloudcli sandbox rm <name>\n`);
        process.exit(1);
      }
      sbx(['rm', opts.name], { inherit: true });
      break;

    case 'logs':
      if (!opts.name) {
        console.error(`\n${c.error('❌')} Sandbox name required: cloudcli sandbox logs <name>\n`);
        process.exit(1);
      }
      try {
        sbx(['exec', opts.name, 'bash', '-c', 'cat /tmp/cloudcli-ui.log'], { inherit: true });
      } catch (e) {
        console.error(`\n${c.error('❌')} Could not read logs: ${e.message || 'Is the sandbox running?'}\n`);
      }
      break;

    case 'start': {
      if (!opts.name) {
        console.error(`\n${c.error('❌')} Sandbox name required: cloudcli sandbox start <name>\n`);
        process.exit(1);
      }
      console.log(`\n${c.info('▶')} Starting sandbox ${c.bright(opts.name)}...`);
      // `sbx run` reattaches a session and keeps the sandbox alive;
      // detach it so this CLI process can exit independently.
      const restartRun = spawnProcess('sbx', ['run', opts.name], {
        detached: true,
        stdio: ['ignore', 'ignore', 'ignore'],
      });
      restartRun.unref();
      // Give the sandbox time to come up (no readiness API — fixed delay).
      await new Promise(resolve => setTimeout(resolve, 5000));

      launchServer(opts.name);
      opts.port = forwardPort(opts.name, opts.port);

      console.log(`\n${c.ok('✔')} ${c.bright('CloudCLI is ready!')}`);
      console.log(`  ${c.info('→')} ${c.bright(`http://localhost:${opts.port}`)}\n`);
      break;
    }

    case 'create': {
      if (!opts.workspace) {
        console.error(`\n${c.error('❌')} Workspace path required: cloudcli sandbox <path>\n`);
        console.log(`  Example: ${c.bright('cloudcli sandbox ~/my-project')}\n`);
        process.exit(1);
      }

      // Expand ~ to the home directory; otherwise resolve relative paths.
      const workspace = opts.workspace.startsWith('~')
        ? opts.workspace.replace(/^~/, os.homedir())
        : path.resolve(opts.workspace);

      if (!fs.existsSync(workspace)) {
        console.error(`\n${c.error('❌')} Workspace path not found: ${c.dim(workspace)}\n`);
        process.exit(1);
      }

      const secret = SANDBOX_SECRETS[opts.agent] || 'anthropic';

      // Check if the required secret is stored
      try {
        const secretList = sbx(['secret', 'ls']);
        if (!secretList.includes(secret)) {
          console.error(`\n${c.error('❌')} No ${c.bright(secret)} API key found.\n`);
          console.log(`  Run: ${c.bright(`sbx secret set -g ${secret}`)}\n`);
          process.exit(1);
        }
      } catch { /* sbx secret ls not available, skip check */ }

      console.log(`\n${c.bright('CloudCLI Sandbox')}`);
      console.log(c.dim('─'.repeat(50)));
      console.log(`  Agent:     ${c.info(opts.agent)} ${c.dim(`(${secret} credentials)`)}`);
      console.log(`  Workspace: ${c.dim(workspace)}`);
      console.log(`  Name:      ${c.dim(opts.name)}`);
      console.log(`  Template:  ${c.dim(opts.template)}`);
      console.log(`  Port:      ${c.dim(String(opts.port))}`);
      if (opts.env.length > 0) {
        console.log(`  Env:       ${c.dim(opts.env.join(', '))}`);
      }
      console.log(c.dim('─'.repeat(50)));

      // Step 1: Launch sandbox with sbx run in background.
      // sbx run creates the sandbox (or reconnects) AND holds an active
      // session, which prevents the sandbox from auto-stopping.
      console.log(`\n${c.info('▶')} Creating sandbox ${c.bright(opts.name)}...`);
      const bgRun = spawnProcess('sbx', [
        'run', '--template', opts.template, '--name', opts.name, opts.agent, workspace,
      ], {
        detached: true,
        stdio: ['ignore', 'ignore', 'ignore'],
      });
      bgRun.unref();
      // Wait for sandbox to be ready (no readiness API — fixed delay).
      await new Promise(resolve => setTimeout(resolve, 5000));

      // Step 2: Inject environment variables
      if (opts.env.length > 0) {
        console.log(`${c.info('▶')} Setting environment variables...`);
        const valid = opts.env.filter(e => /^\w+=.+$/.test(e));
        const invalid = opts.env.filter(e => !/^\w+=.+$/.test(e));
        if (valid.length > 0) {
          const exports = valid.map(e => `export ${e}`).join('\n');
          // Escape embedded single quotes (' -> '\'' ) so a value cannot
          // break out of the single-quoted echo payload and inject shell
          // commands into the bash -c string below.
          const quoted = exports.replace(/'/g, `'\\''`);
          sbx(['exec', opts.name, 'bash', '-c', `echo '${quoted}' >> /etc/sandbox-persistent.sh`]);
        }
        if (invalid.length > 0) {
          console.log(`${c.warn('⚠')} Skipped invalid env vars: ${invalid.join(', ')} (expected KEY=VALUE)`);
        }
      }

      // Step 3: Start CloudCLI inside the sandbox
      launchServer(opts.name);

      // Step 4: Forward port
      opts.port = forwardPort(opts.name, opts.port);

      // Done
      console.log(`\n${c.ok('✔')} ${c.bright('CloudCLI is ready!')}`);
      console.log(`  ${c.info('→')} Open ${c.bright(`http://localhost:${opts.port}`)}`);
      console.log(`\n${c.dim('  Manage with:')}`);
      console.log(`    ${c.dim('$')} sbx ls`);
      console.log(`    ${c.dim('$')} sbx stop ${opts.name}`);
      console.log(`    ${c.dim('$')} sbx start ${opts.name}`);
      console.log(`    ${c.dim('$')} sbx rm ${opts.name}`);
      console.log(`\n${c.dim('  Or install globally:')} npm install -g @cloudcli-ai/cloudcli\n`);
      break;
    }

    default:
      showSandboxHelp();
  }
}
|
||||
|
||||
// ── Server ──────────────────────────────────────────────────
|
||||
|
||||
// Start the server
|
||||
async function startServer() {
|
||||
// Check for updates silently on startup
|
||||
@@ -278,6 +626,10 @@ function parseArgs(args) {
|
||||
parsed.command = 'version';
|
||||
} else if (!arg.startsWith('-')) {
|
||||
parsed.command = arg;
|
||||
if (arg === 'sandbox') {
|
||||
parsed.remainingArgs = args.slice(i + 1);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -287,7 +639,7 @@ function parseArgs(args) {
|
||||
// Main CLI handler
|
||||
async function main() {
|
||||
const args = process.argv.slice(2);
|
||||
const { command, options } = parseArgs(args);
|
||||
const { command, options, remainingArgs } = parseArgs(args);
|
||||
|
||||
// Apply CLI options to environment variables
|
||||
if (options.serverPort) {
|
||||
@@ -303,6 +655,9 @@ async function main() {
|
||||
case 'start':
|
||||
await startServer();
|
||||
break;
|
||||
case 'sandbox':
|
||||
await sandboxCommand(remainingArgs || []);
|
||||
break;
|
||||
case 'status':
|
||||
case 'info':
|
||||
showStatus();
|
||||
|
||||
@@ -435,13 +435,20 @@ app.post('/api/system/update', authenticateToken, async (req, res) => {
|
||||
|
||||
console.log('Starting system update from directory:', projectRoot);
|
||||
|
||||
// Run the update command based on install mode
|
||||
const updateCommand = installMode === 'git'
|
||||
? 'git checkout main && git pull && npm install'
|
||||
: 'npm install -g @cloudcli-ai/cloudcli@latest';
|
||||
// Platform deployments use their own update workflow from the project root.
|
||||
const updateCommand = IS_PLATFORM
|
||||
// In platform, husky and dev dependencies are not needed
|
||||
? 'npm run update:platform'
|
||||
: installMode === 'git'
|
||||
? 'git checkout main && git pull && npm install'
|
||||
: 'npm install -g @cloudcli-ai/cloudcli@latest';
|
||||
|
||||
const updateCwd = IS_PLATFORM || installMode === 'git'
|
||||
? projectRoot
|
||||
: os.homedir();
|
||||
|
||||
const child = spawn('sh', ['-c', updateCommand], {
|
||||
cwd: installMode === 'git' ? projectRoot : os.homedir(),
|
||||
cwd: updateCwd,
|
||||
env: process.env
|
||||
});
|
||||
|
||||
|
||||
@@ -2,7 +2,6 @@ import express from 'express';
|
||||
import { promises as fs } from 'fs';
|
||||
import path from 'path';
|
||||
import os from 'os';
|
||||
import { spawn } from 'child_process';
|
||||
import sqlite3 from 'sqlite3';
|
||||
import { open } from 'sqlite';
|
||||
import crypto from 'crypto';
|
||||
@@ -578,221 +577,4 @@ router.get('/sessions', async (req, res) => {
|
||||
});
|
||||
}
|
||||
});
|
||||
|
||||
// GET /api/cursor/sessions/:sessionId - Get specific Cursor session from SQLite
//
// Opens ~/.cursor/chats/<md5(projectPath)>/<sessionId>/store.db read-only,
// reconstructs the message DAG stored in the `blobs` table, topologically
// sorts it so messages come out in chronological order, decodes the `meta`
// table, and returns { session: { id, projectPath, messages, metadata, cwdId } }.
router.get('/sessions/:sessionId', async (req, res) => {
  try {
    const { sessionId } = req.params;
    const { projectPath } = req.query;

    // Calculate cwdID hash for the project path.
    // Cursor keys its chat store by md5 of the workspace cwd; fall back to
    // this server's cwd when the client omits projectPath.
    const cwdId = crypto.createHash('md5').update(projectPath || process.cwd()).digest('hex');
    const storeDbPath = path.join(os.homedir(), '.cursor', 'chats', cwdId, sessionId, 'store.db');

    // Open SQLite database (read-only — we never mutate Cursor's store)
    const db = await open({
      filename: storeDbPath,
      driver: sqlite3.Database,
      mode: sqlite3.OPEN_READONLY
    });

    // Get all blobs to build the DAG structure
    const allBlobs = await db.all(`
      SELECT rowid, id, data FROM blobs
    `);

    // Build the DAG structure from parent-child relationships
    const blobMap = new Map(); // id -> blob data
    const parentRefs = new Map(); // blob id -> [parent blob ids]
    const childRefs = new Map(); // blob id -> [child blob ids]
    const jsonBlobs = []; // Clean JSON messages

    for (const blob of allBlobs) {
      blobMap.set(blob.id, blob);

      // Check if this is a JSON blob (actual message) or protobuf (DAG structure)
      if (blob.data && blob.data[0] === 0x7B) { // Starts with '{' - JSON blob
        try {
          const parsed = JSON.parse(blob.data.toString('utf8'));
          jsonBlobs.push({ ...blob, parsed });
        } catch (e) {
          console.log('Failed to parse JSON blob:', blob.rowid);
        }
      } else if (blob.data) { // Protobuf blob - extract parent references
        const parents = [];
        let i = 0;

        // Scan for parent references (0x0A 0x20 followed by 32-byte hash).
        // NOTE(review): 0x0A 0x20 is presumably a protobuf length-delimited
        // field tag with length 32 — confirm against Cursor's schema; a raw
        // byte scan can also match inside unrelated fields.
        while (i < blob.data.length - 33) {
          if (blob.data[i] === 0x0A && blob.data[i+1] === 0x20) {
            const parentHash = blob.data.slice(i+2, i+34).toString('hex');
            // Only keep hashes naming an already-seen blob (filters false
            // hits). NOTE(review): this assumes parents appear at lower
            // rowids than their children, since blobMap is filled in this
            // same loop — verify that ordering holds.
            if (blobMap.has(parentHash)) {
              parents.push(parentHash);
            }
            i += 34;
          } else {
            i++;
          }
        }

        if (parents.length > 0) {
          parentRefs.set(blob.id, parents);
          // Update child references
          for (const parentId of parents) {
            if (!childRefs.has(parentId)) {
              childRefs.set(parentId, []);
            }
            childRefs.get(parentId).push(blob.id);
          }
        }
      }
    }

    // Perform topological sort to get chronological order
    const visited = new Set();
    const sorted = [];

    // DFS-based topological sort (each node is emitted only after all of
    // its parents)
    function visit(nodeId) {
      if (visited.has(nodeId)) return;
      visited.add(nodeId);

      // Visit all parents first (dependencies)
      const parents = parentRefs.get(nodeId) || [];
      for (const parentId of parents) {
        visit(parentId);
      }

      // Add this node after all its parents
      const blob = blobMap.get(nodeId);
      if (blob) {
        sorted.push(blob);
      }
    }

    // Start with nodes that have no parents (roots)
    for (const blob of allBlobs) {
      if (!parentRefs.has(blob.id)) {
        visit(blob.id);
      }
    }

    // Visit any remaining nodes (disconnected components)
    for (const blob of allBlobs) {
      visit(blob.id);
    }

    // Now extract JSON messages in the order they appear in the sorted DAG
    const messageOrder = new Map(); // JSON blob id -> order index
    let orderIndex = 0;

    for (const blob of sorted) {
      // Check if this blob references any JSON messages
      if (blob.data && blob.data[0] !== 0x7B) { // Protobuf blob
        // Look for JSON blob references (raw byte search for the JSON
        // blob's id inside the protobuf payload)
        for (const jsonBlob of jsonBlobs) {
          try {
            const jsonIdBytes = Buffer.from(jsonBlob.id, 'hex');
            if (blob.data.includes(jsonIdBytes)) {
              if (!messageOrder.has(jsonBlob.id)) {
                messageOrder.set(jsonBlob.id, orderIndex++);
              }
            }
          } catch (e) {
            // Skip if can't convert ID
          }
        }
      }
    }

    // Sort JSON blobs by their appearance order in the DAG
    // (blobs never referenced by a protobuf node sink to the end, ordered
    // by rowid)
    const sortedJsonBlobs = jsonBlobs.sort((a, b) => {
      const orderA = messageOrder.get(a.id) ?? Number.MAX_SAFE_INTEGER;
      const orderB = messageOrder.get(b.id) ?? Number.MAX_SAFE_INTEGER;
      if (orderA !== orderB) return orderA - orderB;
      // Fallback to rowid if not in order map
      return a.rowid - b.rowid;
    });

    // Use sorted JSON blobs (1-based sequence numbers; keep original rowid)
    const blobs = sortedJsonBlobs.map((blob, idx) => ({
      ...blob,
      sequence_num: idx + 1,
      original_rowid: blob.rowid
    }));

    // Get metadata from meta table
    const metaRows = await db.all(`
      SELECT key, value FROM meta
    `);

    // Parse metadata
    let metadata = {};
    for (const row of metaRows) {
      if (row.value) {
        try {
          // Try to decode as hex-encoded JSON
          const hexMatch = row.value.toString().match(/^[0-9a-fA-F]+$/);
          if (hexMatch) {
            const jsonStr = Buffer.from(row.value, 'hex').toString('utf8');
            metadata[row.key] = JSON.parse(jsonStr);
          } else {
            metadata[row.key] = row.value.toString();
          }
        } catch (e) {
          // Fall back to the raw string when hex/JSON decoding fails
          metadata[row.key] = row.value.toString();
        }
      }
    }

    // Extract messages from sorted JSON blobs
    const messages = [];
    for (const blob of blobs) {
      try {
        // We already parsed JSON blobs earlier
        const parsed = blob.parsed;

        if (parsed) {
          // Filter out ONLY system messages at the server level
          // Check both direct role and nested message.role
          const role = parsed?.role || parsed?.message?.role;
          if (role === 'system') {
            continue; // Skip only system messages
          }
          messages.push({
            id: blob.id,
            sequence: blob.sequence_num,
            rowid: blob.original_rowid,
            content: parsed
          });
        }
      } catch (e) {
        // Skip blobs that cause errors
        console.log(`Skipping blob ${blob.id}: ${e.message}`);
      }
    }

    await db.close();

    res.json({
      success: true,
      session: {
        id: sessionId,
        projectPath: projectPath,
        messages: messages,
        metadata: metadata,
        cwdId: cwdId
      }
    });

  } catch (error) {
    console.error('Error reading Cursor session:', error);
    res.status(500).json({
      error: 'Failed to read Cursor session',
      details: error.message
    });
  }
});
|
||||
|
||||
export default router;
|
||||
Reference in New Issue
Block a user