refactor: improve session limit and offset validation in provider routes

This commit is contained in: (branch list not captured)
Author: Haileyesus
Date: 2026-04-29 11:46:53 +03:00
parent 10f35c238d
commit 0f93ef2781
4 changed files with 105 additions and 48 deletions

View File

@@ -338,21 +338,28 @@ router.get(
const limitRaw = readOptionalQueryString(req.query.limit);
const offsetRaw = readOptionalQueryString(req.query.offset);
const limit = limitRaw === undefined ? null : Number.parseInt(limitRaw, 10);
const offset = offsetRaw === undefined ? 0 : Number.parseInt(offsetRaw, 10);
if (limitRaw !== undefined && Number.isNaN(limit)) {
throw new AppError('limit must be a valid integer.', {
code: 'INVALID_QUERY_PARAMETER',
statusCode: 400,
});
let limit: number | null = null;
if (limitRaw !== undefined) {
const parsedLimit = Number.parseInt(limitRaw, 10);
if (Number.isNaN(parsedLimit) || parsedLimit < 0) {
throw new AppError('limit must be a non-negative integer.', {
code: 'INVALID_QUERY_PARAMETER',
statusCode: 400,
});
}
limit = parsedLimit;
}
if (offsetRaw !== undefined && Number.isNaN(offset)) {
throw new AppError('offset must be a valid integer.', {
code: 'INVALID_QUERY_PARAMETER',
statusCode: 400,
});
let offset = 0;
if (offsetRaw !== undefined) {
const parsedOffset = Number.parseInt(offsetRaw, 10);
if (Number.isNaN(parsedOffset) || parsedOffset < 0) {
throw new AppError('offset must be a non-negative integer.', {
code: 'INVALID_QUERY_PARAMETER',
statusCode: 400,
});
}
offset = parsedOffset;
}
const result = await sessionsService.fetchHistory(sessionId, {

View File

@@ -196,10 +196,14 @@ function createWordMatcher(
return phraseRegex.test(text);
}
if (phraseRegex.test(text) || words.length === 1) {
if (phraseRegex.test(text)) {
return true;
}
if (words.length === 1) {
return allWordsMatch(text.toLowerCase());
}
return allWordsMatch(text.toLowerCase());
};
@@ -534,31 +538,47 @@ async function findMatchedFileKeys(
const requireExactPhrase = words.length > 1 && normalizedQuery.length > 0;
if (requireExactPhrase) {
const matchedForPhrase = new Set<string>();
const fileChunks = chunkArray(
searchablePathEntries.map((entry) => entry.absolutePath),
RIPGREP_FILE_CHUNK_SIZE,
);
let matchedForPhrase = searchablePathEntries.slice();
let nextChunkIndex = 0;
const workerCount = Math.min(RIPGREP_CHUNK_CONCURRENCY, fileChunks.length);
const workers = Array.from({ length: workerCount }, async () => {
while (nextChunkIndex < fileChunks.length && !signal?.aborted) {
const currentIndex = nextChunkIndex;
nextChunkIndex += 1;
const chunkMatches = await runRipgrepFilesWithMatches(normalizedQuery, fileChunks[currentIndex], signal);
for (const matchedPath of chunkMatches) {
matchedForPhrase.add(matchedPath);
}
// Keep ripgrep as an over-approximation for exact phrase mode by requiring
// each word to appear somewhere in the file, then defer strict phrase
// validation to the in-memory matcher.
for (const word of words) {
if (signal?.aborted) {
return new Set();
}
});
await Promise.all(workers);
if (signal?.aborted) {
return new Set();
const matchedForWord = new Set<string>();
const fileChunks = chunkArray(
matchedForPhrase.map((entry) => entry.absolutePath),
RIPGREP_FILE_CHUNK_SIZE,
);
let nextChunkIndex = 0;
const workerCount = Math.min(RIPGREP_CHUNK_CONCURRENCY, fileChunks.length);
const workers = Array.from({ length: workerCount }, async () => {
while (nextChunkIndex < fileChunks.length && !signal?.aborted) {
const currentIndex = nextChunkIndex;
nextChunkIndex += 1;
const chunkMatches = await runRipgrepFilesWithMatches(word, fileChunks[currentIndex], signal);
for (const matchedPath of chunkMatches) {
matchedForWord.add(matchedPath);
}
}
});
await Promise.all(workers);
if (signal?.aborted) {
return new Set();
}
matchedForPhrase = matchedForPhrase.filter((entry) => matchedForWord.has(entry.normalizedPath));
if (matchedForPhrase.length === 0) {
break;
}
}
return matchedForPhrase;
return new Set(matchedForPhrase.map((entry) => entry.normalizedPath));
}
let remainingEntries = searchablePathEntries.slice();