// Story Summary - Recall Engine
// L1 chunk + L2 event recall
// - Full-corpus vector scoring
// - Normalized entity-weight allocation
// - Exponentially decayed, weighted query embedding
// - L0 floor bonus weighting
// - RRF hybrid retrieval (vector + text)
// - MMR deduplication (applied after fusion)
// - Sparse per-floor deduplication

import { getAllEventVectors, getAllChunkVectors, getChunksByFloors, getMeta } from './chunk-store.js';
import { embed, getEngineFingerprint } from './embedder.js';
import { xbLog } from '../../../core/debug-core.js';
import { getContext } from '../../../../../../extensions.js';
import { getSummaryStore, getFacts, getNewCharacters, isRelationFact } from '../data/store.js';
import { filterText } from './text-filter.js';
import {
    searchStateAtoms,
    buildL0FloorBonus,
    stateToVirtualChunks,
    mergeAndSparsify,
} from './state-recall.js';
import { ensureEventTextIndex, searchEventsByText } from './text-search.js';
import {
    extractRareTerms,
    extractNounsFromFactsO,
} from './tokenizer.js';

const MODULE_ID = 'recall';

const CONFIG = {
    QUERY_MSG_COUNT: 5,
    QUERY_DECAY_BETA: 0.7,
    QUERY_MAX_CHARS: 600,
    QUERY_CONTEXT_CHARS: 240,

    CAUSAL_CHAIN_MAX_DEPTH: 10,
    CAUSAL_INJECT_MAX: 30,

    CANDIDATE_CHUNKS: 200,
    CANDIDATE_EVENTS: 150,

    MAX_CHUNKS: 40,
    MAX_EVENTS: 120,

    MIN_SIMILARITY_CHUNK: 0.6,
    MIN_SIMILARITY_CHUNK_RECENT: 0.5,
    MIN_SIMILARITY_EVENT: 0.65,
    MMR_LAMBDA: 0.72,

    L0_FLOOR_BONUS_FACTOR: 0.10,
    FLOOR_MAX_CHUNKS: 2,
    FLOOR_LIMIT: 1,

    RRF_K: 60,
    TEXT_SEARCH_LIMIT: 80,

    // TEXT-only quality control
    TEXT_SOFT_MIN_SIM: 0.50,
    TEXT_TOTAL_MAX: 6,
};

// ═══════════════════════════════════════════════════════════════════════════
// Utilities
// ═══════════════════════════════════════════════════════════════════════════

function cosineSimilarity(a, b) {
    if (!a?.length || !b?.length || a.length !== b.length) return 0;
    let dot = 0, nA = 0, nB = 0;
    for (let i = 0; i < a.length; i++) {
        dot += a[i] * b[i];
        nA += a[i] * a[i];
        nB += b[i] * b[i];
    }
    return nA && nB ? dot / (Math.sqrt(nA) * Math.sqrt(nB)) : 0;
}
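
// Worked example (illustrative only, not executed): for a = [1, 0] and
// b = [1, 1], dot = 1, |a| = 1, |b| = sqrt(2), so cosineSimilarity(a, b) ≈ 0.707.
// Orthogonal vectors ([1, 0] vs [0, 1]) score 0; identical directions score 1.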

function normalizeVec(v) {
    let s = 0;
    for (let i = 0; i < v.length; i++) s += v[i] * v[i];
    s = Math.sqrt(s) || 1;
    return v.map(x => x / s);
}

// ═══════════════════════════════════════════════════════════════════════════
// RRF fusion
// ═══════════════════════════════════════════════════════════════════════════

function fuseEventsByRRF(vectorRanked, textRanked, eventById, k = CONFIG.RRF_K) {
    const map = new Map();

    const upsert = (id) => {
        if (!map.has(id)) {
            map.set(id, { id, rrf: 0, vRank: Infinity, tRank: Infinity, type: 'TEXT', rawSim: 0, vector: null });
        }
        return map.get(id);
    };

    vectorRanked.forEach((r, i) => {
        const id = r.event?.id;
        if (!id) return;
        const o = upsert(id);
        o.vRank = i + 1;
        o.rrf += 1 / (k + i + 1);
        o.type = o.tRank !== Infinity ? 'HYBRID' : 'VECTOR';
        o.vector = r.vector;
        o.rawSim = r.rawSim || 0;
    });

    textRanked.forEach((r) => {
        const o = upsert(r.id);
        o.tRank = r.textRank;
        o.rrf += 1 / (k + r.textRank);
        o.type = o.vRank !== Infinity ? 'HYBRID' : 'TEXT';
    });

    const typePriority = { HYBRID: 0, VECTOR: 1, TEXT: 2 };

    return Array.from(map.values())
        .map(o => ({ ...o, event: eventById.get(o.id) }))
        .filter(x => x.event)
        .sort((a, b) => {
            if (b.rrf !== a.rrf) return b.rrf - a.rrf;
            if (typePriority[a.type] !== typePriority[b.type]) {
                return typePriority[a.type] - typePriority[b.type];
            }
            if (a.vRank !== b.vRank) return a.vRank - b.vRank;
            return a.tRank - b.tRank;
        });
}
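
// Worked example (illustrative only): with k = 60, an event ranked 3rd on the
// vector path and 7th on the text path scores
//     rrf = 1/(60 + 3) + 1/(60 + 7) ≈ 0.0159 + 0.0149 ≈ 0.0308,
// while a vector-only event at rank 1 scores 1/(60 + 1) ≈ 0.0164. HYBRID hits
// therefore tend to outrank single-path hits even at noticeably worse ranks.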

// ═══════════════════════════════════════════════════════════════════════════
// Causal-chain tracing
// ═══════════════════════════════════════════════════════════════════════════

function buildEventIndex(allEvents) {
    const map = new Map();
    for (const e of allEvents || []) {
        if (e?.id) map.set(e.id, e);
    }
    return map;
}

function traceCausalAncestors(recalledEvents, eventIndex, maxDepth = CONFIG.CAUSAL_CHAIN_MAX_DEPTH) {
    const out = new Map();
    const idRe = /^evt-\d+$/;

    function visit(parentId, depth, chainFrom) {
        if (depth > maxDepth) return;
        if (!idRe.test(parentId)) return;

        const ev = eventIndex.get(parentId);
        if (!ev) return;

        const existed = out.get(parentId);
        if (!existed) {
            out.set(parentId, { event: ev, depth, chainFrom: [chainFrom] });
        } else {
            if (depth < existed.depth) existed.depth = depth;
            if (!existed.chainFrom.includes(chainFrom)) existed.chainFrom.push(chainFrom);
        }

        for (const next of (ev.causedBy || [])) {
            visit(String(next || '').trim(), depth + 1, chainFrom);
        }
    }

    for (const r of recalledEvents || []) {
        const rid = r?.event?.id;
        if (!rid) continue;
        for (const cid of (r.event?.causedBy || [])) {
            visit(String(cid || '').trim(), 1, rid);
        }
    }

    return out;
}
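
// Worked example (illustrative only): if recalled event evt-12 has
// causedBy: ['evt-7'] and evt-7 has causedBy: ['evt-3'], the trace yields
//     evt-7 -> { depth: 1, chainFrom: ['evt-12'] }
//     evt-3 -> { depth: 2, chainFrom: ['evt-12'] }
// An ancestor reached from several recalled events accumulates all of their
// ids in chainFrom, which sortCausalEvents below uses as its primary key.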

function sortCausalEvents(causalArray) {
    return causalArray.sort((a, b) => {
        const refDiff = b.chainFrom.length - a.chainFrom.length;
        if (refDiff !== 0) return refDiff;
        const depthDiff = a.depth - b.depth;
        if (depthDiff !== 0) return depthDiff;
        return String(a.event.id).localeCompare(String(b.event.id));
    });
}

function normalize(s) {
    return String(s || '').normalize('NFKC').replace(/[\u200B-\u200D\uFEFF]/g, '').trim();
}

function parseFloorRange(summary) {
    if (!summary) return null;
    const match = String(summary).match(/\(#(\d+)(?:-(\d+))?\)/);
    if (!match) return null;
    const start = Math.max(0, parseInt(match[1], 10) - 1);
    const end = Math.max(0, (match[2] ? parseInt(match[2], 10) : parseInt(match[1], 10)) - 1);
    return { start, end };
}
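
// Worked example (illustrative only): a summary ending in '(#12-15)' parses to
// { start: 11, end: 14 } (1-based floor tags mapped to 0-based indices), and
// '(#7)' parses to { start: 6, end: 6 }.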

function cleanForRecall(text) {
    return filterText(text).replace(/\[tts:[^\]]*\]/gi, '').trim();
}

// ═══════════════════════════════════════════════════════════════════════════
// Extract the relation target from a predicate
// ═══════════════════════════════════════════════════════════════════════════

function extractRelationTarget(p) {
    if (!p) return '';
    // Matches the Chinese relation predicates "对X的看法" (opinion of X) and
    // "与X的关系" (relationship with X); the patterns must stay in Chinese
    // because they match stored fact data.
    let m = String(p).match(/^对(.+)的看法$/);
    if (m) return m[1].trim();
    m = String(p).match(/^与(.+)的关系$/);
    if (m) return m[1].trim();
    return '';
}
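
// Worked example (illustrative only, hypothetical names):
// extractRelationTarget('对艾拉的看法') returns '艾拉',
// extractRelationTarget('与队长的关系') returns '队长',
// and any non-relation predicate returns ''.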

function buildExpDecayWeights(n, beta) {
    const last = n - 1;
    const w = Array.from({ length: n }, (_, i) => Math.exp(beta * (i - last)));
    const sum = w.reduce((a, b) => a + b, 0) || 1;
    return w.map(x => x / sum);
}
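
// Worked example (illustrative only): buildExpDecayWeights(5, 0.7) returns
// roughly [0.032, 0.064, 0.128, 0.258, 0.519]. Each step back in history
// scales the weight by e^-0.7 ≈ 0.497, so the newest message carries about
// 52% of the blended query embedding.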

// ═══════════════════════════════════════════════════════════════════════════
// Query construction
// ═══════════════════════════════════════════════════════════════════════════

function buildQuerySegments(chat, count, excludeLastAi, pendingUserMessage = null) {
    if (!chat?.length) return [];

    const { name1 } = getContext();

    let messages = chat;
    if (excludeLastAi && messages.length > 0 && !messages[messages.length - 1]?.is_user) {
        messages = messages.slice(0, -1);
    }

    if (pendingUserMessage) {
        const lastMsg = messages[messages.length - 1];
        const lastMsgText = lastMsg?.mes?.trim() || "";
        const pendingText = pendingUserMessage.trim();

        if (lastMsgText !== pendingText) {
            messages = [...messages, { is_user: true, name: name1 || "用户", mes: pendingUserMessage }];
        }
    }

    return messages.slice(-count).map((m, idx, arr) => {
        // The Chinese fallbacks are deliberate ("用户" = user, "角色" = character);
        // they match the stop-word list in buildEntityLexicon.
        const speaker = m.name || (m.is_user ? (name1 || "用户") : "角色");
        const clean = cleanForRecall(m.mes);
        if (!clean) return '';
        const limit = idx === arr.length - 1 ? CONFIG.QUERY_MAX_CHARS : CONFIG.QUERY_CONTEXT_CHARS;
        return `${speaker}: ${clean.slice(0, limit)}`;
    }).filter(Boolean);
}

async function embedWeightedQuery(segments, vectorConfig) {
    if (!segments?.length) return null;

    const weights = buildExpDecayWeights(segments.length, CONFIG.QUERY_DECAY_BETA);
    const vecs = await embed(segments, vectorConfig);
    const dims = vecs?.[0]?.length || 0;
    if (!dims) return null;

    const out = new Array(dims).fill(0);
    for (let i = 0; i < vecs.length; i++) {
        for (let j = 0; j < dims; j++) out[j] += (vecs[i][j] || 0) * weights[i];
    }

    return { vector: normalizeVec(out), weights };
}

// ═══════════════════════════════════════════════════════════════════════════
// Entity extraction
// ═══════════════════════════════════════════════════════════════════════════

function buildEntityLexicon(store, allEvents) {
    const { name1 } = getContext();
    const userName = normalize(name1);
    const set = new Set();

    const facts = getFacts(store);
    for (const f of facts) {
        if (f?.retracted) continue;
        const s = normalize(f?.s);
        if (s) set.add(s);
        if (isRelationFact(f)) {
            const o = normalize(f?.o);
            if (o) set.add(o);
        }
    }

    const chars = getNewCharacters(store);
    for (const m of chars || []) {
        const s = normalize(typeof m === 'string' ? m : m?.name);
        if (s) set.add(s);
    }

    for (const e of allEvents || []) {
        for (const p of e.participants || []) {
            const s = normalize(p);
            if (s) set.add(s);
        }
    }

    for (const a of store?.json?.arcs || []) {
        const s = normalize(a?.name);
        if (s) set.add(s);
    }

    const stop = new Set([userName, '我', '你', '他', '她', '它', '用户', '角色', 'assistant'].map(normalize).filter(Boolean));

    return Array.from(set)
        .filter(s => s.length >= 2 && !stop.has(s) && !/^[\s\p{P}\p{S}]+$/u.test(s) && !/<[^>]+>/.test(s))
        .slice(0, 5000);
}

function buildFactGraph(facts) {
    const graph = new Map();
    const { name1 } = getContext();
    const userName = normalize(name1);

    for (const f of facts || []) {
        if (f?.retracted) continue;
        if (!isRelationFact(f)) continue;

        const s = normalize(f?.s);
        const target = normalize(extractRelationTarget(f?.p));
        if (!s || !target) continue;
        if (s === userName || target === userName) continue;

        if (!graph.has(s)) graph.set(s, new Set());
        if (!graph.has(target)) graph.set(target, new Set());
        graph.get(s).add(target);
        graph.get(target).add(s);
    }

    return graph;
}

function expandByFacts(presentEntities, facts, maxDepth = 2) {
    const graph = buildFactGraph(facts);
    const expanded = new Map();
    const seeds = Array.from(presentEntities || []).map(normalize).filter(Boolean);

    seeds.forEach(e => expanded.set(e, 1.0));

    let frontier = [...seeds];
    for (let d = 1; d <= maxDepth && frontier.length; d++) {
        const next = [];
        const decay = Math.pow(0.5, d);

        for (const e of frontier) {
            const neighbors = graph.get(e);
            if (!neighbors) continue;

            for (const neighbor of neighbors) {
                if (!expanded.has(neighbor)) {
                    expanded.set(neighbor, decay);
                    next.push(neighbor);
                }
            }
        }

        frontier = next.slice(0, 20);
    }

    return Array.from(expanded.entries())
        .sort((a, b) => b[1] - a[1])
        .slice(0, 15)
        .map(([term]) => term);
}
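
// Worked example (illustrative only): with seed A and relation facts linking
// A-B and B-C, expansion assigns A = 1.0, B = 0.5 (depth 1), C = 0.25
// (depth 2). The weights only order the candidates before the top-15 cut,
// since expandByFacts returns the terms alone.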

// ═══════════════════════════════════════════════════════════════════════════
// Entity weight normalization (for bonus allocation)
// ═══════════════════════════════════════════════════════════════════════════

function normalizeEntityWeights(queryEntityWeights) {
    if (!queryEntityWeights?.size) return new Map();

    const total = Array.from(queryEntityWeights.values()).reduce((a, b) => a + b, 0);
    if (total <= 0) return new Map();

    const normalized = new Map();
    for (const [entity, weight] of queryEntityWeights) {
        normalized.set(entity, weight / total);
    }
    return normalized;
}
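
// Worked example (illustrative only): raw weights { A: 0.52, B: 0.26 }
// normalize to { A: 2/3, B: 1/3 }, so with searchEvents'
// ENTITY_BONUS_POOL = 0.10 an event featuring A gets +0.067 and one
// featuring only B gets +0.033; the pool is split, never exceeded.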

// ═══════════════════════════════════════════════════════════════════════════
// Text-path query construction (tiered high-signal terms)
// ═══════════════════════════════════════════════════════════════════════════

async function buildTextSearchQuery(segments, queryEntities, facts, expandedTerms) {
    const breakdown = {
        entities: [],
        rareTerms: [],
        factsO: [],
        expanded: [],
    };

    breakdown.entities = [...(queryEntities || [])];

    const q2Segments = segments.slice(-2);
    const q2Text = q2Segments.join(' ');

    try {
        breakdown.rareTerms = await extractRareTerms(q2Text, 15);
    } catch (e) {
        xbLog.warn(MODULE_ID, 'Rare-term extraction failed', e);
        breakdown.rareTerms = [];
    }

    const entitySet = new Set(breakdown.entities.map(e => e.toLowerCase()));
    breakdown.rareTerms = breakdown.rareTerms.filter(t => !entitySet.has(t.toLowerCase()));

    const relevantSubjects = new Set(queryEntities || []);
    try {
        breakdown.factsO = await extractNounsFromFactsO(facts, relevantSubjects, 5);
    } catch (e) {
        xbLog.warn(MODULE_ID, 'facts O extraction failed', e);
        breakdown.factsO = [];
    }

    const existingSet = new Set([
        ...breakdown.entities,
        ...breakdown.rareTerms,
    ].map(e => e.toLowerCase()));
    breakdown.factsO = breakdown.factsO.filter(t => !existingSet.has(t.toLowerCase()));

    const allExistingSet = new Set([
        ...breakdown.entities,
        ...breakdown.rareTerms,
        ...breakdown.factsO,
    ].map(e => e.toLowerCase()));

    breakdown.expanded = (expandedTerms || [])
        .filter(t => !allExistingSet.has(t.toLowerCase()))
        .slice(0, 3);

    // Entities appear twice on purpose: the duplication raises their term
    // frequency, up-weighting them in the downstream BM25 scoring.
    const queryParts = [
        ...breakdown.entities,
        ...breakdown.entities,
        ...breakdown.rareTerms,
        ...breakdown.factsO,
        ...breakdown.expanded,
    ];

    const query = queryParts.join(' ');

    return { query, breakdown };
}

function stripFloorTag(s) {
    return String(s || '').replace(/\s*\(#\d+(?:-\d+)?\)\s*$/, '').trim();
}

export function buildEventEmbeddingText(ev) {
    const parts = [];

    if (ev?.title) parts.push(ev.title);

    const people = (ev?.participants || []).join(' ');
    if (people) parts.push(people);

    if (ev?.type) parts.push(ev.type);

    const summary = stripFloorTag(ev?.summary);
    if (summary) parts.push(summary);

    return parts.filter(Boolean).join(' ');
}
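
// Worked example (illustrative only, hypothetical event):
//     { title: '初次相遇', participants: ['艾拉', '凯'], type: '偶遇',
//       summary: '两人在图书馆相遇 (#12)' }
// produces the embedding text '初次相遇 艾拉 凯 偶遇 两人在图书馆相遇'; the
// floor tag is stripped so it never leaks into the vector space.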

/**
 * Extract entities from the query segments, inheriting each message's weight.
 * @param {string[]} segments
 * @param {number[]} weights
 * @param {string[]} lexicon
 * @returns {Map<string, number>}
 */
function extractEntitiesWithWeights(segments, weights, lexicon) {
    const entityWeights = new Map();

    if (!segments?.length || !lexicon?.length) return entityWeights;

    for (let i = 0; i < segments.length; i++) {
        const text = normalize(segments[i]);
        const weight = weights?.[i] || 0;

        for (const entity of lexicon) {
            if (text.includes(entity)) {
                const existing = entityWeights.get(entity) || 0;
                if (weight > existing) {
                    entityWeights.set(entity, weight);
                }
            }
        }
    }

    return entityWeights;
}

// ═══════════════════════════════════════════════════════════════════════════
// MMR
// ═══════════════════════════════════════════════════════════════════════════

function mmrSelect(candidates, k, lambda, getVector, getScore) {
    const selected = [];
    const ids = new Set();

    while (selected.length < k && candidates.length) {
        let best = null, bestScore = -Infinity;

        for (const c of candidates) {
            if (ids.has(c._id)) continue;

            const rel = getScore(c);
            let div = 0;

            if (selected.length) {
                const vC = getVector(c);
                if (vC?.length) {
                    for (const s of selected) {
                        const sim = cosineSimilarity(vC, getVector(s));
                        if (sim > div) div = sim;
                    }
                }
            }

            const score = lambda * rel - (1 - lambda) * div;
            if (score > bestScore) {
                bestScore = score;
                best = c;
            }
        }

        if (!best) break;
        selected.push(best);
        ids.add(best._id);
    }

    return selected;
}
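
// Worked example (illustrative only): with lambda = 0.72, a candidate with
// relevance 0.80 that is 0.95-similar to something already selected scores
//     0.72 * 0.80 - 0.28 * 0.95 ≈ 0.31,
// while a fresher candidate with relevance 0.70 and max similarity 0.30 scores
//     0.72 * 0.70 - 0.28 * 0.30 ≈ 0.42,
// so the near-duplicate loses despite its higher raw relevance.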

// ═══════════════════════════════════════════════════════════════════════════
// L1 chunk retrieval
// ═══════════════════════════════════════════════════════════════════════════

async function searchChunks(queryVector, vectorConfig, l0FloorBonus = new Map(), lastSummarizedFloor = -1) {
    const { chatId } = getContext();
    if (!chatId || !queryVector?.length) return [];

    const meta = await getMeta(chatId);
    const fp = getEngineFingerprint(vectorConfig);
    if (meta.fingerprint && meta.fingerprint !== fp) return [];

    const chunkVectors = await getAllChunkVectors(chatId);
    if (!chunkVectors.length) return [];

    const scored = chunkVectors.map(cv => {
        // Chunk ids follow the pattern 'c-<floor>-<chunkIdx>'.
        const match = String(cv.chunkId).match(/c-(\d+)-(\d+)/);
        const floor = match ? parseInt(match[1], 10) : 0;
        const baseSim = cosineSimilarity(queryVector, cv.vector);
        const l0Bonus = l0FloorBonus.get(floor) || 0;

        return {
            _id: cv.chunkId,
            chunkId: cv.chunkId,
            floor,
            chunkIdx: match ? parseInt(match[2], 10) : 0,
            similarity: baseSim + l0Bonus,
            _baseSimilarity: baseSim,
            _l0Bonus: l0Bonus,
            vector: cv.vector,
        };
    });

    const candidates = scored
        .filter(s => {
            // Floors newer than the last summarized one get a laxer threshold.
            const threshold = s.floor > lastSummarizedFloor
                ? CONFIG.MIN_SIMILARITY_CHUNK_RECENT
                : CONFIG.MIN_SIMILARITY_CHUNK;
            return s.similarity >= threshold;
        })
        .sort((a, b) => b.similarity - a.similarity)
        .slice(0, CONFIG.CANDIDATE_CHUNKS);

    const preFilterStats = {
        total: scored.length,
        passThreshold: candidates.length,
        thresholdRemote: CONFIG.MIN_SIMILARITY_CHUNK,
        thresholdRecent: CONFIG.MIN_SIMILARITY_CHUNK_RECENT,
        distribution: {
            '0.8+': scored.filter(s => s.similarity >= 0.8).length,
            '0.7-0.8': scored.filter(s => s.similarity >= 0.7 && s.similarity < 0.8).length,
            '0.6-0.7': scored.filter(s => s.similarity >= 0.6 && s.similarity < 0.7).length,
            '0.55-0.6': scored.filter(s => s.similarity >= 0.55 && s.similarity < 0.6).length,
            '<0.55': scored.filter(s => s.similarity < 0.55).length,
        },
    };

    const dynamicK = Math.min(CONFIG.MAX_CHUNKS, candidates.length);

    const selected = mmrSelect(
        candidates,
        dynamicK,
        CONFIG.MMR_LAMBDA,
        c => c.vector,
        c => c.similarity
    );

    // Sparse per-floor dedup: keep only the best chunk per floor.
    const bestByFloor = new Map();
    for (const s of selected) {
        const prev = bestByFloor.get(s.floor);
        if (!prev || s.similarity > prev.similarity) {
            bestByFloor.set(s.floor, s);
        }
    }

    const sparse = Array.from(bestByFloor.values()).sort((a, b) => b.similarity - a.similarity);

    const floors = [...new Set(sparse.map(c => c.floor))];
    const chunks = await getChunksByFloors(chatId, floors);
    const chunkMap = new Map(chunks.map(c => [c.chunkId, c]));

    const results = sparse.map(item => {
        const chunk = chunkMap.get(item.chunkId);
        if (!chunk) return null;
        return {
            chunkId: item.chunkId,
            floor: item.floor,
            chunkIdx: item.chunkIdx,
            speaker: chunk.speaker,
            isUser: chunk.isUser,
            text: chunk.text,
            similarity: item.similarity,
        };
    }).filter(Boolean);

    if (results.length > 0) {
        results._preFilterStats = preFilterStats;
    }

    return results;
}
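
// Worked example (illustrative only): a chunk 'c-42-1' on floor 42 with base
// similarity 0.47 and an L0 bonus of 0.10 scores 0.57; if floor 42 is newer
// than lastSummarizedFloor it clears the recent threshold (0.5), otherwise it
// is dropped by the stricter remote threshold (0.6).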

// ═══════════════════════════════════════════════════════════════════════════
// L2 event retrieval (RRF hybrid + post-fusion MMR)
// ═══════════════════════════════════════════════════════════════════════════

async function searchEvents(queryVector, queryTextForSearch, allEvents, vectorConfig, store, normalizedEntityWeights, l0FloorBonus = new Map()) {
    const { chatId } = getContext();
    if (!chatId || !queryVector?.length) return [];

    const meta = await getMeta(chatId);
    const fp = getEngineFingerprint(vectorConfig);
    if (meta.fingerprint && meta.fingerprint !== fp) return [];

    const eventVectors = await getAllEventVectors(chatId);
    const vectorMap = new Map(eventVectors.map(v => [v.eventId, v.vector]));
    if (!vectorMap.size) return [];

    // Build/refresh the text index
    const revision = `${chatId}:${store?.updatedAt || 0}:${allEvents.length}`;
    ensureEventTextIndex(allEvents, revision);

    // Text-path retrieval
    const textRanked = searchEventsByText(queryTextForSearch, CONFIG.TEXT_SEARCH_LIMIT);
    const textGapInfo = textRanked._gapInfo || null;

    // ═══════════════════════════════════════════════════════════════════════
    // Vector-path retrieval (L0 floor bonus + normalized entity bonus)
    // ═══════════════════════════════════════════════════════════════════════

    const ENTITY_BONUS_POOL = 0.10;

    const scored = (allEvents || []).map((event, idx) => {
        const v = vectorMap.get(event.id);
        const rawSim = v ? cosineSimilarity(queryVector, v) : 0;

        let bonus = 0;

        // L0 bonus
        const range = parseFloorRange(event.summary);
        if (range) {
            for (let f = range.start; f <= range.end; f++) {
                if (l0FloorBonus.has(f)) {
                    bonus += l0FloorBonus.get(f);
                    break;
                }
            }
        }

        const participants = (event.participants || []).map(p => normalize(p));
        let maxEntityWeight = 0;
        for (const p of participants) {
            const w = normalizedEntityWeights.get(p) || 0;
            if (w > maxEntityWeight) {
                maxEntityWeight = w;
            }
        }
        const entityBonus = ENTITY_BONUS_POOL * maxEntityWeight;
        bonus += entityBonus;

        return {
            _id: event.id,
            _idx: idx,
            event,
            rawSim,
            finalScore: rawSim + bonus,
            vector: v,
            _entityBonus: entityBonus,
            _hasPresent: maxEntityWeight > 0,
        };
    });

    const rawSimById = new Map(scored.map(s => [s._id, s.rawSim]));
    const entityBonusById = new Map(scored.map(s => [s._id, s._entityBonus]));
    const hasPresentById = new Map(scored.map(s => [s._id, s._hasPresent]));

    const preFilterDistribution = {
        total: scored.length,
        '0.85+': scored.filter(s => s.finalScore >= 0.85).length,
        '0.7-0.85': scored.filter(s => s.finalScore >= 0.7 && s.finalScore < 0.85).length,
        '0.6-0.7': scored.filter(s => s.finalScore >= 0.6 && s.finalScore < 0.7).length,
        '0.5-0.6': scored.filter(s => s.finalScore >= 0.5 && s.finalScore < 0.6).length,
        '<0.5': scored.filter(s => s.finalScore < 0.5).length,
        passThreshold: scored.filter(s => s.finalScore >= CONFIG.MIN_SIMILARITY_EVENT).length,
        threshold: CONFIG.MIN_SIMILARITY_EVENT,
    };

    const candidates = scored
        .filter(s => s.finalScore >= CONFIG.MIN_SIMILARITY_EVENT)
        .sort((a, b) => b.finalScore - a.finalScore)
        .slice(0, CONFIG.CANDIDATE_EVENTS);

    const vectorRanked = candidates.map(s => ({
        event: s.event,
        similarity: s.finalScore,
        rawSim: s.rawSim,
        vector: s.vector,
    }));

    const eventById = new Map(allEvents.map(e => [e.id, e]));
    const fused = fuseEventsByRRF(vectorRanked, textRanked, eventById);

    const textOnlyStats = {
        total: 0,
        passedSoftCheck: 0,
        filtered: 0,
        finalIncluded: 0,
        truncatedByLimit: 0,
    };

    // TEXT-only hits must still clear a soft semantic-similarity floor.
    const filtered = fused.filter(x => {
        if (x.type !== 'TEXT') return true;

        textOnlyStats.total++;
        const sim = x.rawSim || rawSimById.get(x.id) || 0;
        if (sim >= CONFIG.TEXT_SOFT_MIN_SIM) {
            textOnlyStats.passedSoftCheck++;
            return true;
        }

        textOnlyStats.filtered++;
        return false;
    });

    const mmrInput = filtered.slice(0, CONFIG.CANDIDATE_EVENTS).map(x => ({
        ...x,
        _id: x.id,
    }));

    const mmrOutput = mmrSelect(
        mmrInput,
        CONFIG.MAX_EVENTS,
        CONFIG.MMR_LAMBDA,
        c => c.vector || null,
        c => c.rrf
    );

    let textOnlyCount = 0;
    const finalResults = mmrOutput.filter(x => {
        if (x.type !== 'TEXT') return true;
        if (textOnlyCount < CONFIG.TEXT_TOTAL_MAX) {
            textOnlyCount++;
            return true;
        }
        textOnlyStats.truncatedByLimit++;
        return false;
    });
    textOnlyStats.finalIncluded = textOnlyCount;

    const results = finalResults.map(x => ({
        event: x.event,
        similarity: x.rrf,
        _recallType: hasPresentById.get(x.event?.id) ? 'DIRECT' : 'SIMILAR',
        _recallReason: x.type,
        _rrfDetail: { vRank: x.vRank, tRank: x.tRank, rrf: x.rrf },
        _entityBonus: entityBonusById.get(x.event?.id) || 0,
        _rawSim: rawSimById.get(x.event?.id) || 0,
    }));

    // Attach stats to the first result
    if (results.length > 0) {
        results[0]._preFilterDistribution = preFilterDistribution;
        results[0]._rrfStats = {
            vectorCount: vectorRanked.length,
            textCount: textRanked.length,
            hybridCount: fused.filter(x => x.type === 'HYBRID').length,
            vectorOnlyCount: fused.filter(x => x.type === 'VECTOR').length,
            textOnlyTotal: textOnlyStats.total,
        };
        results[0]._textOnlyStats = textOnlyStats;
        results[0]._textGapInfo = textGapInfo;
    }

    return results;
}
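
// Worked example (illustrative only): with ENTITY_BONUS_POOL = 0.10 and
// normalized entity weights { A: 2/3, B: 1/3 }, an event whose participants
// include A gets finalScore = rawSim + 0.067 (plus any L0 floor bonus), while
// a TEXT-only hit with rawSim 0.42 is dropped by the 0.50 soft floor even if
// BM25 ranked it highly.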

// ═══════════════════════════════════════════════════════════════════════════
// Logging
// ═══════════════════════════════════════════════════════════════════════════

function formatRecallLog({
    elapsed,
    segments,
    weights,
    chunkResults,
    eventResults,
    allEvents,
    normalizedEntityWeights = new Map(),
    causalEvents = [],
    chunkPreFilterStats = null,
    l0Results = [],
    textGapInfo = null,
    expandedTerms = [],
    textQueryBreakdown = null,
}) {
    const lines = [
        '\u2554' + '\u2550'.repeat(62) + '\u2557',
        '\u2551 Memory Recall Report \u2551',
        '\u2560' + '\u2550'.repeat(62) + '\u2563',
        `\u2551 Elapsed: ${elapsed}ms`,
        '\u255a' + '\u2550'.repeat(62) + '\u255d',
        '',
        '\u250c' + '\u2500'.repeat(61) + '\u2510',
        `\u2502 [Query Build] last ${CONFIG.QUERY_MSG_COUNT} messages, exponential decay (β=${CONFIG.QUERY_DECAY_BETA}) \u2502`,
        '\u2514' + '\u2500'.repeat(61) + '\u2518',
    ];

    const segmentsSorted = segments.map((s, i) => ({
        idx: i + 1,
        weight: weights?.[i] ?? 0,
        text: s,
    })).sort((a, b) => b.weight - a.weight);

    segmentsSorted.forEach((s, rank) => {
        const bar = '\u2588'.repeat(Math.round(s.weight * 20));
        const preview = s.text.length > 60 ? s.text.slice(0, 60) + '...' : s.text;
        const marker = rank === 0 ? ' ◀ dominant' : '';
        lines.push(`  ${(s.weight * 100).toFixed(1).padStart(5)}% ${bar.padEnd(12)} ${preview}${marker}`);
    });

    lines.push('');
    lines.push('\u250c' + '\u2500'.repeat(61) + '\u2510');
    lines.push('\u2502 [Extracted Entities] \u2502');
    lines.push('\u2514' + '\u2500'.repeat(61) + '\u2518');

    if (normalizedEntityWeights?.size) {
        const sorted = Array.from(normalizedEntityWeights.entries())
            .sort((a, b) => b[1] - a[1])
            .slice(0, 8);
        const formatted = sorted
            .map(([e, w]) => `${e}(${(w * 100).toFixed(0)}%)`)
            .join(' | ');
        lines.push(`  ${formatted}`);
    } else {
        lines.push('  (none)');
    }
    if (expandedTerms?.length) {
        lines.push(`  Expanded: ${expandedTerms.join(', ')}`);
    }

    lines.push('');
    lines.push('\u250c' + '\u2500'.repeat(61) + '\u2510');
    lines.push('\u2502 [Text-Path Query Breakdown] \u2502');
    lines.push('\u2514' + '\u2500'.repeat(61) + '\u2518');

    if (textQueryBreakdown) {
        const bd = textQueryBreakdown;
        if (bd.entities?.length) {
            lines.push(`  Strong signal - entities (${bd.entities.length}): ${bd.entities.slice(0, 8).join(' | ')}${bd.entities.length > 8 ? ' ...' : ''}`);
        } else {
            lines.push('  Strong signal - entities: (none)');
        }

        if (bd.rareTerms?.length) {
            lines.push(`  Strong signal - rare terms (${bd.rareTerms.length}): ${bd.rareTerms.slice(0, 10).join(' | ')}${bd.rareTerms.length > 10 ? ' ...' : ''}`);
        } else {
            lines.push('  Strong signal - rare terms: (none)');
        }

        if (bd.factsO?.length) {
            lines.push(`  Medium signal - facts O (${bd.factsO.length}): ${bd.factsO.join(' | ')}`);
        } else {
            lines.push('  Medium signal - facts O: (none)');
        }

        if (bd.expanded?.length) {
            lines.push(`  Background expansion (${bd.expanded.length}): ${bd.expanded.join(' | ')}`);
        } else {
            lines.push('  Background expansion: (none)');
        }
    } else {
        lines.push('  (fallback mode, no tier breakdown)');
    }

    lines.push('');
    lines.push('  Entity normalization (for bonus allocation):');
    if (normalizedEntityWeights?.size) {
        const sorted = Array.from(normalizedEntityWeights.entries())
            .sort((a, b) => b[1] - a[1])
            .slice(0, 8);
        const formatted = sorted
            .map(([e, w]) => `${e}(${(w * 100).toFixed(0)}%)`)
            .join(' | ');
        lines.push(`  ${formatted}`);
    } else {
        lines.push('  (none)');
    }

    lines.push('');
    lines.push('\u250c' + '\u2500'.repeat(61) + '\u2510');
    lines.push('\u2502 [Recall Stats] \u2502');
    lines.push('\u2514' + '\u2500'.repeat(61) + '\u2518');

    // L0
    const l0Floors = [...new Set(l0Results.map(r => r.floor))].sort((a, b) => a - b);
    lines.push('  L0 semantic anchors:');
    if (l0Results.length) {
        lines.push(`    Selected: ${l0Results.length} | affected floors: ${l0Floors.join(', ')} (+${CONFIG.L0_FLOOR_BONUS_FACTOR} bonus)`);
    } else {
        lines.push('    (no data)');
    }

    // L1
    lines.push('');
    lines.push('  L1 source chunks:');
    if (chunkPreFilterStats) {
        const dist = chunkPreFilterStats.distribution || {};
        lines.push(`    Total: ${chunkPreFilterStats.total} | passed threshold (remote >=${chunkPreFilterStats.thresholdRemote}, recent >=${chunkPreFilterStats.thresholdRecent}): ${chunkPreFilterStats.passThreshold} | final: ${chunkResults.length}`);
        lines.push(`    Similarity: 0.8+: ${dist['0.8+'] || 0} | 0.7-0.8: ${dist['0.7-0.8'] || 0} | 0.6-0.7: ${dist['0.6-0.7'] || 0}`);
    } else {
        lines.push(`    Selected: ${chunkResults.length}`);
    }

    // L2
    const rrfStats = eventResults[0]?._rrfStats || {};
    const textOnlyStats = eventResults[0]?._textOnlyStats || {};

    lines.push('');
    lines.push('  L2 event memory (RRF hybrid retrieval):');
    lines.push(`    Total events: ${allEvents.length} | final: ${eventResults.length}`);
    lines.push(`    Vector path: ${rrfStats.vectorCount || 0} | text path: ${rrfStats.textCount || 0}`);
    lines.push(`    HYBRID: ${rrfStats.hybridCount || 0} | VECTOR-only: ${rrfStats.vectorOnlyCount || 0}`);

    lines.push('');
    lines.push('  TEXT-only quality control:');
    lines.push(`    Candidates: ${textOnlyStats.total || 0}`);
    lines.push(`    Passed soft check (sim >= ${CONFIG.TEXT_SOFT_MIN_SIM}): ${textOnlyStats.passedSoftCheck || 0}`);
    lines.push(`    Filtered by similarity: ${textOnlyStats.filtered || 0}`);
    lines.push(`    Truncated by cap (max=${CONFIG.TEXT_TOTAL_MAX}): ${textOnlyStats.truncatedByLimit || 0}`);
    lines.push(`    Final included: ${textOnlyStats.finalIncluded || 0}`);

    const textOnlyEvents = eventResults.filter(e => e._recallReason === 'TEXT');
    if (textOnlyEvents.length > 0) {
        lines.push('');
        lines.push('  TEXT-only selected events:');
        textOnlyEvents.forEach((e, i) => {
            const ev = e.event || {};
            const id = ev.id || '?';
            const title = (ev.title || '').slice(0, 25) || '(untitled)';
            const sim = (e._rawSim || 0).toFixed(2);
            const tRank = e._rrfDetail?.tRank ?? '?';
            lines.push(`    ${i + 1}. [${id}] ${title.padEnd(25)} sim=${sim} tRank=${tRank}`);
        });
    }
    const entityBoostedEvents = eventResults.filter(e => e._entityBonus > 0).length;
    lines.push('');
    lines.push(`  Entity-boosted events: ${entityBoostedEvents}`);

    if (textGapInfo) {
        lines.push('');
        lines.push('  Text search (BM25 dynamic top-K):');
        lines.push(`    Hits: ${textGapInfo.total} | returned: ${textGapInfo.returned} (covering ${textGapInfo.coverage} of total score)`);
        if (textGapInfo.scoreRange) {
            const s = textGapInfo.scoreRange;
            lines.push(`    Scores: top=${s.top} | cutoff=${s.cutoff} | p50=${s.p50} | last=${s.last}`);
        }
    }

    // Causal
    if (causalEvents.length) {
        const maxRefs = Math.max(...causalEvents.map(c => c.chainFrom?.length || 0));
        const maxDepth = Math.max(...causalEvents.map(c => c.depth || 0));
        lines.push('');
        lines.push('  Causal-chain tracing:');
        lines.push(`    Traced: ${causalEvents.length} | max referenced: ${maxRefs} | max depth: ${maxDepth}`);
    }

    lines.push('');
    return lines.join('\n');
}

// ═══════════════════════════════════════════════════════════════════════════
// Main entry point
// ═══════════════════════════════════════════════════════════════════════════

export async function recallMemory(queryText, allEvents, vectorConfig, options = {}) {
    const T0 = performance.now();
    const { chat } = getContext();
    const store = getSummaryStore();
    const lastSummarizedFloor = store?.lastSummarizedMesId ?? -1;
    const { pendingUserMessage = null } = options;

    if (!allEvents?.length) {
        return { events: [], chunks: [], elapsed: 0, logText: 'No events.' };
    }

    const segments = buildQuerySegments(chat, CONFIG.QUERY_MSG_COUNT, !!options.excludeLastAi, pendingUserMessage);

    let queryVector, weights;
    try {
        const result = await embedWeightedQuery(segments, vectorConfig);
        queryVector = result?.vector;
        weights = result?.weights;
    } catch (e) {
        xbLog.error(MODULE_ID, 'Query embedding failed', e);
        return { events: [], chunks: [], elapsed: Math.round(performance.now() - T0), logText: 'Query embedding failed.' };
    }

    if (!queryVector?.length) {
        return { events: [], chunks: [], elapsed: Math.round(performance.now() - T0), logText: 'Empty query vector.' };
    }

    const lexicon = buildEntityLexicon(store, allEvents);
    const queryEntityWeights = extractEntitiesWithWeights(segments, weights, lexicon);
    const queryEntities = Array.from(queryEntityWeights.keys());
    const facts = getFacts(store);
    const expandedTerms = expandByFacts(queryEntities, facts, 2);
    const normalizedEntityWeights = normalizeEntityWeights(queryEntityWeights);

    let queryTextForSearch = '';
    let textQueryBreakdown = null;
    try {
        const result = await buildTextSearchQuery(segments, queryEntities, facts, expandedTerms);
        queryTextForSearch = result.query;
        textQueryBreakdown = result.breakdown;
    } catch (e) {
        xbLog.warn(MODULE_ID, 'Text-path query build failed, falling back to simple concatenation', e);
        const lastSeg = segments[segments.length - 1] || '';
        queryTextForSearch = [lastSeg, ...queryEntities, ...expandedTerms].join(' ');
    }

    // L0 recall
    let l0Results = [];
    let l0FloorBonus = new Map();
    let l0VirtualChunks = [];

    try {
        l0Results = await searchStateAtoms(queryVector, vectorConfig);
        l0FloorBonus = buildL0FloorBonus(l0Results, CONFIG.L0_FLOOR_BONUS_FACTOR);
        l0VirtualChunks = stateToVirtualChunks(l0Results);
    } catch (e) {
        xbLog.warn(MODULE_ID, 'L0 recall failed, degrading gracefully', e);
    }

    const [chunkResults, eventResults] = await Promise.all([
        searchChunks(queryVector, vectorConfig, l0FloorBonus, lastSummarizedFloor),
        searchEvents(queryVector, queryTextForSearch, allEvents, vectorConfig, store, normalizedEntityWeights, l0FloorBonus),
    ]);

    const chunkPreFilterStats = chunkResults._preFilterStats || null;
    const textGapInfo = eventResults[0]?._textGapInfo || null;

    const mergedChunks = mergeAndSparsify(l0VirtualChunks, chunkResults, CONFIG.FLOOR_MAX_CHUNKS);

    // Causal-chain tracing
    const eventIndex = buildEventIndex(allEvents);
    const causalMap = traceCausalAncestors(eventResults, eventIndex);

    const recalledIdSet = new Set(eventResults.map(x => x?.event?.id).filter(Boolean));
    const causalEvents = Array.from(causalMap.values())
        .filter(x => x?.event?.id && !recalledIdSet.has(x.event.id))
        .map(x => ({
            event: x.event,
            similarity: 0,
            _recallType: 'CAUSAL',
            _recallReason: `causal-chain(${x.chainFrom.join(',')})`,
            _causalDepth: x.depth,
            _chainFrom: x.chainFrom,
            chainFrom: x.chainFrom,
            depth: x.depth,
        }));

    sortCausalEvents(causalEvents);
    const causalEventsTruncated = causalEvents.slice(0, CONFIG.CAUSAL_INJECT_MAX);

    const elapsed = Math.round(performance.now() - T0);
    const logText = formatRecallLog({
        elapsed,
        queryText,
        segments,
        weights,
        chunkResults: mergedChunks,
        eventResults,
        allEvents,
        normalizedEntityWeights,
        causalEvents: causalEventsTruncated,
        chunkPreFilterStats,
        l0Results,
        textGapInfo,
        expandedTerms,
        textQueryBreakdown,
    });

    console.group('%c[Recall]', 'color: #7c3aed; font-weight: bold');
    console.log(`Elapsed: ${elapsed}ms | L0: ${l0Results.length} | Entities: ${queryEntities.join(', ') || '(none)'}`);
    console.log(`L1: ${mergedChunks.length} | L2: ${eventResults.length}/${allEvents.length} | Causal: ${causalEventsTruncated.length}`);
    console.groupEnd();

    return { events: eventResults, causalEvents: causalEventsTruncated, chunks: mergedChunks, elapsed, logText, queryEntities, l0Results };
}

export function buildQueryText(chat, count = 2, excludeLastAi = false) {
    if (!chat?.length) return '';

    let messages = chat;
    if (excludeLastAi && messages.length > 0 && !messages[messages.length - 1]?.is_user) {
        messages = messages.slice(0, -1);
    }

    return messages.slice(-count).map(m => {
        const text = cleanForRecall(m.mes);
        // Return '' for empty messages so filter(Boolean) can drop them;
        // previously the `${speaker}: ` prefix made every entry truthy.
        if (!text) return '';
        const speaker = m.name || (m.is_user ? '用户' : '角色');
        return `${speaker}: ${text.slice(0, 500)}`;
    }).filter(Boolean).join('\n');
}