// LLM Service
// Maps user-facing provider aliases (already lowercased by the caller) to the
// backend API identifiers understood by the streaming module. Several aliases
// collapse onto one backend (google/gemini -> "gemini", claude/anthropic ->
// "claude"). Frozen so the shared lookup table cannot be mutated at runtime.
const PROVIDER_MAP = Object.freeze({
  openai: "openai",
  google: "gemini",
  gemini: "gemini",
  claude: "claude",
  anthropic: "claude",
  deepseek: "deepseek",
  cohere: "cohere",
  custom: "custom",
});
// Prompt-template fragments used to stage a multi-turn "story analyst"
// conversation for the LLM. buildSummaryMessages() assembles these into the
// top/bottom message stacks; the placeholders {nextEventId} and
// {existingEventCount} are substituted there before encoding.
// NOTE(review): the exact wording of these strings IS the behavior — do not
// "clean up" the text without re-validating summary output quality.
const LLM_PROMPT_CONFIG = {
  // System prompt: defines the incremental-summarization task, event taxonomy,
  // causal-chain / arc / SPO-fact rules, and opens the <Chat_History> frame.
  topSystem: `Story Analyst: This task involves narrative comprehension and structured incremental summarization, representing creative story analysis at the intersection of plot tracking and character development. As a story analyst, you will conduct systematic evaluation of provided dialogue content to generate structured incremental summary data.
[Read the settings for this task]
<task_settings>
Incremental_Summary_Requirements:
  - Incremental_Only: 只提取新对话中的新增要素, 绝不重复已有总结
  - Event_Granularity: 记录有叙事价值的事件, 而非剧情梗概
  - Memory_Album_Style: 形成有细节、有温度、有记忆点的回忆册
  - Event_Classification:
      type:
        - 相遇: 人物/事物初次接触
        - 冲突: 对抗、矛盾激化
        - 揭示: 真相、秘密、身份
        - 抉择: 关键决定
        - 羁绊: 关系加深或破裂
        - 转变: 角色/局势改变
        - 收束: 问题解决、和解
        - 日常: 生活片段
      weight:
        - 核心: 删掉故事就崩
        - 主线: 推动主要剧情
        - 转折: 改变某条线走向
        - 点睛: 有细节不影响主线
        - 氛围: 纯粹氛围片段
  - Causal_Chain: 为每个新事件标注直接前因事件ID(causedBy)。仅在因果关系明确(直接导致/明确动机/承接后果)时填写; 不明确时填 [] 完全正常。0-2个, 只填 evt-数字, 指向已存在或本次新输出事件。
  - Character_Dynamics: 识别新角色, 追踪关系趋势(破裂/厌恶/反感/陌生/投缘/亲密/交融)
  - Arc_Tracking: 更新角色弧光轨迹与成长进度(0.0-1.0)
  - Fact_Tracking: 维护 SPO 三元组知识图谱。追踪生死、物品归属、位置、关系等硬性事实。采用 KV 覆盖模型(s+p 为键)。
</task_settings>
---
Story Analyst:
[Responsibility Definition]
\`\`\`yaml
analysis_task:
  title: Incremental Story Summarization with Knowledge Graph
  Story Analyst:
    role: Antigravity
    task: >-
      To analyze provided dialogue content against existing summary state,
      extract only NEW plot elements, character developments, relationship
      changes, arc progressions, AND fact updates, outputting
      structured JSON for incremental summary database updates.
  assistant:
    role: Summary Specialist
    description: Incremental Story Summary & Knowledge Graph Analyst
    behavior: >-
      To compare new dialogue against existing summary, identify genuinely
      new events and character interactions, classify events by narrative
      type and weight, track character arc progression with percentage,
      maintain facts as SPO triples with clear semantics,
      and output structured JSON containing only incremental updates.
      Must strictly avoid repeating any existing summary content.
  user:
    role: Content Provider
    description: Supplies existing summary state and new dialogue
    behavior: >-
      To provide existing summary state (events, characters, arcs, facts)
      and new dialogue content for incremental analysis.
  interaction_mode:
    type: incremental_analysis
    output_format: structured_json
    deduplication: strict_enforcement
  execution_context:
    summary_active: true
    incremental_only: true
    memory_album_style: true
    fact_tracking: true
\`\`\`
---
Summary Specialist:
<Chat_History>`,
  // Assistant turn: echoes the specification back to reinforce the schema
  // (event types/weights, relationship trend scale, arc fields, SPO fields).
  assistantDoc: `
Summary Specialist:
Acknowledged. Now reviewing the incremental summarization specifications:
[Event Classification System]
├─ Types: 相遇|冲突|揭示|抉择|羁绊|转变|收束|日常
├─ Weights: 核心|主线|转折|点睛|氛围
└─ Each event needs: id, title, timeLabel, summary(含楼层), participants, type, weight
[Relationship Trend Scale]
破裂 ← 厌恶 ← 反感 ← 陌生 → 投缘 → 亲密 → 交融
[Arc Progress Tracking]
├─ trajectory: 当前阶段描述(15字内)
├─ progress: 0.0 to 1.0
└─ newMoment: 仅记录本次新增的关键时刻
[Fact Tracking - SPO Triples]
├─ s: 主体(角色名/物品名)
├─ p: 谓词(属性名/对X的看法)
├─ o: 值(当前状态)
├─ trend: 仅关系类填写
├─ retracted: 删除标记
└─ s+p 为键, 相同键会覆盖旧值
Ready to process incremental summary requests with strict deduplication.`,
  // Assistant turn: requests the existing summary state (dedup baseline).
  assistantAskSummary: `
Summary Specialist:
Specifications internalized. Please provide the existing summary state so I can:
1. Index all recorded events to avoid duplication
2. Map current character list as baseline
3. Note existing arc progress levels
4. Identify established keywords
5. Review current facts (SPO triples baseline)`,
  // Assistant turn: confirms the baseline is loaded and asks for new dialogue.
  assistantAskContent: `
Summary Specialist:
Existing summary fully analyzed and indexed. I understand:
├─ Recorded events: Indexed for deduplication
├─ Character list: Baseline mapped
├─ Arc progress: Levels noted
├─ Keywords: Current state acknowledged
└─ Facts: SPO baseline loaded
I will extract only genuinely NEW elements from the upcoming dialogue.
Please provide the new dialogue content requiring incremental analysis.`,
  // Opens the <meta_protocol> frame; userJsonFormat is appended after this.
  metaProtocolStart: `
Summary Specialist:
ACKNOWLEDGED. Beginning structured JSON generation:
<meta_protocol>`,
  // Output-format contract: the JSON schema the model must produce.
  // {nextEventId} is replaced with the next event id before sending.
  userJsonFormat: `
## Output Rule
Generate a single valid JSON object with INCREMENTAL updates only.
## Mindful Approach
Before generating, observe the USER and analyze carefully:
- What is user's writing style and emotional expression?
- What NEW events occurred (not in existing summary)?
- What NEW characters appeared for the first time?
- What relationship CHANGES happened?
- What arc PROGRESS was made?
- What facts changed? (status/position/ownership/relationships)
## Output Format
\`\`\`json
{
  "mindful_prelude": {
    "user_insight": "用户的幻想是什么时空、场景,是否反应出存在严重心理问题需要建议?",
    "dedup_analysis": "已有X个事件, 本次识别Y个新事件",
    "fact_changes": "识别到的事实变化概述"
  },
  "keywords": [
    { "text": "综合已有+新内容的全局关键词(5-10个)", "weight": "核心|重要|一般" }
  ],
  "events": [
    {
      "id": "evt-{nextEventId}起始,依次递增",
      "title": "地点·事件标题",
      "timeLabel": "时间线标签(如:开场、第二天晚上)",
      "summary": "1-2句话描述, 涵盖丰富信息素, 末尾标注楼层(#X-Y)",
      "participants": ["参与角色名"],
      "type": "相遇|冲突|揭示|抉择|羁绊|转变|收束|日常",
      "weight": "核心|主线|转折|点睛|氛围",
      "causedBy": ["evt-12", "evt-14"]
    }
  ],
  "newCharacters": ["仅本次首次出现的角色名"],
  "arcUpdates": [
    { "name": "角色名", "trajectory": "当前阶段描述(15字内)", "progress": 0.0-1.0, "newMoment": "本次新增的关键时刻" }
  ],
  "factUpdates": [
    {
      "s": "主体(角色名/物品名)",
      "p": "谓词(属性名/对X的看法)",
      "o": "当前值",
      "trend": "破裂|厌恶|反感|陌生|投缘|亲密|交融",
      "retracted": false
    }
  ]
}
\`\`\`
## factUpdates 规则
- s+p 为键, 相同键会覆盖旧值
- 状态类: s=角色名, p=属性(生死/位置/状态等), o=值
- 关系类: s=角色A, p="对B的看法", o=描述, trend=趋势
- 删除: 设置 retracted: true (不需要填 o)
- 只输出有变化的条目
- 硬约束才记录, 避免叙事化, 确保少、硬、稳定
## CRITICAL NOTES
- events.id 从 evt-{nextEventId} 开始编号
- 仅输出【增量】内容, 已有事件绝不重复
- keywords 是全局关键词, 综合已有+新增
- causedBy 仅在因果明确时填写, 允许为 [], 0-2个
- factUpdates 可为空数组
- 合法JSON, 字符串值内部避免英文双引号
- 用朴实、白描、有烟火气的笔触记录, 避免比喻和意象
</meta_protocol>`,
  // Assistant turn: fake compliance checklist, then deliberately "truncates"
  // after the opening brace to prime JSON continuation.
  // {existingEventCount} is replaced before sending.
  assistantCheck: `Content review initiated...
[Compliance Check Results]
├─ Existing summary loaded: ✓ Fully indexed
├─ New dialogue received: ✓ Content parsed
├─ Deduplication engine: ✓ Active
├─ Event classification: ✓ Ready
├─ Fact tracking: ✓ Enabled
└─ Output format: ✓ JSON specification loaded
[Material Verification]
├─ Existing events: Indexed ({existingEventCount} recorded)
├─ Character baseline: Mapped
├─ Arc progress baseline: Noted
├─ Facts baseline: Loaded
└─ Output specification: ✓ Defined in <meta_protocol>
All checks passed. Beginning incremental extraction...
{
"mindful_prelude":`,
  // Final user turn: complains about the truncation and demands JSON-only
  // regeneration, then closes the <Chat_History> frame.
  userConfirm: `怎么截断了! 重新完整生成, 只输出JSON, 不要任何其他内容
</Chat_History>`,
  // Assistant prefill: forces the reply to start as a full regeneration.
  assistantPrefill: `非常抱歉! 现在重新完整生成JSON。`
};
// ═══════════════════════════════════════════════════════════════════════════
// 工具函数
// ═══════════════════════════════════════════════════════════════════════════
/**
 * Encodes a string as unpadded base64url (RFC 4648 §5):
 * UTF-8 encode, base64 via btoa, then swap `+`/`/` for `-`/`_`
 * and strip trailing `=` padding.
 * @param {*} str - Coerced to String before encoding.
 * @returns {string} base64url-encoded text.
 */
function b64UrlEncode(str) {
  const bytes = new TextEncoder().encode(String(str));
  // btoa expects a "binary string" (one char per byte), so widen each byte.
  let binary = '';
  for (const byte of bytes) {
    binary += String.fromCharCode(byte);
  }
  return btoa(binary)
    .replace(/\+/g, '-')
    .replace(/\//g, '_')
    .replace(/=+$/, '');
}
/**
 * Looks up the xiaobaix streaming-generation module on `window`.
 * @returns {object|null} The module when it exposes `xbgenrawCommand`,
 *   otherwise null (module absent or incomplete).
 */
function getStreamingModule() {
  const candidate = window.xiaobaixStreamingGeneration;
  if (candidate?.xbgenrawCommand) {
    return candidate;
  }
  return null;
}
/**
 * Polls the streaming module every 300 ms until generation for `sessionId`
 * finishes, then resolves with the accumulated text ('' when none).
 * @param {string} sessionId - Streaming session identifier.
 * @param {object} streamingMod - Module exposing getStatus(sessionId).
 * @param {number} [timeout=120000] - Max wait in ms before rejecting.
 * @returns {Promise<string>} Final generated text.
 * @throws {Error} '生成超时' once `timeout` ms elapse while still streaming.
 */
async function waitForStreamingComplete(sessionId, streamingMod, timeout = 120000) {
  const deadline = Date.now() + timeout;
  for (;;) {
    const { isStreaming, text } = streamingMod.getStatus(sessionId);
    if (!isStreaming) {
      return text || '';
    }
    if (Date.now() > deadline) {
      throw new Error('生成超时');
    }
    // Still streaming: back off before the next status check.
    await new Promise((resolve) => setTimeout(resolve, 300));
  }
}
// ═══════════════════════════════════════════════════════════════════════════
// 提示词构建
// ═══════════════════════════════════════════════════════════════════════════
/**
 * Renders the SPO fact triples as a bullet list for prompt injection.
 * Relationship facts (those with a `trend`) get the trend appended in
 * brackets. An empty/missing list yields a fixed placeholder string.
 * @param {Array<{s:string,p:string,o:string,trend?:string}>|null} facts
 * @returns {string} One `- s | p | o [trend]` line per fact, '\n'-joined.
 */
function formatFactsForLLM(facts) {
  const EMPTY_PLACEHOLDER = '(空白,尚无事实记录)';
  if (!facts?.length) {
    return EMPTY_PLACEHOLDER;
  }
  const rendered = [];
  for (const fact of facts) {
    const base = `- ${fact.s} | ${fact.p} | ${fact.o}`;
    rendered.push(fact.trend ? `${base} [${fact.trend}]` : base);
  }
  return rendered.join('\n') || EMPTY_PLACEHOLDER;
}
/**
 * Assembles the staged conversation for an incremental-summary request and
 * returns it in the transport form the streaming module expects.
 * @param {string} existingSummary - Prior summary state text.
 * @param {Array|null} existingFacts - Current SPO fact triples.
 * @param {string} newHistoryText - New dialogue to analyze.
 * @param {string} historyRange - Human-readable floor range label.
 * @param {number} nextEventId - First event id the model should assign.
 * @param {number} existingEventCount - Count of already-recorded events.
 * @returns {{top64: string, bottom64: string, assistantPrefill: string}}
 *   base64url-encoded JSON message stacks plus the assistant prefill text.
 */
function buildSummaryMessages(existingSummary, existingFacts, newHistoryText, historyRange, nextEventId, existingEventCount) {
  const factsBlock = formatFactsForLLM(existingFacts);

  // Substitute the runtime placeholders into the static templates.
  const jsonFormat = LLM_PROMPT_CONFIG.userJsonFormat.replace(
    /\{nextEventId\}/g,
    String(nextEventId)
  );
  const checkContent = LLM_PROMPT_CONFIG.assistantCheck.replace(
    /\{existingEventCount\}/g,
    String(existingEventCount)
  );

  const summaryStateContent = `<已有总结状态>\n${existingSummary}\n</已有总结状态>\n\n<当前事实图谱>\n${factsBlock}\n</当前事实图谱>`;
  const newDialogueContent = `<新对话内容>(${historyRange})\n${newHistoryText}\n</新对话内容>`;

  // Messages placed before the chat history.
  const topMessages = [
    { role: 'system', content: LLM_PROMPT_CONFIG.topSystem },
    { role: 'assistant', content: LLM_PROMPT_CONFIG.assistantDoc },
    { role: 'assistant', content: LLM_PROMPT_CONFIG.assistantAskSummary },
    { role: 'user', content: summaryStateContent },
    { role: 'assistant', content: LLM_PROMPT_CONFIG.assistantAskContent },
    { role: 'user', content: newDialogueContent },
  ];

  // Messages placed after the chat history: output contract, fake compliance
  // check, and the regeneration demand.
  const bottomMessages = [
    { role: 'user', content: `${LLM_PROMPT_CONFIG.metaProtocolStart}\n${jsonFormat}` },
    { role: 'assistant', content: checkContent },
    { role: 'user', content: LLM_PROMPT_CONFIG.userConfirm },
  ];

  return {
    top64: b64UrlEncode(JSON.stringify(topMessages)),
    bottom64: b64UrlEncode(JSON.stringify(bottomMessages)),
    assistantPrefill: LLM_PROMPT_CONFIG.assistantPrefill,
  };
}
// ═══════════════════════════════════════════════════════════════════════════
// JSON 解析
// ═══════════════════════════════════════════════════════════════════════════
/**
 * Parses the LLM's raw output into a summary object.
 * Strategy: strip surrounding markdown code fences, attempt strict
 * JSON.parse; on failure, fall back to the outermost {...} span — first
 * verbatim, then with trailing commas removed.
 * @param {string} raw - Raw model output (may include fences or prose).
 * @returns {object|null} Parsed object, or null when nothing parseable.
 */
export function parseSummaryJson(raw) {
  if (!raw) return null;

  const cleaned = String(raw).trim()
    .replace(/^```(?:json)?\s*/i, "")
    .replace(/\s*```$/i, "")
    .trim();

  try {
    return JSON.parse(cleaned);
  } catch { /* not strict JSON — fall through to brace extraction */ }

  const start = cleaned.indexOf('{');
  const end = cleaned.lastIndexOf('}');
  if (start !== -1 && end > start) {
    const candidate = cleaned.slice(start, end + 1);
    // Try the extracted span verbatim first: the trailing-comma regex below
    // is not string-aware and would corrupt values containing ",}" or ",]".
    try {
      return JSON.parse(candidate);
    } catch { /* retry with trailing commas stripped */ }
    try {
      return JSON.parse(candidate.replace(/,(\s*[}\]])/g, '$1'));
    } catch { /* unparseable even after cleanup */ }
  }
  return null;
}
// ═══════════════════════════════════════════════════════════════════════════
// 主生成函数
// ═══════════════════════════════════════════════════════════════════════════
/**
 * Runs one incremental-summary generation against the configured LLM.
 * Builds the staged prompt, dispatches it through the xiaobaix streaming
 * module, and returns the raw model output (callers parse it with
 * parseSummaryJson).
 * @param {object} options
 * @param {string} options.existingSummary - Prior summary state text.
 * @param {Array|null} [options.existingFacts] - Current SPO fact triples.
 * @param {string} options.newHistoryText - New dialogue to analyze (required).
 * @param {string} options.historyRange - Floor-range label for the dialogue.
 * @param {number} options.nextEventId - First event id to assign.
 * @param {number} [options.existingEventCount=0] - Already-recorded events.
 * @param {object} [options.llmApi] - { provider, url, key, model }; provider
 *   'st' (or unmapped) means: use the streaming module's own backend.
 * @param {object} [options.genParams] - Optional sampling parameters.
 * @param {boolean} [options.useStream=true] - Stream + poll vs. single call.
 * @param {number} [options.timeout=120000] - Streaming poll timeout (ms).
 * @param {string} [options.sessionId='xb_summary'] - Streaming session id.
 * @returns {Promise<string>} Raw LLM output.
 * @throws {Error} When newHistoryText is blank or the module is not loaded.
 */
export async function generateSummary(options) {
  const {
    existingSummary,
    existingFacts,
    newHistoryText,
    historyRange,
    nextEventId,
    existingEventCount = 0,
    llmApi = {},
    genParams = {},
    useStream = true,
    timeout = 120000,
    sessionId = 'xb_summary'
  } = options;
  if (!newHistoryText?.trim()) {
    throw new Error('新对话内容为空');
  }
  const streamingMod = getStreamingModule();
  if (!streamingMod) {
    throw new Error('生成模块未加载');
  }
  const promptData = buildSummaryMessages(
    existingSummary,
    existingFacts,
    newHistoryText,
    historyRange,
    nextEventId,
    existingEventCount
  );
  // Arguments for xbgenrawCommand. Note the inversion: nonstream='false'
  // means streaming mode is ON.
  const args = {
    as: 'user',
    nonstream: useStream ? 'false' : 'true',
    top64: promptData.top64,
    bottom64: promptData.bottom64,
    bottomassistant: promptData.assistantPrefill,
    id: sessionId,
  };
  // Route to an external provider only when one is configured and mapped;
  // provider 'st' (SillyTavern) keeps the module's default backend.
  if (llmApi.provider && llmApi.provider !== 'st') {
    const mappedApi = PROVIDER_MAP[String(llmApi.provider).toLowerCase()];
    if (mappedApi) {
      args.api = mappedApi;
      if (llmApi.url) args.apiurl = llmApi.url;
      if (llmApi.key) args.apipassword = llmApi.key;
      if (llmApi.model) args.model = llmApi.model;
    }
  }
  // Forward sampling parameters only when explicitly set (`!= null` lets 0
  // through but skips null/undefined).
  if (genParams.temperature != null) args.temperature = genParams.temperature;
  if (genParams.top_p != null) args.top_p = genParams.top_p;
  if (genParams.top_k != null) args.top_k = genParams.top_k;
  if (genParams.presence_penalty != null) args.presence_penalty = genParams.presence_penalty;
  if (genParams.frequency_penalty != null) args.frequency_penalty = genParams.frequency_penalty;
  let rawOutput;
  if (useStream) {
    // Streaming: the command returns a session id; poll until completion.
    const sid = await streamingMod.xbgenrawCommand(args, '');
    rawOutput = await waitForStreamingComplete(sid, streamingMod, timeout);
  } else {
    // Non-streaming: the command resolves directly with the full text.
    rawOutput = await streamingMod.xbgenrawCommand(args, '');
  }
  // Debug trace of the raw model output, grouped in the console.
  console.group('%c[Story-Summary] LLM输出', 'color: #7c3aed; font-weight: bold');
  console.log(rawOutput);
  console.groupEnd();
  return rawOutput;
}