Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions examples/openclaw-plugin/client.ts
Original file line number Diff line number Diff line change
Expand Up @@ -92,8 +92,13 @@ export type PreArchiveAbstract = {
abstract: string;
};

/**
 * Working-memory payload attached to a session-context response.
 * `markdown` carries the pre-rendered markdown block clients inject
 * as session history (see `SessionContextResult.working_memory`).
 */
export type WorkingMemoryResult = {
  markdown: string;
};

export type SessionContextResult = {
latest_archive_overview: string;
working_memory?: WorkingMemoryResult;
pre_archive_abstracts: PreArchiveAbstract[];
messages: OVMessage[];
estimatedTokens: number;
Expand Down
24 changes: 19 additions & 5 deletions examples/openclaw-plugin/context-engine.ts
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
import { createHash } from "node:crypto";
import { DEFAULT_PHASE2_POLL_TIMEOUT_MS } from "./client.js";
import type { OpenVikingClient, OVMessage } from "./client.js";
import type { OpenVikingClient, OVMessage, SessionContextResult } from "./client.js";
import type { MemoryOpenVikingConfig } from "./config.js";
import {
compileSessionPatterns,
Expand Down Expand Up @@ -213,6 +213,20 @@ function validTokenBudget(raw: unknown): number | undefined {
return undefined;
}

/**
 * Pick the markdown to use as the session's working memory.
 *
 * Preference order: the new `working_memory.markdown` field, then the
 * legacy `latest_archive_overview`. The first candidate that is a
 * non-blank string wins (returned trimmed); otherwise "".
 *
 * @param ctx Session-context slice holding both candidate fields (may be undefined).
 * @returns Trimmed markdown, or "" when neither candidate is usable.
 */
function preferredWorkingMemoryMarkdown(
  ctx: Pick<SessionContextResult, "latest_archive_overview" | "working_memory"> | undefined,
): string {
  // Ordered candidates: preferred field first, legacy fallback second.
  const candidates = [ctx?.working_memory?.markdown, ctx?.latest_archive_overview];
  for (const candidate of candidates) {
    if (typeof candidate !== "string") {
      continue;
    }
    const trimmed = candidate.trim();
    if (trimmed) {
      return trimmed;
    }
  }
  return "";
}

/** OpenClaw session UUID (path-safe on Windows). */
const OPENVIKING_OV_SESSION_UUID =
/^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$/i;
Expand Down Expand Up @@ -820,6 +834,7 @@ export function createMemoryOpenVikingContextEngine(params: {
const routingRef = assembleParams.sessionId ?? sessionKey ?? OVSessionId;
const agentId = resolveAgentId(routingRef, sessionKey, OVSessionId);
const ctx = await client.getSessionContext(OVSessionId, tokenBudget, agentId);
const workingMemoryMarkdown = preferredWorkingMemoryMarkdown(ctx);

const preAbstracts = ctx?.pre_archive_abstracts ?? [];
const hasArchives = !!ctx?.latest_archive_overview || preAbstracts.length > 0;
Expand All @@ -837,7 +852,7 @@ export function createMemoryOpenVikingContextEngine(params: {
}

const { sanitized, archive, session, budgets, instruction } = buildAssembledContext(
ctx.latest_archive_overview,
workingMemoryMarkdown,
preAbstracts,
ctx.messages,
tokenBudget,
Expand Down Expand Up @@ -1218,14 +1233,13 @@ export function createMemoryOpenVikingContextEngine(params: {
let ctx: Awaited<ReturnType<typeof client.getSessionContext>> | undefined;
try {
ctx = await client.getSessionContext(OVSessionId, tokenBudget, agentId);
const workingMemoryMarkdown = preferredWorkingMemoryMarkdown(ctx);
// Log the full getSessionContext result for debugging.
logger.info(
`openviking: compact getSessionContext raw result for ${OVSessionId}: ` +
JSON.stringify(ctx, null, 2),
);
if (typeof ctx.latest_archive_overview === "string") {
summary = ctx.latest_archive_overview.trim();
}
summary = workingMemoryMarkdown;
if (
typeof ctx.estimatedTokens === "number" &&
Number.isFinite(ctx.estimatedTokens)
Expand Down
34 changes: 34 additions & 0 deletions examples/openclaw-plugin/tests/ut/context-engine-assemble.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -161,6 +161,40 @@ describe("context-engine assemble()", () => {
});
});

// Regression test: when the backend returns BOTH the new working_memory
// payload and the legacy latest_archive_overview, assemble() must build the
// injected session-history message from working_memory.markdown.
it("prefers working_memory.markdown over latest_archive_overview when both are present", async () => {
  const { engine } = makeEngine({
    latest_archive_overview: "# Session Summary\nLegacy overview",
    working_memory: {
      markdown: "# Session Summary\nPreferred working memory\n\n## Current Conversation Tail\n### User\nRecent tail",
    },
    pre_archive_abstracts: [],
    messages: [],
    estimatedTokens: 64,
    stats: {
      ...makeStats(),
      totalArchives: 1,
      includedArchives: 1,
      archiveTokens: 64,
    },
  });

  const result = await engine.assemble({
    sessionId: "session-working-memory",
    messages: [],
    tokenBudget: 4096,
  });

  // The first assembled message is the summary injection, sourced from
  // working_memory.markdown.
  expect(result.messages[0]).toEqual({
    role: "user",
    content:
      "[Session History Summary]\n# Session Summary\nPreferred working memory\n\n## Current Conversation Tail\n### User\nRecent tail",
  });
  // Explicit guard against regressing back to the legacy overview field.
  expect(result.messages[0]).not.toEqual({
    role: "user",
    content: "[Session History Summary]\n# Session Summary\nLegacy overview",
  });
});

it("passes through live messages when the session matches bypassSessionPatterns", async () => {
const { engine, client, getClient } = makeEngine(
{
Expand Down
43 changes: 42 additions & 1 deletion examples/openclaw-plugin/tests/ut/context-engine-compact.test.ts
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,13 @@ function makeLogger() {
};
}

function makeEngine(commitResult: unknown, opts?: { throwError?: Error }) {
function makeEngine(
commitResult: unknown,
opts?: {
throwError?: Error;
contextResult?: Record<string, unknown>;
},
) {
const cfg = memoryOpenVikingConfigSchema.parse({
mode: "remote",
baseUrl: "http://127.0.0.1:1933",
Expand All @@ -31,10 +37,12 @@ function makeEngine(commitResult: unknown, opts?: { throwError?: Error }) {
getSessionContext: vi.fn().mockResolvedValue({
latest_archive_overview: "",
latest_archive_id: "",
working_memory: undefined,
pre_archive_abstracts: [],
messages: [],
estimatedTokens: 0,
stats: { totalArchives: 0, includedArchives: 0, droppedArchives: 0, failedArchives: 0, activeTokens: 0, archiveTokens: 0 },
...opts?.contextResult,
}),
} as unknown as OpenVikingClient;

Expand Down Expand Up @@ -215,6 +223,39 @@ describe("context-engine compact()", () => {
expect(result.reason).toBe("commit_completed");
});

// Regression test for compact(): when getSessionContext returns a
// working_memory payload, its markdown is preferred as the restored summary
// (over the legacy latest_archive_overview). Verified indirectly via the
// engine's own info logging of the raw context and the restored summary.
it("uses working_memory.markdown as the restored compact summary when present", async () => {
  const { engine, logger } = makeEngine({
    status: "completed",
    archived: true,
    task_id: "task-wm",
    memories_extracted: {},
  }, {
    contextResult: {
      latest_archive_overview: "# Session Summary\nLegacy overview",
      working_memory: {
        markdown: "# Session Summary\nPreferred working memory",
      },
    },
  });

  // Snapshot the log count so we only inspect entries emitted by compact().
  const contextCalls = logger.info.mock.calls.length;
  await engine.compact({
    sessionId: "s-working-memory",
    sessionFile: "",
  });

  expect(logger.info.mock.calls.slice(contextCalls)).toEqual(
    expect.arrayContaining([
      [
        // Raw context dump must include the working_memory payload.
        expect.stringContaining('"working_memory": {\n "markdown": "# Session Summary\\nPreferred working memory"'),
      ],
      [
        // Summary restoration log confirms a non-empty overview was used.
        expect.stringContaining("latestArchiveOverview=present"),
      ],
    ]),
  );
});

it("returns compacted=false when commit succeeds with archived=false", async () => {
const { engine } = makeEngine({
status: "completed",
Expand Down
83 changes: 79 additions & 4 deletions openviking/session/session.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@
from uuid import uuid4

from openviking.core.namespace import canonical_session_uri
from openviking.message import Message, Part
from openviking.message import ContextPart, Message, Part, TextPart, ToolPart
from openviking.server.identity import RequestContext, Role
from openviking.telemetry import get_current_telemetry, tracer
from openviking.utils.time_utils import get_current_timestamp
Expand All @@ -30,6 +30,9 @@
logger = get_logger(__name__)

_ARCHIVE_WAIT_POLL_SECONDS = 0.1
_WORKING_MEMORY_TAIL_MAX_MESSAGES = 4
_WORKING_MEMORY_TEXT_MAX_CHARS = 800
_WORKING_MEMORY_TOOL_IO_MAX_CHARS = 400


@dataclass
Expand Down Expand Up @@ -790,6 +793,7 @@ async def get_session_context(self, token_budget: int = 128_000) -> Dict[str, An
include_latest_overview = bool(
latest_archive and latest_archive["overview_tokens"] <= remaining_budget
)
latest_archive_overview = latest_archive["overview"] if include_latest_overview else ""
latest_archive_tokens = latest_archive["overview_tokens"] if include_latest_overview else 0
if include_latest_overview:
remaining_budget -= latest_archive_tokens
Expand All @@ -803,11 +807,16 @@ async def get_session_context(self, token_budget: int = 128_000) -> Dict[str, An
dropped_archives = max(
0, context["total_archives"] - context["failed_archives"] - included_archives
)
working_memory_markdown = self._build_working_memory_markdown(
latest_archive_overview=latest_archive_overview,
merged_messages=merged_messages,
)

return {
"latest_archive_overview": (
latest_archive["overview"] if include_latest_overview else ""
),
"latest_archive_overview": latest_archive_overview,
"working_memory": {
"markdown": working_memory_markdown,
},
"pre_archive_abstracts": [], # 保持 API 向后兼容,返回空数组
"messages": [m.to_dict() for m in merged_messages],
"estimatedTokens": message_tokens + archive_tokens,
Expand All @@ -821,6 +830,72 @@ async def get_session_context(self, token_budget: int = 128_000) -> Dict[str, An
},
}

def _build_working_memory_markdown(
    self,
    latest_archive_overview: str,
    merged_messages: List[Message],
) -> str:
    """Build a compact working-memory markdown block for downstream clients.

    Layout: the latest archive overview first, then a bounded tail of the
    most recent merged messages under a "## Current Conversation Tail"
    heading. Returns "" when there is no (non-blank) overview.
    """
    summary = (latest_archive_overview or "").strip()
    if not summary:
        return ""

    parts: List[str] = [summary]
    # Only the last few messages are included, bounded by the module cap.
    recent = merged_messages[-_WORKING_MEMORY_TAIL_MAX_MESSAGES:]
    if recent:
        parts.append("## Current Conversation Tail")
        for msg in recent:
            parts.append(self._format_working_memory_message(msg))
    return "\n\n".join(part for part in parts if part).strip()

def _format_working_memory_message(self, message: Message) -> str:
    """Render one message into markdown for the working-memory tail.

    Each part is rendered to its own block: plain text is truncated, context
    parts become "[Context: …]" lines, and tool parts become a small header
    plus optional truncated Input/Output lines. Unknown part kinds are
    skipped; a message with no renderable parts renders as "(empty)".
    """
    heading = "User" if message.role == "user" else "Assistant"
    rendered: List[str] = []
    for part in message.parts:
        if isinstance(part, TextPart):
            body = self._truncate_working_memory_text(part.text, _WORKING_MEMORY_TEXT_MAX_CHARS)
            if body:
                rendered.append(body)
        elif isinstance(part, ContextPart):
            summary = self._truncate_working_memory_text(
                part.abstract, _WORKING_MEMORY_TEXT_MAX_CHARS
            )
            if summary:
                rendered.append(f"[Context: {part.context_type}] {summary}")
        elif isinstance(part, ToolPart):
            tool_lines = [
                f"[Tool: {part.tool_name or 'unknown'}] ({part.tool_status or 'unknown'})"
            ]
            if part.tool_input:
                # Tool input is a structure; serialize before truncating.
                payload = json.dumps(part.tool_input, ensure_ascii=False)
                tool_lines.append(
                    "Input: "
                    + self._truncate_working_memory_text(
                        payload, _WORKING_MEMORY_TOOL_IO_MAX_CHARS
                    )
                )
            if part.tool_output:
                tool_lines.append(
                    "Output: "
                    + self._truncate_working_memory_text(
                        part.tool_output, _WORKING_MEMORY_TOOL_IO_MAX_CHARS
                    )
                )
            rendered.append("\n".join(tool_lines))

    # Fall back to a placeholder so the role heading is never left bare.
    return f"### {heading}\n" + "\n\n".join(rendered or ["(empty)"])

@staticmethod
def _truncate_working_memory_text(text: str, max_chars: int) -> str:
"""Bound working-memory text blocks to keep the derived field compact."""
normalized = (text or "").strip()
if len(normalized) <= max_chars:
return normalized
return normalized[: max_chars - 3].rstrip() + "..."

async def get_context_for_search(self, query: str, max_messages: int = 20) -> Dict[str, Any]:
"""Get session context for intent analysis."""
del query # Current query no longer affects historical archive selection.
Expand Down
31 changes: 31 additions & 0 deletions tests/session/test_session_context.py
Original file line number Diff line number Diff line change
Expand Up @@ -391,6 +391,37 @@ async def test_get_session_context_counts_active_tool_parts(
assert context["stats"]["activeTokens"] == session.messages[0].estimated_tokens
assert context["stats"]["activeTokens"] > _estimate_tokens("Executing tool...")

async def test_get_session_context_builds_working_memory_from_latest_overview_and_tail(
    self, client: AsyncOpenViking, monkeypatch
):
    """working_memory.markdown = latest archive overview + bounded message tail.

    Archives one message so a latest-overview exists, then adds five live
    messages and asserts only the most recent ones (the tail is capped at
    _WORKING_MEMORY_TAIL_MAX_MESSAGES = 4) appear after the overview.
    """
    session = client.session(session_id="working_memory_tail_test")
    summary = "# Session Summary\n\nArchived project state."

    # Stub the summary generation so the archived overview is deterministic.
    async def fake_generate(_messages, latest_archive_overview=""):
        del _messages, latest_archive_overview
        return summary

    monkeypatch.setattr(session, "_generate_archive_summary_async", fake_generate)

    # Commit one message so get_session_context has a latest archive overview.
    session.add_message("user", [TextPart("archived seed")])
    result = await session.commit_async()
    await _wait_for_task(result["task_id"])

    # Five tail messages: with a cap of 4, the oldest ("tail one") must drop.
    for text in ("tail one", "tail two", "tail three", "tail four", "tail five"):
        session.add_message("user", [TextPart(text)])

    # Budget large enough to include the overview and every live message.
    token_budget = sum(message.estimated_tokens for message in session.messages) + _estimate_tokens(
        summary
    )
    context = await session.get_session_context(token_budget=token_budget)

    markdown = context["working_memory"]["markdown"]
    assert markdown.startswith(summary)
    assert "## Current Conversation Tail" in markdown
    assert "tail one" not in markdown  # trimmed by the tail cap
    assert "tail two" in markdown
    assert "tail five" in markdown

async def test_get_session_context_reads_latest_overview_and_all_archive_abstracts(
self, client: AsyncOpenViking, monkeypatch
):
Expand Down
Loading