Skip to content

Commit 45f1a30

Browse files
authored
Merge pull request #123 from vectorlessflow/dev
feat: add chain command and intent-based navigation hints
2 parents 4048f75 + 06768af commit 45f1a30

4 files changed

Lines changed: 125 additions & 10 deletions

File tree

vectorless/ask/prompts.py

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -64,6 +64,7 @@ class NavigationParams:
6464
- doc_card Show document-level overview (title, summary, sections)
6565
- concepts List key concepts extracted from the document
6666
- find_section <title> Find a section by exact title (case-insensitive)
67+
- chain <name> Follow reasoning chains from a node (pre-computed links)
6768
- compare <a> <b> Compare two nodes using LLM analysis (use node IDs)
6869
- trace <name> Trace reasoning chain from a node using LLM
6970
- summarize <name> Generate dynamic LLM summary of a node
@@ -202,7 +203,7 @@ def worker_dispatch(params: WorkerDispatchParams) -> tuple[str, str]:
202203
f"cd .., back, cat, cat <name>, head <name>, find <keyword>, findtree <pattern>, grep <regex>, "
203204
f"toc [depth], stats <name>, grep_node <node> <pattern>, similar <name>, overview <name>, "
204205
f"siblings <name>, ancestors <name>, doc_card, concepts, find_section <title>, "
205-
f"compare <a> <b>, trace <name>, summarize <name>, "
206+
f"chain <name>, compare <a> <b>, trace <name>, summarize <name>, "
206207
f"wc <name>, pwd, check, done\n"
207208
f"\n"
208209
f"SEARCH STRATEGY:\n"

vectorless/ask/worker/agent.py

Lines changed: 63 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -82,6 +82,13 @@ async def run(self) -> WorkerOutput:
8282
pass
8383
logger.info("Worker starting: doc=%s max_rounds=%d", doc_name, max_rounds)
8484

85+
# Phase 0.5: try intent routes for direct jump
86+
intent_hint = ""
87+
try:
88+
intent_hint = await self._build_intent_hint(doc, query)
89+
except Exception:
90+
pass
91+
8592
try:
8693
children = await doc.ls()
8794
if children:
@@ -104,9 +111,15 @@ async def run(self) -> WorkerOutput:
104111
except Exception:
105112
pass
106113

114+
combined_hints = ""
115+
if intent_hint:
116+
combined_hints += intent_hint
107117
if keyword_hints:
108-
logger.info("Phase 1.5: keyword hints available, generating plan")
109-
await self._generate_plan(doc, query, task, state, keyword_hints, llm)
118+
combined_hints += keyword_hints
119+
120+
if combined_hints:
121+
logger.info("Phase 1.5: hints available, generating plan")
122+
await self._generate_plan(doc, query, task, state, combined_hints, llm)
110123
if state.plan:
111124
logger.info("Phase 1.5: plan generated — %s", state.plan[:150])
112125

@@ -142,7 +155,7 @@ async def run(self) -> WorkerOutput:
142155
visited_titles=visited_titles,
143156
plan=state.plan,
144157
intent_context=intent_context,
145-
keyword_hints=keyword_hints,
158+
keyword_hints=combined_hints,
146159
shared_context=shared_context,
147160
))
148161

@@ -172,7 +185,7 @@ async def run(self) -> WorkerOutput:
172185
f'"{raw_preview}"\n\n'
173186
f"Please output exactly one command "
174187
f"(ls, cd, cat, head, find, grep, toc, stats, similar, overview, "
175-
f"siblings, ancestors, doc_card, concepts, find_section, "
188+
f"siblings, ancestors, doc_card, concepts, find_section, chain, "
176189
f"compare, trace, summarize, wc, pwd, check, or done)."
177190
)
178191
state.push_history("(unrecognized) \u2192 parse failure")
@@ -239,6 +252,39 @@ async def run(self) -> WorkerOutput:
239252

240253
return state.into_worker_output(doc_name)
241254

255+
async def _build_intent_hint(self, doc: NavigableDocument, query: str) -> str:
    """Build navigation hints from pre-computed intent routes.

    If the query routing table has routes for this document's structure,
    we can suggest direct jumps instead of root-level exploration.
    Returns "" when no usable routes exist; never raises for missing data.
    """
    # Best-effort lookup: a document without route data simply yields no hint.
    try:
        routes = await doc.intent_routes()
    except Exception:
        return ""
    if not routes:
        return ""

    hint_lines = ["Intent routes (pre-computed shortcuts — use cd to jump directly):"]
    # Cap output at 5 routes x 3 targets each to keep the hint block compact.
    for route in routes[:5]:
        route_targets = getattr(route, "targets", [])
        if not route_targets:
            continue
        for hit in route_targets[:3]:
            node_title = await doc.node_title(hit.node_id)
            score = getattr(hit, "relevance", 0.0)
            why = getattr(hit, "reason", "")
            hint_lines.append(
                f"  - {node_title} (relevance {score:.2f}: {why})"
            )

    # Only the header line means every route had no targets — report "no hint".
    if len(hint_lines) == 1:
        return ""

    logger.info("Intent routes: %d routes found", len(routes))
    return "\n".join(hint_lines) + "\n"
287+
242288
async def _build_keyword_hints(self, doc: NavigableDocument, query: str) -> str:
243289
"""Build keyword hints from the document's reasoning index and acceleration data."""
244290
keywords = extract_keywords(query)
@@ -276,15 +322,23 @@ async def _build_keyword_hints(self, doc: NavigableDocument, query: str) -> str:
276322
pass
277323

278324
# Top evidence scores (pre-computed by ScorePass)
325+
# Filter to nodes whose titles overlap with query keywords for relevance
279326
score_hints = []
280327
try:
281328
scores = await doc.evidence_scores_ranked()
282-
for s in scores[:5]:
329+
kw_lower = {kw.lower() for kw in keywords}
330+
for s in scores[:20]:
283331
title = await doc.node_title(s.node_id)
284-
score_hints.append(
285-
f" - {title} (score {s.composite:.2f}: "
286-
f"density={s.density:.2f} richness={s.data_richness:.2f})"
287-
)
332+
title_lower = title.lower()
333+
# Include if title contains any query keyword or is top-3 by score
334+
is_keyword_match = any(kw in title_lower for kw in kw_lower)
335+
if is_keyword_match or len(score_hints) < 3:
336+
score_hints.append(
337+
f" - {title} (score {s.composite:.2f}: "
338+
f"density={s.density:.2f} richness={s.data_richness:.2f})"
339+
)
340+
if len(score_hints) >= 8:
341+
break
288342
except Exception:
289343
pass
290344

vectorless/ask/worker/commands.py

Lines changed: 57 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -202,6 +202,23 @@ async def handle_cat(
202202

203203
preview = content[:500] + "..." if len(content) > 500 else content
204204
state.last_feedback = f"[{title}] collected as evidence:\n{preview}"
205+
206+
# Mark overlapping nodes as visited to avoid redundant reads
207+
try:
208+
overlaps = await doc.overlaps_for(node_id)
209+
if overlaps:
210+
for overlap in overlaps[:5]:
211+
overlap_id = getattr(overlap, "node_id", None)
212+
if overlap_id and overlap_id not in state.collected_nodes:
213+
state.visited.add(overlap_id)
214+
overlap_title = await doc.node_title(overlap_id)
215+
logger.info(
216+
"Overlap dedup: marking '%s' as visited (overlap of '%s')",
217+
overlap_title, title,
218+
)
219+
except Exception:
220+
pass
221+
205222
return Step(kind="continue")
206223
except Exception as e:
207224
state.last_feedback = f"Error reading node: {e}"
@@ -688,6 +705,45 @@ async def handle_find_section(
688705
return Step(kind="continue")
689706

690707

708+
async def handle_chain(
    command: Any, doc: NavigableDocument, state: WorkerState, query: str, llm: LLMClient,
) -> Step:
    """Follow reasoning chains from a node — uses pre-computed ChainIndex."""
    target = command.target
    # Default to the current node (".") when no target was given.
    node_id = await _resolve_target(doc, target if target else ".", state)
    if node_id is None:
        state.last_feedback = f"Node '{target}' not found."
        return Step(kind="continue")
    try:
        chains = await doc.chains_for(node_id)
        if not chains:
            title = await doc.node_title(node_id)
            state.last_feedback = f"No reasoning chains found for '{title}'."
            return Step(kind="continue")
        out = ["Reasoning chains:"]
        # Show at most 5 chains, each with up to 6 member nodes.
        for chain in chains[:5]:
            head = getattr(chain, "title", "")
            summary = getattr(chain, "summary", "")
            members = getattr(chain, "nodes", [])
            if not members:
                # Chain with no node list: fall back to whatever text we have.
                desc = summary or head or "(unnamed chain)"
                out.append(f"  - {desc}")
                continue
            titles = [getattr(n, "title", "?") for n in members[:6]]
            overflow = f" (+{len(members) - 6} more)" if len(members) > 6 else ""
            path = " → ".join(titles)
            out.append(f"  - {head}: {path}{overflow}")
            if summary:
                out.append(f"    {summary[:120]}")
        state.last_feedback = "\n".join(out)
    except Exception as e:
        state.last_feedback = f"chain error: {e}"
    return Step(kind="continue")
745+
746+
691747
async def handle_check(
692748
command: Any, doc: NavigableDocument, state: WorkerState, query: str, llm: LLMClient,
693749
) -> Step:
@@ -758,6 +814,7 @@ async def handle_done(
758814
"doc_card": handle_doc_card,
759815
"concepts": handle_concepts,
760816
"find_section": handle_find_section,
817+
"chain": handle_chain,
761818
"check": handle_check,
762819
"done": handle_done,
763820
}

vectorless/ask/worker/parse.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -138,6 +138,9 @@ def parse_command(llm_output: str) -> Command:
138138
elif cmd == "find_section":
139139
target = _strip_quotes(" ".join(parts[1:])) if len(parts) > 1 else ""
140140
return Command(kind="find_section", target=target)
141+
elif cmd == "chain":
142+
target = _strip_quotes(" ".join(parts[1:])) if len(parts) > 1 else ""
143+
return Command(kind="chain", target=target)
141144
else:
142145
return Command(kind="ls") # fallback: re-observe
143146

0 commit comments

Comments (0)