Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,10 @@ The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.1.0/).

## [Unreleased]

### Added

- feat(tui): Phase 1 dynamic metrics in TUI — 8 new fields in `MetricsSnapshot` (`embedding_model`, `token_budget`, `compaction_threshold`, `vault_backend`, `active_channel`, `self_learning_enabled`, `cache_enabled`, `autosave_enabled`); Resources panel redesigned with LLM/Session/Infra grouped sections and overflow collapse at height < 30; status bar shows active model name replacing the low-value Panel toggle indicator

### Fixed

- fix(config): add `[security.guardrail]` stub to `default.toml` so `--migrate-config` injects commented guardrail defaults for configs that have `[security]` but no `[security.guardrail]` (#2158)
Expand Down
18 changes: 18 additions & 0 deletions crates/zeph-core/src/metrics.rs
Original file line number Diff line number Diff line change
Expand Up @@ -267,6 +267,24 @@ pub struct MetricsSnapshot {
pub tool_cache_entries: usize,
/// Number of semantic-tier facts in memory (0 when tier promotion disabled).
pub semantic_fact_count: u64,

// --- Phase 1: dynamic config metrics ---
/// Embedding model name (e.g. "nomic-embed-text"). Empty when not configured.
pub embedding_model: String,
/// Configured max token budget for the context window.
pub token_budget: Option<u32>,
/// Token threshold that triggers soft compaction (0.0–1.0 ratio × budget).
pub compaction_threshold: Option<u32>,
/// Vault backend identifier: "age", "env", or "none".
pub vault_backend: String,
/// Active I/O channel: "cli", "telegram", or "tui".
pub active_channel: String,
/// Whether the self-learning engine is enabled for this session.
pub self_learning_enabled: bool,
/// Whether semantic response caching is enabled.
pub cache_enabled: bool,
/// Whether assistant messages are auto-saved to memory.
pub autosave_enabled: bool,
}

/// Strip ASCII control characters and ANSI escape sequences from a string for safe TUI display.
Expand Down
225 changes: 179 additions & 46 deletions crates/zeph-tui/src/widgets/resources.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,59 +9,133 @@ use ratatui::widgets::{Block, Borders, Paragraph};
use crate::metrics::MetricsSnapshot;
use crate::theme::Theme;

#[allow(clippy::too_many_lines)]
pub fn render(metrics: &MetricsSnapshot, frame: &mut Frame, area: Rect) {
let theme = Theme::default();

let mut res_lines = vec![
Line::from(format!(" Provider: {}", metrics.provider_name)),
Line::from(format!(" Model: {}", metrics.model_name)),
Line::from(format!(" Context: {}", metrics.context_tokens)),
Line::from(format!(" Session: {}", metrics.total_tokens)),
Line::from(format!(" API calls: {}", metrics.api_calls)),
Line::from(format!(" Latency: {}ms", metrics.last_llm_latency_ms)),
];
let collapsed = area.height < 30;

let mut lines: Vec<Line<'_>> = Vec::new();

// LLM section
lines.push(Line::from(" LLM"));
lines.push(Line::from(format!(
" Provider: {}",
metrics.provider_name
)));
lines.push(Line::from(format!(" Model: {}", metrics.model_name)));
if !metrics.embedding_model.is_empty() {
lines.push(Line::from(format!(
" Embed: {}",
metrics.embedding_model
)));
}
lines.push(Line::from(format!(
" Context: {} | Latency: {}ms",
metrics.context_tokens, metrics.last_llm_latency_ms
)));
if metrics.extended_context {
res_lines.push(Line::from(" Max context: 1M"));
lines.push(Line::from(" Max context: 1M"));
}
if metrics.cache_creation_tokens > 0 || metrics.cache_read_tokens > 0 {
res_lines.push(Line::from(format!(
" Cache write: {}",
metrics.cache_creation_tokens

// Session section
if collapsed {
lines.push(Line::from(format!(
" Session: {} tok | {} calls",
metrics.total_tokens, metrics.api_calls
)));
res_lines.push(Line::from(format!(
" Cache read: {}",
metrics.cache_read_tokens
} else {
lines.push(Line::from(" Session"));
lines.push(Line::from(format!(
" Tokens: {} | API: {}",
metrics.total_tokens, metrics.api_calls
)));
if let Some(budget) = metrics.token_budget {
if let Some(threshold) = metrics.compaction_threshold {
lines.push(Line::from(format!(
" Budget: {budget} | Compact: {threshold}"
)));
} else {
lines.push(Line::from(format!(" Budget: {budget}")));
}
}
if metrics.cache_creation_tokens > 0 || metrics.cache_read_tokens > 0 {
lines.push(Line::from(format!(
" Cache W:{} R:{}",
metrics.cache_creation_tokens, metrics.cache_read_tokens
)));
}
if metrics.filter_applications > 0 {
#[allow(clippy::cast_precision_loss)]
let hit_pct = if metrics.filter_total_commands > 0 {
metrics.filter_filtered_commands as f64 / metrics.filter_total_commands as f64
* 100.0
} else {
0.0
};
lines.push(Line::from(format!(
" Filter: {}/{} ({hit_pct:.0}% hit)",
metrics.filter_filtered_commands, metrics.filter_total_commands,
)));
#[allow(clippy::cast_precision_loss)]
let pct = if metrics.filter_raw_tokens > 0 {
metrics.filter_saved_tokens as f64 / metrics.filter_raw_tokens as f64 * 100.0
} else {
0.0
};
lines.push(Line::from(format!(
" Filter saved: {} tok ({pct:.0}%)",
metrics.filter_saved_tokens,
)));
}
}
if metrics.filter_applications > 0 {
#[allow(clippy::cast_precision_loss)]
let hit_pct = if metrics.filter_total_commands > 0 {
metrics.filter_filtered_commands as f64 / metrics.filter_total_commands as f64 * 100.0
} else {
0.0
};
res_lines.push(Line::from(format!(
" Filter: {}/{} commands ({hit_pct:.0}% hit rate)",
metrics.filter_filtered_commands, metrics.filter_total_commands,
)));
#[allow(clippy::cast_precision_loss)]
let pct = if metrics.filter_raw_tokens > 0 {
metrics.filter_saved_tokens as f64 / metrics.filter_raw_tokens as f64 * 100.0
} else {
0.0
};
res_lines.push(Line::from(format!(
" Filter saved: {} tok ({pct:.0}%)",
metrics.filter_saved_tokens,
)));
res_lines.push(Line::from(format!(
" Confidence: F/{} P/{} B/{}",
metrics.filter_confidence_full,
metrics.filter_confidence_partial,
metrics.filter_confidence_fallback,
)));

// Infra section
if collapsed {
let mut infra_parts: Vec<String> = Vec::new();
if !metrics.vault_backend.is_empty() {
infra_parts.push(format!("vault:{}", metrics.vault_backend));
}
if !metrics.active_channel.is_empty() {
infra_parts.push(format!("ch:{}", metrics.active_channel));
}
if !infra_parts.is_empty() {
lines.push(Line::from(format!(" Infra: {}", infra_parts.join(" | "))));
}
} else {
lines.push(Line::from(" Infra"));
match (
metrics.vault_backend.as_str(),
metrics.active_channel.as_str(),
) {
("", "") => {}
(v, "") => lines.push(Line::from(format!(" Vault: {v}"))),
("", c) => lines.push(Line::from(format!(" Channel: {c}"))),
(v, c) => lines.push(Line::from(format!(" Vault: {v} | Channel: {c}"))),
}

let mut flags: Vec<&str> = Vec::new();
if metrics.self_learning_enabled {
flags.push("Learning: ON");
}
if metrics.cache_enabled {
flags.push("Cache: ON");
}
if metrics.autosave_enabled {
flags.push("Autosave: ON");
}
if !flags.is_empty() {
lines.push(Line::from(format!(" {}", flags.join(" | "))));
}
if metrics.mcp_server_count > 0 {
lines.push(Line::from(format!(
" MCP: {} servers, {} tools",
metrics.mcp_server_count, metrics.mcp_tool_count
)));
}
}
let resources = Paragraph::new(res_lines).block(

let resources = Paragraph::new(lines).block(
Block::default()
.borders(Borders::ALL)
.border_style(theme.panel_border)
Expand Down Expand Up @@ -89,7 +163,7 @@ mod tests {
..MetricsSnapshot::default()
};

let output = render_to_string(35, 10, |frame, area| {
let output = render_to_string(35, 12, |frame, area| {
super::render(&metrics, frame, area);
});
assert_snapshot!(output);
Expand All @@ -108,7 +182,7 @@ mod tests {
..MetricsSnapshot::default()
};

let output = render_to_string(35, 11, |frame, area| {
let output = render_to_string(35, 13, |frame, area| {
super::render(&metrics, frame, area);
});
assert!(
Expand All @@ -117,4 +191,63 @@ mod tests {
);
assert_snapshot!(output);
}

#[test]
fn resources_with_full_infra() {
    // Exercise the expanded (non-collapsed) layout with every Phase 1
    // dynamic field populated: embedding model, budget/compaction,
    // vault backend, channel, all three feature flags, and MCP counts.
    let snapshot = MetricsSnapshot {
        provider_name: "claude".into(),
        model_name: "claude-sonnet-4-6".into(),
        embedding_model: "nomic-embed-text".into(),
        context_tokens: 10000,
        total_tokens: 15000,
        api_calls: 7,
        last_llm_latency_ms: 180,
        token_budget: Some(200_000),
        compaction_threshold: Some(120_000),
        vault_backend: "age".into(),
        active_channel: "tui".into(),
        self_learning_enabled: true,
        cache_enabled: true,
        autosave_enabled: true,
        mcp_server_count: 2,
        mcp_tool_count: 14,
        ..MetricsSnapshot::default()
    };

    // Height 30 keeps us at/above the collapse threshold (collapse is < 30),
    // so the grouped Infra section renders in its long form.
    let output = render_to_string(40, 30, |frame, area| {
        super::render(&snapshot, frame, area);
    });

    assert!(
        output.contains("Vault: age"),
        "expected vault backend; got: {output:?}"
    );
    assert!(
        output.contains("Channel: tui"),
        "expected channel; got: {output:?}"
    );
    assert!(
        output.contains("Learning: ON"),
        "expected learning flag; got: {output:?}"
    );
    assert_snapshot!(output);
}

#[test]
fn resources_collapsed_when_small_height() {
    // Below the 30-row threshold the panel falls back to the compact
    // layout, which folds vault/channel into one inline "Infra:" line.
    let snapshot = MetricsSnapshot {
        vault_backend: "age".into(),
        active_channel: "tui".into(),
        provider_name: "claude".into(),
        model_name: "claude-sonnet-4-6".into(),
        ..MetricsSnapshot::default()
    };

    // 20 rows < 30 → collapsed rendering path.
    let output = render_to_string(40, 20, |frame, area| {
        super::render(&snapshot, frame, area);
    });

    assert!(
        output.contains("vault:age"),
        "collapsed mode should show vault inline; got: {output:?}"
    );
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -3,13 +3,15 @@ source: crates/zeph-tui/src/widgets/resources.rs
expression: output
---
┌ Resources ──────────────────────┐
│ Provider: claude │
│ Model: claude-sonnet-4-6 │
│ Context: 50000 │
│ Session: 75000 │
│ API calls: 3 │
│ Latency: 400ms │
│ Max context: 1M │
│ LLM │
│ Provider: claude │
│ Model: claude-sonnet-4-6 │
│ Context: 50000 | Latency: 400│
│ Max context: 1M │
│ Session: 75000 tok | 3 calls │
│ │
│ │
│ │
│ │
│ │
└─────────────────────────────────┘
Original file line number Diff line number Diff line change
@@ -0,0 +1,34 @@
---
source: crates/zeph-tui/src/widgets/resources.rs
expression: output
---
┌ Resources ───────────────────────────┐
│ LLM │
│ Provider: claude │
│ Model: claude-sonnet-4-6 │
│ Embed: nomic-embed-text │
│ Context: 10000 | Latency: 180ms │
│ Session │
│ Tokens: 15000 | API: 7 │
│ Budget: 200000 | Compact: 120000 │
│ Infra │
│ Vault: age | Channel: tui │
│ Learning: ON | Cache: ON | Autosav│
│ MCP: 2 servers, 14 tools │
│ │
│ │
│ │
│ │
│ │
│ │
│ │
│ │
│ │
│ │
│ │
│ │
│ │
│ │
│ │
│ │
└──────────────────────────────────────┘
Original file line number Diff line number Diff line change
Expand Up @@ -3,12 +3,14 @@ source: crates/zeph-tui/src/widgets/resources.rs
expression: output
---
┌ Resources ──────────────────────┐
│ Provider: claude │
│ Model: opus-4 │
│ Context: 8000 │
│ Session: 12000 │
│ API calls: 5 │
│ Latency: 250ms │
│ LLM │
│ Provider: claude │
│ Model: opus-4 │
│ Context: 8000 | Latency: 250m│
│ Session: 12000 tok | 5 calls │
│ │
│ │
│ │
│ │
│ │
└─────────────────────────────────┘
Original file line number Diff line number Diff line change
Expand Up @@ -2,4 +2,4 @@
source: crates/zeph-tui/src/widgets/status.rs
expression: output
---
[Insert] | Panel: ON | Skills: 2/5 | Tokens: 4.2k | qdrant: OK | API: 12 | 2m 15s
[Insert] | Skills: 2/5 | Tokens: 4.2k | qdrant: OK | API: 12 | 2m 15s
Loading
Loading