All checks were successful
Build AppImage / build (push) Successful in 8m32s
- Konzept-Map: ~60 deutsche Phrasen → technische Suchbegriffe
("Nachrichten falsch rum" → "message sort chronological")
- Bigram-Extraktion: benachbarte Content-Woerter als Phrase
- Chat-Kontext: letzte 3 User-Nachrichten fliessen in die Suche ein
- Erweiterte Tech-Terms (~40) und Stoppwort-Liste
- Keywords max 12 statt 8, Konzepte haben Vorrang
Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
1587 lines
58 KiB
Rust
1587 lines
58 KiB
Rust
// Claude Desktop — SQLite Datenbankschicht
|
|
// Persistiert Guard-Rails, Audit-Log, Memory und Einstellungen
|
|
|
|
use rusqlite::{params, Connection, Result as SqlResult};
|
|
use std::path::Path;
|
|
use std::sync::{Arc, Mutex};
|
|
use tauri::{AppHandle, Manager};
|
|
|
|
use crate::audit::{AuditAction, AuditCategory, AuditEntry, AuditStats};
|
|
use crate::guard::{Permission, PermissionAction, PermissionType};
|
|
use crate::memory::{ContextCategory, MemoryEntry, Pattern};
|
|
use crate::strutil::safe_truncate_ellipsis;
|
|
|
|
/// A Claude session (one conversation), persisted in the `sessions` table.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct Session {
    pub id: String,
    /// Session id assigned by the Claude backend, if known.
    pub claude_session_id: Option<String>,
    pub title: String,
    /// Working directory the session is bound to (project binding), if any.
    pub working_dir: Option<String>,
    pub message_count: i64,
    pub token_input: i64,
    pub token_output: i64,
    pub cost_usd: f64,
    /// Lifecycle state; queries in this file filter on 'active' (see `get_active_session`).
    pub status: String,
    pub created_at: String,
    pub updated_at: String,
    /// Preview of the most recent message, if any.
    pub last_message: Option<String>,
}
|
/// A chat message belonging to a session (row in `messages`).
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct ChatMessage {
    pub id: String,
    pub session_id: String,
    pub role: String, // "user", "assistant", "system"
    pub content: String,
    /// Model that produced the message, if recorded.
    pub model: Option<String>,
    pub agent_id: Option<String>, // agent that produced the message, if any
    pub timestamp: String,
}
|
/// Block C: search hit for cross-session recall (FTS5 over `messages`).
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct PastMessageMatch {
    pub id: String,
    pub session_id: String,
    pub role: String,
    pub snippet: String, // excerpt of up to 240 characters
    pub timestamp: String,
    /// Title of the owning session; `None` when the session row is gone.
    pub session_title: Option<String>,
}
|
/// A monitor event (system log row in `monitor_events`).
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct MonitorEvent {
    pub id: String,
    pub timestamp: String,
    pub event_type: String, // "api", "tool", "agent", "hook", "mcp", "error", "debug"
    pub summary: String,
    pub details: Option<String>, // JSON string payload
    pub agent_id: Option<String>,
    pub session_id: Option<String>,
    pub duration_ms: Option<i64>,
    // Persisted and read back, but currently unused by in-process callers.
    #[allow(dead_code)]
    pub error: Option<String>,
}
|
/// Checkpoint entry (file snapshot for accept/reject + rewind).
#[allow(dead_code)]
pub struct CheckpointEntry {
    pub tool_id: String,
    pub session_id: String,
    pub tool_name: String,
    pub file_path: String,
    /// File content captured before the tool ran.
    pub content_before: String,
    /// File content after the tool ran, if captured.
    pub content_after: Option<String>,
    /// Lifecycle state; the schema default is 'pending'.
    pub status: String,
    pub created_at: String,
}
|
/// Thin wrapper around the SQLite connection; all persistence goes through here.
pub struct Database {
    // Crate-visible so sibling modules can run their own statements.
    pub(crate) conn: Connection,
}
|
/// Database statistics (row counts plus file size).
#[derive(Debug, serde::Serialize)]
pub struct DbStats {
    pub permissions: usize,
    pub audit_entries: usize,
    pub memory_entries: usize,
    pub patterns: usize,
    /// On-disk database size in KiB.
    pub db_size_kb: u64,
}
|
impl Database {
|
|
/// Öffnet oder erstellt die Datenbank
|
|
pub fn open(path: &Path) -> SqlResult<Self> {
|
|
let conn = Connection::open(path)?;
|
|
|
|
// WAL-Modus für bessere Performance
|
|
conn.execute_batch("PRAGMA journal_mode=WAL; PRAGMA foreign_keys=ON;")?;
|
|
|
|
let db = Self { conn };
|
|
db.create_tables()?;
|
|
Ok(db)
|
|
}
|
|
|
|
/// Creates the full schema (tables, indexes, triggers) idempotently.
///
/// Called on every `open`; all statements use IF NOT EXISTS so re-running
/// is safe. The one exception is `cleanup_old_monitor_events`, which is
/// dropped first so an updated trigger body replaces any older version.
///
/// NOTE(review): the SQL comment above `messages_fts` claims external
/// content mode, but the virtual table is declared without
/// `content='messages'`, so the FTS table stores its own copy of the
/// text — confirm which behavior is intended.
fn create_tables(&self) -> SqlResult<()> {
    self.conn.execute_batch(
        "
        -- Guard-Rails Permissions
        CREATE TABLE IF NOT EXISTS permissions (
            id TEXT PRIMARY KEY,
            pattern TEXT NOT NULL,
            tool TEXT,
            path_pattern TEXT,
            action TEXT NOT NULL DEFAULT 'allow',
            created_at TEXT NOT NULL,
            use_count INTEGER DEFAULT 0,
            last_used TEXT
        );

        -- Audit-Log
        CREATE TABLE IF NOT EXISTS audit_log (
            id TEXT PRIMARY KEY,
            timestamp TEXT NOT NULL,
            category TEXT NOT NULL,
            action TEXT NOT NULL,
            item_id TEXT NOT NULL,
            item_name TEXT NOT NULL,
            old_value TEXT,
            new_value TEXT,
            reason TEXT,
            auto_corrected INTEGER DEFAULT 0,
            session_id TEXT
        );
        CREATE INDEX IF NOT EXISTS idx_audit_timestamp ON audit_log(timestamp DESC);
        CREATE INDEX IF NOT EXISTS idx_audit_category ON audit_log(category);

        -- Memory-Einträge
        CREATE TABLE IF NOT EXISTS memory (
            id TEXT PRIMARY KEY,
            category TEXT NOT NULL,
            key TEXT NOT NULL,
            value TEXT NOT NULL,
            sticky INTEGER DEFAULT 0,
            auto_load INTEGER DEFAULT 0,
            last_used TEXT,
            use_count INTEGER DEFAULT 0
        );
        CREATE INDEX IF NOT EXISTS idx_memory_category ON memory(category);
        CREATE INDEX IF NOT EXISTS idx_memory_sticky ON memory(sticky) WHERE sticky = 1;

        -- Patterns (Vorgehensweisen)
        CREATE TABLE IF NOT EXISTS patterns (
            id TEXT PRIMARY KEY,
            name TEXT NOT NULL,
            description TEXT,
            trigger_text TEXT,
            old_approach TEXT,
            new_approach TEXT,
            reason TEXT,
            occurrence_count INTEGER DEFAULT 1,
            auto_corrected INTEGER DEFAULT 0,
            created_at TEXT NOT NULL,
            updated_at TEXT NOT NULL
        );

        -- Sessions (Claude-Konversationen)
        CREATE TABLE IF NOT EXISTS sessions (
            id TEXT PRIMARY KEY,
            claude_session_id TEXT,
            title TEXT NOT NULL,
            working_dir TEXT,
            message_count INTEGER DEFAULT 0,
            token_input INTEGER DEFAULT 0,
            token_output INTEGER DEFAULT 0,
            cost_usd REAL DEFAULT 0,
            status TEXT NOT NULL DEFAULT 'active',
            created_at TEXT NOT NULL,
            updated_at TEXT NOT NULL,
            last_message TEXT
        );
        CREATE INDEX IF NOT EXISTS idx_sessions_updated ON sessions(updated_at DESC);
        CREATE INDEX IF NOT EXISTS idx_sessions_status ON sessions(status);

        -- Einstellungen (Key-Value)
        CREATE TABLE IF NOT EXISTS settings (
            key TEXT PRIMARY KEY,
            value TEXT NOT NULL,
            updated_at TEXT NOT NULL
        );

        -- Chat-Nachrichten
        CREATE TABLE IF NOT EXISTS messages (
            id TEXT PRIMARY KEY,
            session_id TEXT NOT NULL,
            role TEXT NOT NULL,
            content TEXT NOT NULL,
            model TEXT,
            agent_id TEXT,
            timestamp TEXT NOT NULL,
            FOREIGN KEY (session_id) REFERENCES sessions(id) ON DELETE CASCADE
        );
        CREATE INDEX IF NOT EXISTS idx_messages_session ON messages(session_id, timestamp);

        -- Block C: FTS5-Volltextsuche fuer Cross-Session-Recall.
        -- Externer content-Modus: kein doppelter Speicher, FTS hat nur
        -- die rowid + indizierten Felder. Sync via Trigger.
        CREATE VIRTUAL TABLE IF NOT EXISTS messages_fts USING fts5(
            content,
            tokenize='unicode61 remove_diacritics 2'
        );
        CREATE TRIGGER IF NOT EXISTS messages_fts_ai
        AFTER INSERT ON messages BEGIN
            INSERT INTO messages_fts(rowid, content) VALUES (new.rowid, new.content);
        END;
        CREATE TRIGGER IF NOT EXISTS messages_fts_ad
        AFTER DELETE ON messages BEGIN
            DELETE FROM messages_fts WHERE rowid = old.rowid;
        END;
        CREATE TRIGGER IF NOT EXISTS messages_fts_au
        AFTER UPDATE ON messages BEGIN
            UPDATE messages_fts SET content = new.content WHERE rowid = old.rowid;
        END;

        -- Monitor-Events (System-Log)
        CREATE TABLE IF NOT EXISTS monitor_events (
            id TEXT PRIMARY KEY,
            timestamp TEXT NOT NULL,
            event_type TEXT NOT NULL,
            summary TEXT NOT NULL,
            details TEXT,
            agent_id TEXT,
            session_id TEXT,
            duration_ms INTEGER,
            error TEXT
        );
        CREATE INDEX IF NOT EXISTS idx_monitor_timestamp ON monitor_events(timestamp DESC);
        CREATE INDEX IF NOT EXISTS idx_monitor_type ON monitor_events(event_type);

        -- Threshold-basierter Cleanup: greift nur wenn die Tabelle mehr als
        -- 50_000 Zeilen hat, behaelt dann die juengsten 30_000. Loescht ausserdem
        -- Eintraege aelter als 7 Tage. Verhindert die Endlosschleife aus 04/2026
        -- (Bridge-EPIPE-Crash schrieb 5 Events/s und der Trigger scannte bei
        -- jedem Insert die ganze Tabelle = O(n^2)-Schneeball).
        DROP TRIGGER IF EXISTS cleanup_old_monitor_events;
        CREATE TRIGGER IF NOT EXISTS cleanup_old_monitor_events
        AFTER INSERT ON monitor_events
        WHEN (SELECT COUNT(*) FROM monitor_events) > 50000
        BEGIN
            DELETE FROM monitor_events
            WHERE timestamp < datetime('now', '-7 days')
               OR id IN (
                   SELECT id FROM monitor_events
                   ORDER BY timestamp ASC
                   LIMIT MAX((SELECT COUNT(*) FROM monitor_events) - 30000, 0)
               );
        END;

        -- Projekte (für schnellen Wechsel)
        CREATE TABLE IF NOT EXISTS projects (
            id TEXT PRIMARY KEY,
            name TEXT NOT NULL,
            working_dir TEXT NOT NULL,
            claude_md_path TEXT,
            description TEXT,
            last_used TEXT NOT NULL,
            created_at TEXT NOT NULL,
            session_count INTEGER DEFAULT 0
        );
        CREATE INDEX IF NOT EXISTS idx_projects_last_used ON projects(last_used DESC);

        -- Phase 2.0: Fehler-Tracking für Auto-Pattern-Erkennung
        CREATE TABLE IF NOT EXISTS error_tracker (
            error_hash TEXT PRIMARY KEY,
            error_message TEXT NOT NULL,
            tool TEXT NOT NULL,
            occurrence_count INTEGER DEFAULT 1,
            first_seen TEXT NOT NULL,
            last_seen TEXT NOT NULL,
            kb_pattern_id INTEGER,
            UNIQUE(error_hash)
        );
        CREATE INDEX IF NOT EXISTS idx_error_tracker_count ON error_tracker(occurrence_count DESC);

        -- Checkpoints: Datei-Snapshots fuer Accept/Reject und Rewind
        CREATE TABLE IF NOT EXISTS checkpoints (
            tool_id TEXT PRIMARY KEY,
            session_id TEXT NOT NULL,
            tool_name TEXT NOT NULL,
            file_path TEXT NOT NULL,
            content_before TEXT NOT NULL,
            content_after TEXT,
            status TEXT NOT NULL DEFAULT 'pending',
            created_at TEXT NOT NULL DEFAULT (datetime('now'))
        );
        CREATE INDEX IF NOT EXISTS idx_checkpoints_session ON checkpoints(session_id, created_at DESC);
        ",
    )?;
    Ok(())
}
|
// ============ Permissions ============
|
|
|
|
/// Speichert eine Permission
|
|
pub fn save_permission(&self, perm: &Permission) -> SqlResult<()> {
|
|
self.conn.execute(
|
|
"INSERT OR REPLACE INTO permissions (id, pattern, tool, path_pattern, action, created_at, use_count, last_used)
|
|
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)",
|
|
params![
|
|
perm.id,
|
|
perm.pattern,
|
|
perm.tool,
|
|
perm.path_pattern,
|
|
format!("{:?}", perm.action).to_lowercase(),
|
|
perm.created_at,
|
|
perm.use_count,
|
|
perm.last_used,
|
|
],
|
|
)?;
|
|
Ok(())
|
|
}
|
|
|
|
/// Lädt alle permanenten Permissions
|
|
pub fn load_permissions(&self) -> SqlResult<Vec<Permission>> {
|
|
let mut stmt = self.conn.prepare(
|
|
"SELECT id, pattern, tool, path_pattern, action, created_at, use_count, last_used FROM permissions"
|
|
)?;
|
|
|
|
let perms = stmt.query_map([], |row| {
|
|
let action_str: String = row.get(4)?;
|
|
let action = match action_str.as_str() {
|
|
"deny" => PermissionAction::Deny,
|
|
_ => PermissionAction::Allow,
|
|
};
|
|
|
|
Ok(Permission {
|
|
id: row.get(0)?,
|
|
pattern: row.get(1)?,
|
|
tool: row.get(2)?,
|
|
path_pattern: row.get(3)?,
|
|
permission_type: PermissionType::Permanent,
|
|
action,
|
|
created_at: row.get(5)?,
|
|
use_count: row.get(6)?,
|
|
last_used: row.get(7)?,
|
|
})
|
|
})?.collect::<SqlResult<Vec<_>>>()?;
|
|
|
|
Ok(perms)
|
|
}
|
|
|
|
/// Löscht eine Permission
|
|
pub fn delete_permission(&self, id: &str) -> SqlResult<()> {
|
|
self.conn.execute("DELETE FROM permissions WHERE id = ?1", params![id])?;
|
|
Ok(())
|
|
}
|
|
|
|
// ============ Audit-Log ============
|
|
|
|
/// Speichert einen Audit-Eintrag
|
|
pub fn save_audit_entry(&self, entry: &AuditEntry) -> SqlResult<()> {
|
|
self.conn.execute(
|
|
"INSERT INTO audit_log (id, timestamp, category, action, item_id, item_name, old_value, new_value, reason, auto_corrected, session_id)
|
|
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11)",
|
|
params![
|
|
entry.id,
|
|
entry.timestamp,
|
|
format!("{:?}", entry.category).to_lowercase(),
|
|
format!("{:?}", entry.action).to_lowercase(),
|
|
entry.item_id,
|
|
entry.item_name,
|
|
entry.old_value.as_ref().map(|v| v.to_string()),
|
|
entry.new_value.as_ref().map(|v| v.to_string()),
|
|
entry.reason,
|
|
entry.auto_corrected as i32,
|
|
entry.session_id,
|
|
],
|
|
)?;
|
|
Ok(())
|
|
}
|
|
|
|
/// Lädt die letzten N Audit-Einträge
|
|
pub fn load_audit_log(&self, limit: usize) -> SqlResult<Vec<AuditEntry>> {
|
|
let mut stmt = self.conn.prepare(
|
|
"SELECT id, timestamp, category, action, item_id, item_name, old_value, new_value, reason, auto_corrected, session_id
|
|
FROM audit_log ORDER BY timestamp DESC LIMIT ?1"
|
|
)?;
|
|
|
|
let entries = stmt.query_map(params![limit as i64], |row| {
|
|
let cat_str: String = row.get(2)?;
|
|
let act_str: String = row.get(3)?;
|
|
let old_val: Option<String> = row.get(6)?;
|
|
let new_val: Option<String> = row.get(7)?;
|
|
let auto_corr: i32 = row.get(9)?;
|
|
|
|
Ok(AuditEntry {
|
|
id: row.get(0)?,
|
|
timestamp: row.get(1)?,
|
|
category: parse_audit_category(&cat_str),
|
|
action: parse_audit_action(&act_str),
|
|
item_id: row.get(4)?,
|
|
item_name: row.get(5)?,
|
|
old_value: old_val.and_then(|s| serde_json::from_str(&s).ok()),
|
|
new_value: new_val.and_then(|s| serde_json::from_str(&s).ok()),
|
|
reason: row.get(8)?,
|
|
auto_corrected: auto_corr != 0,
|
|
session_id: row.get(10)?,
|
|
})
|
|
})?.collect::<SqlResult<Vec<_>>>()?;
|
|
|
|
Ok(entries)
|
|
}
|
|
|
|
/// Audit-Statistiken
|
|
pub fn audit_stats(&self) -> SqlResult<AuditStats> {
|
|
let total: usize = self.conn.query_row(
|
|
"SELECT COUNT(*) FROM audit_log", [], |row| row.get(0)
|
|
)?;
|
|
let auto_corrected: usize = self.conn.query_row(
|
|
"SELECT COUNT(*) FROM audit_log WHERE auto_corrected = 1", [], |row| row.get(0)
|
|
)?;
|
|
let today: usize = self.conn.query_row(
|
|
"SELECT COUNT(*) FROM audit_log WHERE timestamp LIKE ?1 || '%'",
|
|
params![chrono::Local::now().format("%Y-%m-%d").to_string()],
|
|
|row| row.get(0),
|
|
)?;
|
|
|
|
Ok(AuditStats { total, auto_corrected, today })
|
|
}
|
|
|
|
// ============ Memory ============
|
|
|
|
/// Speichert einen Memory-Eintrag
|
|
#[allow(dead_code)]
|
|
pub fn save_memory_entry(&self, entry: &MemoryEntry) -> SqlResult<()> {
|
|
self.conn.execute(
|
|
"INSERT OR REPLACE INTO memory (id, category, key, value, sticky, auto_load, last_used, use_count)
|
|
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)",
|
|
params![
|
|
entry.id,
|
|
format!("{:?}", entry.category),
|
|
entry.key,
|
|
entry.value.to_string(),
|
|
entry.sticky as i32,
|
|
entry.auto_load as i32,
|
|
entry.last_used,
|
|
entry.use_count,
|
|
],
|
|
)?;
|
|
Ok(())
|
|
}
|
|
|
|
/// Lädt alle Memory-Einträge
|
|
pub fn load_memory_entries(&self) -> SqlResult<Vec<MemoryEntry>> {
|
|
let mut stmt = self.conn.prepare(
|
|
"SELECT id, category, key, value, sticky, auto_load, last_used, use_count FROM memory"
|
|
)?;
|
|
|
|
let entries = stmt.query_map([], |row| {
|
|
let cat_str: String = row.get(1)?;
|
|
let val_str: String = row.get(3)?;
|
|
let sticky: i32 = row.get(4)?;
|
|
let auto_load: i32 = row.get(5)?;
|
|
|
|
Ok(MemoryEntry {
|
|
id: row.get(0)?,
|
|
category: parse_context_category(&cat_str),
|
|
key: row.get(2)?,
|
|
value: serde_json::from_str(&val_str).unwrap_or(serde_json::Value::String(val_str)),
|
|
sticky: sticky != 0,
|
|
auto_load: auto_load != 0,
|
|
last_used: row.get(6)?,
|
|
use_count: row.get(7)?,
|
|
})
|
|
})?.collect::<SqlResult<Vec<_>>>()?;
|
|
|
|
Ok(entries)
|
|
}
|
|
|
|
/// Löscht einen Memory-Eintrag
|
|
pub fn delete_memory_entry(&self, id: &str) -> SqlResult<()> {
|
|
self.conn.execute("DELETE FROM memory WHERE id = ?1", params![id])?;
|
|
Ok(())
|
|
}
|
|
|
|
// ============ Patterns ============
|
|
|
|
/// Speichert ein Pattern
|
|
pub fn save_pattern(&self, pattern: &Pattern) -> SqlResult<()> {
|
|
self.conn.execute(
|
|
"INSERT OR REPLACE INTO patterns (id, name, description, trigger_text, old_approach, new_approach, reason, occurrence_count, auto_corrected, created_at, updated_at)
|
|
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11)",
|
|
params![
|
|
pattern.id,
|
|
pattern.name,
|
|
pattern.description,
|
|
pattern.trigger,
|
|
pattern.old_approach,
|
|
pattern.new_approach,
|
|
pattern.reason,
|
|
pattern.occurrence_count,
|
|
pattern.auto_corrected as i32,
|
|
pattern.created_at,
|
|
pattern.updated_at,
|
|
],
|
|
)?;
|
|
Ok(())
|
|
}
|
|
|
|
/// Lädt alle Patterns
|
|
pub fn load_patterns(&self) -> SqlResult<Vec<Pattern>> {
|
|
let mut stmt = self.conn.prepare(
|
|
"SELECT id, name, description, trigger_text, old_approach, new_approach, reason, occurrence_count, auto_corrected, created_at, updated_at FROM patterns"
|
|
)?;
|
|
|
|
let patterns = stmt.query_map([], |row| {
|
|
let auto_corr: i32 = row.get(8)?;
|
|
Ok(Pattern {
|
|
id: row.get(0)?,
|
|
name: row.get(1)?,
|
|
description: row.get(2)?,
|
|
trigger: row.get(3)?,
|
|
old_approach: row.get(4)?,
|
|
new_approach: row.get(5)?,
|
|
reason: row.get(6)?,
|
|
occurrence_count: row.get(7)?,
|
|
auto_corrected: auto_corr != 0,
|
|
created_at: row.get(9)?,
|
|
updated_at: row.get(10)?,
|
|
})
|
|
})?.collect::<SqlResult<Vec<_>>>()?;
|
|
|
|
Ok(patterns)
|
|
}
|
|
|
|
// ============ Sessions ============
|
|
|
|
/// Erstellt eine neue Session
|
|
pub fn create_session(&self, session: &Session) -> SqlResult<()> {
|
|
self.conn.execute(
|
|
"INSERT INTO sessions (id, claude_session_id, title, working_dir, message_count, token_input, token_output, cost_usd, status, created_at, updated_at, last_message)
|
|
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11, ?12)",
|
|
params![
|
|
session.id,
|
|
session.claude_session_id,
|
|
session.title,
|
|
session.working_dir,
|
|
session.message_count,
|
|
session.token_input,
|
|
session.token_output,
|
|
session.cost_usd,
|
|
session.status,
|
|
session.created_at,
|
|
session.updated_at,
|
|
session.last_message,
|
|
],
|
|
)?;
|
|
Ok(())
|
|
}
|
|
|
|
/// Updates a session's mutable fields by id.
///
/// NOTE(review): `updated_at` is stamped server-side with "now" — the
/// value in `session.updated_at` is ignored — and `working_dir` /
/// `created_at` are never rewritten. Confirm this matches caller
/// expectations before changing.
pub fn update_session(&self, session: &Session) -> SqlResult<()> {
    self.conn.execute(
        "UPDATE sessions SET claude_session_id = ?2, title = ?3, message_count = ?4,
                token_input = ?5, token_output = ?6, cost_usd = ?7, status = ?8,
                updated_at = ?9, last_message = ?10
         WHERE id = ?1",
        params![
            session.id,
            session.claude_session_id,
            session.title,
            session.message_count,
            session.token_input,
            session.token_output,
            session.cost_usd,
            session.status,
            // Refresh the timestamp at write time rather than trusting the struct.
            chrono::Local::now().to_rfc3339(),
            session.last_message,
        ],
    )?;
    Ok(())
}
|
/// Lädt alle Sessions (neueste zuerst) — Convenience-Wrapper
|
|
#[allow(dead_code)]
|
|
pub fn load_sessions(&self, limit: usize) -> SqlResult<Vec<Session>> {
|
|
self.load_sessions_filtered(limit, None)
|
|
}
|
|
|
|
/// Sessions laden, optional gefiltert nach working_dir (Projekt-Bindung)
|
|
pub fn load_sessions_filtered(&self, limit: usize, working_dir: Option<&str>) -> SqlResult<Vec<Session>> {
|
|
if let Some(dir) = working_dir {
|
|
let mut stmt = self.conn.prepare(
|
|
"SELECT id, claude_session_id, title, working_dir, message_count,
|
|
token_input, token_output, cost_usd, status, created_at, updated_at, last_message
|
|
FROM sessions WHERE working_dir = ?1 ORDER BY updated_at DESC LIMIT ?2"
|
|
)?;
|
|
let result = stmt.query_map(params![dir, limit as i64], |row| {
|
|
Ok(Session {
|
|
id: row.get(0)?, claude_session_id: row.get(1)?, title: row.get(2)?,
|
|
working_dir: row.get(3)?, message_count: row.get(4)?,
|
|
token_input: row.get(5)?, token_output: row.get(6)?,
|
|
cost_usd: row.get(7)?, status: row.get(8)?,
|
|
created_at: row.get(9)?, updated_at: row.get(10)?,
|
|
last_message: row.get(11)?,
|
|
})
|
|
})?.collect();
|
|
result
|
|
} else {
|
|
let mut stmt = self.conn.prepare(
|
|
"SELECT id, claude_session_id, title, working_dir, message_count,
|
|
token_input, token_output, cost_usd, status, created_at, updated_at, last_message
|
|
FROM sessions ORDER BY updated_at DESC LIMIT ?1"
|
|
)?;
|
|
let result = stmt.query_map(params![limit as i64], |row| {
|
|
Ok(Session {
|
|
id: row.get(0)?, claude_session_id: row.get(1)?, title: row.get(2)?,
|
|
working_dir: row.get(3)?, message_count: row.get(4)?,
|
|
token_input: row.get(5)?, token_output: row.get(6)?,
|
|
cost_usd: row.get(7)?, status: row.get(8)?,
|
|
created_at: row.get(9)?, updated_at: row.get(10)?,
|
|
last_message: row.get(11)?,
|
|
})
|
|
})?.collect();
|
|
result
|
|
}
|
|
}
|
|
|
|
/// Holt eine Session nach ID
|
|
pub fn get_session(&self, id: &str) -> SqlResult<Option<Session>> {
|
|
let result = self.conn.query_row(
|
|
"SELECT id, claude_session_id, title, working_dir, message_count,
|
|
token_input, token_output, cost_usd, status, created_at, updated_at, last_message
|
|
FROM sessions WHERE id = ?1",
|
|
params![id],
|
|
|row| {
|
|
Ok(Session {
|
|
id: row.get(0)?,
|
|
claude_session_id: row.get(1)?,
|
|
title: row.get(2)?,
|
|
working_dir: row.get(3)?,
|
|
message_count: row.get(4)?,
|
|
token_input: row.get(5)?,
|
|
token_output: row.get(6)?,
|
|
cost_usd: row.get(7)?,
|
|
status: row.get(8)?,
|
|
created_at: row.get(9)?,
|
|
updated_at: row.get(10)?,
|
|
last_message: row.get(11)?,
|
|
})
|
|
},
|
|
);
|
|
match result {
|
|
Ok(s) => Ok(Some(s)),
|
|
Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None),
|
|
Err(e) => Err(e),
|
|
}
|
|
}
|
|
|
|
/// Löscht eine Session
|
|
pub fn delete_session(&self, id: &str) -> SqlResult<()> {
|
|
// Erst Nachrichten löschen (wegen Foreign Key)
|
|
self.conn.execute("DELETE FROM messages WHERE session_id = ?1", params![id])?;
|
|
self.conn.execute("DELETE FROM sessions WHERE id = ?1", params![id])?;
|
|
Ok(())
|
|
}
|
|
|
|
/// Holt die zuletzt aktualisierte aktive Session
|
|
pub fn get_active_session(&self) -> SqlResult<Option<Session>> {
|
|
let result = self.conn.query_row(
|
|
"SELECT id, claude_session_id, title, working_dir, message_count,
|
|
token_input, token_output, cost_usd, status, created_at, updated_at, last_message
|
|
FROM sessions WHERE status = 'active' ORDER BY updated_at DESC LIMIT 1",
|
|
[],
|
|
|row| {
|
|
Ok(Session {
|
|
id: row.get(0)?,
|
|
claude_session_id: row.get(1)?,
|
|
title: row.get(2)?,
|
|
working_dir: row.get(3)?,
|
|
message_count: row.get(4)?,
|
|
token_input: row.get(5)?,
|
|
token_output: row.get(6)?,
|
|
cost_usd: row.get(7)?,
|
|
status: row.get(8)?,
|
|
created_at: row.get(9)?,
|
|
updated_at: row.get(10)?,
|
|
last_message: row.get(11)?,
|
|
})
|
|
},
|
|
);
|
|
match result {
|
|
Ok(s) => Ok(Some(s)),
|
|
Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None),
|
|
Err(e) => Err(e),
|
|
}
|
|
}
|
|
|
|
// ============ Messages ============
|
|
|
|
/// Speichert eine Nachricht
|
|
pub fn save_message(&self, msg: &ChatMessage) -> SqlResult<()> {
|
|
self.conn.execute(
|
|
"INSERT OR REPLACE INTO messages (id, session_id, role, content, model, agent_id, timestamp)
|
|
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7)",
|
|
params![
|
|
msg.id,
|
|
msg.session_id,
|
|
msg.role,
|
|
msg.content,
|
|
msg.model,
|
|
msg.agent_id,
|
|
msg.timestamp,
|
|
],
|
|
)?;
|
|
Ok(())
|
|
}
|
|
|
|
/// Lädt alle Nachrichten einer Session
|
|
pub fn load_messages(&self, session_id: &str) -> SqlResult<Vec<ChatMessage>> {
|
|
let mut stmt = self.conn.prepare(
|
|
"SELECT id, session_id, role, content, model, agent_id, timestamp
|
|
FROM messages WHERE session_id = ?1 ORDER BY timestamp ASC"
|
|
)?;
|
|
|
|
let messages = stmt.query_map(params![session_id], |row| {
|
|
Ok(ChatMessage {
|
|
id: row.get(0)?,
|
|
session_id: row.get(1)?,
|
|
role: row.get(2)?,
|
|
content: row.get(3)?,
|
|
model: row.get(4)?,
|
|
agent_id: row.get(5)?,
|
|
timestamp: row.get(6)?,
|
|
})
|
|
})?.collect::<SqlResult<Vec<_>>>()?;
|
|
|
|
Ok(messages)
|
|
}
|
|
|
|
/// Letzte N User-Nachrichten einer Session laden (für KB-Kontext-Analyse)
|
|
pub fn load_recent_user_messages(&self, session_id: &str, limit: usize) -> SqlResult<Vec<String>> {
|
|
let mut stmt = self.conn.prepare(
|
|
"SELECT content FROM messages
|
|
WHERE session_id = ?1 AND role = 'user'
|
|
ORDER BY timestamp DESC LIMIT ?2"
|
|
)?;
|
|
let messages = stmt.query_map(params![session_id, limit as i64], |row| {
|
|
row.get::<_, String>(0)
|
|
})?.collect::<SqlResult<Vec<_>>>()?;
|
|
Ok(messages)
|
|
}
|
|
|
|
/// Löscht alle Nachrichten einer Session
|
|
pub fn clear_messages(&self, session_id: &str) -> SqlResult<()> {
|
|
self.conn.execute("DELETE FROM messages WHERE session_id = ?1", params![session_id])?;
|
|
Ok(())
|
|
}
|
|
|
|
/// Block C: Cross-Session-Recall — Volltext-Suche ueber alle Sessions
|
|
/// liefert die top-N relevanten Assistant-Antworten aus der Vergangenheit.
|
|
/// Nutzt FTS5-Index `messages_fts`. Der current_session_id wird ausgeschlossen
|
|
/// damit das Recall sich nicht selbst trifft.
|
|
pub fn search_past_messages(
|
|
&self,
|
|
query: &str,
|
|
current_session_id: Option<&str>,
|
|
limit: usize,
|
|
) -> SqlResult<Vec<PastMessageMatch>> {
|
|
// FTS5-Query: bei leerem Query nichts. Auch sehr kurze Queries (<3 Zeichen)
|
|
// ueberspringen — bringen nur Rauschen.
|
|
if query.trim().len() < 3 {
|
|
return Ok(Vec::new());
|
|
}
|
|
|
|
// Sanitize: FTS5 mag keine ' und " in der Query
|
|
let safe = query
|
|
.replace('"', " ")
|
|
.replace('\'', " ")
|
|
.split_whitespace()
|
|
.filter(|w| w.len() >= 2)
|
|
.take(8)
|
|
.collect::<Vec<_>>()
|
|
.join(" OR ");
|
|
if safe.is_empty() {
|
|
return Ok(Vec::new());
|
|
}
|
|
|
|
let mut stmt = self.conn.prepare(
|
|
"SELECT m.id, m.session_id, m.role, m.content, m.timestamp, s.title
|
|
FROM messages_fts
|
|
JOIN messages m ON m.rowid = messages_fts.rowid
|
|
LEFT JOIN sessions s ON s.id = m.session_id
|
|
WHERE messages_fts MATCH ?1
|
|
AND m.role = 'assistant'
|
|
AND length(m.content) > 100
|
|
AND (?2 IS NULL OR m.session_id != ?2)
|
|
ORDER BY rank
|
|
LIMIT ?3"
|
|
)?;
|
|
|
|
let rows = stmt.query_map(
|
|
params![safe, current_session_id, limit as i64],
|
|
|row| {
|
|
let content: String = row.get(3)?;
|
|
Ok(PastMessageMatch {
|
|
id: row.get(0)?,
|
|
session_id: row.get(1)?,
|
|
role: row.get(2)?,
|
|
snippet: safe_truncate_ellipsis(&content, 240),
|
|
timestamp: row.get(4)?,
|
|
session_title: row.get(5)?,
|
|
})
|
|
}
|
|
)?.collect::<SqlResult<Vec<_>>>()?;
|
|
|
|
Ok(rows)
|
|
}
|
|
|
|
/// Einmalige Migration: bestehende messages in den FTS5-Index spielen
|
|
/// (falls die Tabelle vor Block C bereits Eintraege hatte).
|
|
pub fn rebuild_messages_fts(&self) -> SqlResult<usize> {
|
|
// Prefcheck: ist FTS leer aber messages voll?
|
|
let fts_count: i64 = self.conn
|
|
.query_row("SELECT COUNT(*) FROM messages_fts", [], |r| r.get(0))
|
|
.unwrap_or(0);
|
|
let msg_count: i64 = self.conn
|
|
.query_row("SELECT COUNT(*) FROM messages", [], |r| r.get(0))?;
|
|
if fts_count >= msg_count { return Ok(0); }
|
|
|
|
self.conn.execute(
|
|
"INSERT INTO messages_fts(rowid, content)
|
|
SELECT rowid, content FROM messages
|
|
WHERE rowid NOT IN (SELECT rowid FROM messages_fts)",
|
|
[]
|
|
)?;
|
|
Ok((msg_count - fts_count) as usize)
|
|
}
|
|
|
|
/// Zählt Nachrichten einer Session
|
|
pub fn count_messages(&self, session_id: &str) -> SqlResult<usize> {
|
|
self.conn.query_row(
|
|
"SELECT COUNT(*) FROM messages WHERE session_id = ?1",
|
|
params![session_id],
|
|
|row| row.get(0),
|
|
)
|
|
}
|
|
|
|
/// Compacts a session: keeps the newest `keep_last` messages and replaces
/// all older ones with a single system summary message.
///
/// Returns the number of messages that were compacted (0 when the session
/// already has `keep_last` or fewer messages).
pub fn compact_session(&self, session_id: &str, keep_last: usize) -> SqlResult<usize> {
    let total = self.count_messages(session_id)?;
    if total <= keep_last {
        return Ok(0); // nothing to compact
    }

    let to_compact = total - keep_last;

    // Fetch the oldest messages — the ones that will be folded into the summary.
    let mut stmt = self.conn.prepare(
        "SELECT id, role, content FROM messages
         WHERE session_id = ?1
         ORDER BY timestamp ASC
         LIMIT ?2"
    )?;

    let old_messages: Vec<(String, String, String)> = stmt.query_map(
        params![session_id, to_compact as i64],
        |row| Ok((row.get(0)?, row.get(1)?, row.get(2)?))
    )?.collect::<SqlResult<Vec<_>>>()?;

    if old_messages.is_empty() {
        return Ok(0);
    }

    // Build the summary text: one "[role] preview" line per message.
    let mut summary_parts: Vec<String> = Vec::new();
    for (_, role, content) in &old_messages {
        let preview = if content.len() > 200 {
            // Cut safely at a char boundary (multi-byte chars like ─, ä, ü).
            let end = content.char_indices()
                .take_while(|(i, _)| *i < 200)
                .last()
                .map(|(i, c)| i + c.len_utf8())
                .unwrap_or(200.min(content.len()));
            format!("{}...", &content[..end])
        } else {
            content.clone()
        };
        summary_parts.push(format!("[{}] {}", role, preview));
    }
    let summary_content = format!(
        "📦 **Kompaktierter Kontext** ({} Nachrichten)\n\n{}",
        old_messages.len(),
        summary_parts.join("\n\n---\n\n")
    );

    // Ids of the messages being replaced.
    let old_ids: Vec<&str> = old_messages.iter().map(|(id, _, _)| id.as_str()).collect();

    // Delete them in a single statement with one placeholder per id.
    let placeholders: String = old_ids.iter().map(|_| "?").collect::<Vec<_>>().join(",");
    let delete_sql = format!("DELETE FROM messages WHERE id IN ({})", placeholders);

    let params: Vec<&dyn rusqlite::ToSql> = old_ids.iter().map(|id| id as &dyn rusqlite::ToSql).collect();
    self.conn.execute(&delete_sql, params.as_slice())?;

    // Insert the summary message with an epoch timestamp so it sorts
    // before every real message in the session.
    let summary_msg = ChatMessage {
        id: uuid::Uuid::new_v4().to_string(),
        session_id: session_id.to_string(),
        role: "system".to_string(),
        content: summary_content,
        model: None,
        agent_id: None,
        timestamp: "1970-01-01T00:00:00Z".to_string(), // sorts first
    };
    self.save_message(&summary_msg)?;

    Ok(old_messages.len())
}
|
// ============ Settings ============
|
|
|
|
/// Speichert eine Einstellung
|
|
pub fn set_setting(&self, key: &str, value: &str) -> SqlResult<()> {
|
|
self.conn.execute(
|
|
"INSERT OR REPLACE INTO settings (key, value, updated_at) VALUES (?1, ?2, ?3)",
|
|
params![key, value, chrono::Local::now().to_rfc3339()],
|
|
)?;
|
|
Ok(())
|
|
}
|
|
|
|
/// Liest eine Einstellung
|
|
pub fn get_setting(&self, key: &str) -> SqlResult<Option<String>> {
|
|
let result = self.conn.query_row(
|
|
"SELECT value FROM settings WHERE key = ?1",
|
|
params![key],
|
|
|row| row.get(0),
|
|
);
|
|
match result {
|
|
Ok(val) => Ok(Some(val)),
|
|
Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None),
|
|
Err(e) => Err(e),
|
|
}
|
|
}
|
|
|
|
/// Lädt alle Einstellungen
|
|
pub fn get_all_settings(&self) -> SqlResult<Vec<(String, String)>> {
|
|
let mut stmt = self.conn.prepare("SELECT key, value FROM settings")?;
|
|
let settings = stmt.query_map([], |row| {
|
|
Ok((row.get(0)?, row.get(1)?))
|
|
})?.collect::<SqlResult<Vec<_>>>()?;
|
|
Ok(settings)
|
|
}
|
|
|
|
// ============ Monitor-Events ============
|
|
|
|
/// Speichert ein Monitor-Event
|
|
pub fn save_monitor_event(&self, event: &MonitorEvent) -> SqlResult<()> {
|
|
self.conn.execute(
|
|
"INSERT OR REPLACE INTO monitor_events (id, timestamp, event_type, summary, details, agent_id, session_id, duration_ms, error)
|
|
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9)",
|
|
params![
|
|
event.id,
|
|
event.timestamp,
|
|
event.event_type,
|
|
event.summary,
|
|
event.details,
|
|
event.agent_id,
|
|
event.session_id,
|
|
event.duration_ms,
|
|
event.error,
|
|
],
|
|
)?;
|
|
Ok(())
|
|
}
|
|
|
|
/// Lädt die letzten N Monitor-Events
|
|
pub fn load_monitor_events(&self, limit: usize) -> SqlResult<Vec<MonitorEvent>> {
|
|
let mut stmt = self.conn.prepare(
|
|
"SELECT id, timestamp, event_type, summary, details, agent_id, session_id, duration_ms, error
|
|
FROM monitor_events ORDER BY timestamp DESC LIMIT ?1"
|
|
)?;
|
|
|
|
let events = stmt.query_map(params![limit as i64], |row| {
|
|
Ok(MonitorEvent {
|
|
id: row.get(0)?,
|
|
timestamp: row.get(1)?,
|
|
event_type: row.get(2)?,
|
|
summary: row.get(3)?,
|
|
details: row.get(4)?,
|
|
agent_id: row.get(5)?,
|
|
session_id: row.get(6)?,
|
|
duration_ms: row.get(7)?,
|
|
error: row.get(8)?,
|
|
})
|
|
})?.collect::<SqlResult<Vec<_>>>()?;
|
|
|
|
Ok(events)
|
|
}
|
|
|
|
/// Lädt Monitor-Events nach Typ gefiltert
|
|
pub fn load_monitor_events_by_type(&self, event_type: &str, limit: usize) -> SqlResult<Vec<MonitorEvent>> {
|
|
let mut stmt = self.conn.prepare(
|
|
"SELECT id, timestamp, event_type, summary, details, agent_id, session_id, duration_ms, error
|
|
FROM monitor_events WHERE event_type = ?1 ORDER BY timestamp DESC LIMIT ?2"
|
|
)?;
|
|
|
|
let events = stmt.query_map(params![event_type, limit as i64], |row| {
|
|
Ok(MonitorEvent {
|
|
id: row.get(0)?,
|
|
timestamp: row.get(1)?,
|
|
event_type: row.get(2)?,
|
|
summary: row.get(3)?,
|
|
details: row.get(4)?,
|
|
agent_id: row.get(5)?,
|
|
session_id: row.get(6)?,
|
|
duration_ms: row.get(7)?,
|
|
error: row.get(8)?,
|
|
})
|
|
})?.collect::<SqlResult<Vec<_>>>()?;
|
|
|
|
Ok(events)
|
|
}
|
|
|
|
/// Löscht alle Monitor-Events
|
|
pub fn clear_monitor_events(&self) -> SqlResult<usize> {
|
|
let count: usize = self.conn.query_row(
|
|
"SELECT COUNT(*) FROM monitor_events", [], |row| row.get(0)
|
|
)?;
|
|
self.conn.execute("DELETE FROM monitor_events", [])?;
|
|
Ok(count)
|
|
}
|
|
|
|
/// Zählt Monitor-Events nach Typ
|
|
pub fn count_monitor_events_by_type(&self) -> SqlResult<Vec<(String, usize)>> {
|
|
let mut stmt = self.conn.prepare(
|
|
"SELECT event_type, COUNT(*) FROM monitor_events GROUP BY event_type"
|
|
)?;
|
|
let counts = stmt.query_map([], |row| {
|
|
Ok((row.get(0)?, row.get(1)?))
|
|
})?.collect::<SqlResult<Vec<_>>>()?;
|
|
Ok(counts)
|
|
}
|
|
|
|
// ============ Checkpoints (Accept/Reject + Rewind) ============
|
|
|
|
/// Checkpoint speichern (VOR der Aenderung)
|
|
pub fn save_checkpoint(
|
|
&self,
|
|
tool_id: &str,
|
|
session_id: &str,
|
|
tool_name: &str,
|
|
file_path: &str,
|
|
content_before: &str,
|
|
) -> SqlResult<()> {
|
|
self.conn.execute(
|
|
"INSERT OR REPLACE INTO checkpoints (tool_id, session_id, tool_name, file_path, content_before, status)
|
|
VALUES (?1, ?2, ?3, ?4, ?5, 'pending')",
|
|
params![tool_id, session_id, tool_name, file_path, content_before],
|
|
)?;
|
|
Ok(())
|
|
}
|
|
|
|
/// Checkpoint After-Content updaten
|
|
pub fn update_checkpoint_after(&self, tool_id: &str, content_after: &str) -> SqlResult<()> {
|
|
self.conn.execute(
|
|
"UPDATE checkpoints SET content_after = ?1, status = 'completed' WHERE tool_id = ?2",
|
|
params![content_after, tool_id],
|
|
)?;
|
|
Ok(())
|
|
}
|
|
|
|
/// Checkpoint abrufen
|
|
pub fn get_checkpoint(&self, tool_id: &str) -> SqlResult<Option<CheckpointEntry>> {
|
|
let mut stmt = self.conn.prepare(
|
|
"SELECT tool_id, session_id, tool_name, file_path, content_before, content_after, status, created_at
|
|
FROM checkpoints WHERE tool_id = ?1"
|
|
)?;
|
|
let result: Vec<CheckpointEntry> = stmt.query_map(params![tool_id], |row| {
|
|
Ok(CheckpointEntry {
|
|
tool_id: row.get(0)?,
|
|
session_id: row.get(1)?,
|
|
tool_name: row.get(2)?,
|
|
file_path: row.get(3)?,
|
|
content_before: row.get(4)?,
|
|
content_after: row.get(5)?,
|
|
status: row.get(6)?,
|
|
created_at: row.get(7)?,
|
|
})
|
|
})?.collect::<SqlResult<Vec<_>>>()?;
|
|
Ok(result.into_iter().next())
|
|
}
|
|
|
|
/// Alle Checkpoints einer Session (neueste zuerst)
|
|
pub fn list_checkpoints(&self, session_id: &str) -> SqlResult<Vec<CheckpointEntry>> {
|
|
let mut stmt = self.conn.prepare(
|
|
"SELECT tool_id, session_id, tool_name, file_path, content_before, content_after, status, created_at
|
|
FROM checkpoints WHERE session_id = ?1 ORDER BY created_at DESC LIMIT 100"
|
|
)?;
|
|
let result = stmt.query_map(params![session_id], |row| {
|
|
Ok(CheckpointEntry {
|
|
tool_id: row.get(0)?,
|
|
session_id: row.get(1)?,
|
|
tool_name: row.get(2)?,
|
|
file_path: row.get(3)?,
|
|
content_before: row.get(4)?,
|
|
content_after: row.get(5)?,
|
|
status: row.get(6)?,
|
|
created_at: row.get(7)?,
|
|
})
|
|
})?.collect::<SqlResult<Vec<_>>>()?;
|
|
Ok(result)
|
|
}
|
|
|
|
/// Checkpoint-Status auf 'rejected' setzen
|
|
pub fn reject_checkpoint(&self, tool_id: &str) -> SqlResult<()> {
|
|
self.conn.execute(
|
|
"UPDATE checkpoints SET status = 'rejected' WHERE tool_id = ?1",
|
|
params![tool_id],
|
|
)?;
|
|
Ok(())
|
|
}
|
|
|
|
/// Checkpoint-Status auf 'accepted' setzen
|
|
pub fn accept_checkpoint(&self, tool_id: &str) -> SqlResult<()> {
|
|
self.conn.execute(
|
|
"UPDATE checkpoints SET status = 'accepted' WHERE tool_id = ?1",
|
|
params![tool_id],
|
|
)?;
|
|
Ok(())
|
|
}
|
|
|
|
// ============ Phase 2.0: Fehler-Tracking ============
|
|
|
|
/// Counts an error occurrence and returns the running total.
///
/// Returns `(occurrence_count, kb_pattern_id)`. (The previous doc claimed a
/// 4-tuple including error_message/tool — those are inputs, not outputs.)
/// A first-time error is inserted with count 1 and no KB pattern attached.
pub fn track_error(&self, error_hash: &str, error_message: &str, tool: &str) -> SqlResult<(i32, Option<i64>)> {
    let now = chrono::Local::now().to_rfc3339();

    // Try to bump an existing row first; affects 0 rows when the hash is new.
    // The stored error_message is refreshed to the most recent wording.
    let updated = self.conn.execute(
        "UPDATE error_tracker SET
             occurrence_count = occurrence_count + 1,
             last_seen = ?1,
             error_message = ?2
             WHERE error_hash = ?3",
        params![now, error_message, error_hash],
    )?;

    if updated == 0 {
        // First occurrence — insert and return count 1 with no KB pattern yet.
        self.conn.execute(
            "INSERT INTO error_tracker (error_hash, error_message, tool, occurrence_count, first_seen, last_seen)
             VALUES (?1, ?2, ?3, 1, ?4, ?4)",
            params![error_hash, error_message, tool, now],
        )?;
        return Ok((1, None));
    }

    // Re-read the current count and kb_pattern_id after the UPDATE.
    // Safe because the connection is used single-threaded behind a Mutex.
    let result: (i32, Option<i64>) = self.conn.query_row(
        "SELECT occurrence_count, kb_pattern_id FROM error_tracker WHERE error_hash = ?1",
        params![error_hash],
        |row| Ok((row.get(0)?, row.get(1)?)),
    )?;

    Ok(result)
}
|
|
|
|
/// KB-Pattern-ID für einen Fehler speichern (nachdem Pattern in KB erstellt wurde)
|
|
pub fn set_error_kb_pattern(&self, error_hash: &str, kb_pattern_id: i64) -> SqlResult<()> {
|
|
self.conn.execute(
|
|
"UPDATE error_tracker SET kb_pattern_id = ?1 WHERE error_hash = ?2",
|
|
params![kb_pattern_id, error_hash],
|
|
)?;
|
|
Ok(())
|
|
}
|
|
|
|
/// Fehler-Statistiken laden (Top N häufigste Fehler)
|
|
pub fn get_error_stats(&self, limit: usize) -> SqlResult<Vec<(String, String, String, i32, Option<i64>)>> {
|
|
let mut stmt = self.conn.prepare(
|
|
"SELECT error_hash, error_message, tool, occurrence_count, kb_pattern_id
|
|
FROM error_tracker
|
|
ORDER BY occurrence_count DESC
|
|
LIMIT ?1"
|
|
)?;
|
|
let stats = stmt.query_map(params![limit as i64], |row| {
|
|
Ok((row.get(0)?, row.get(1)?, row.get(2)?, row.get(3)?, row.get(4)?))
|
|
})?.collect::<SqlResult<Vec<_>>>()?;
|
|
Ok(stats)
|
|
}
|
|
|
|
// ============ Statistiken ============
|
|
|
|
/// DB-Statistiken
|
|
pub fn stats(&self) -> SqlResult<DbStats> {
|
|
let permissions: usize = self.conn.query_row(
|
|
"SELECT COUNT(*) FROM permissions", [], |row| row.get(0)
|
|
)?;
|
|
let audit_entries: usize = self.conn.query_row(
|
|
"SELECT COUNT(*) FROM audit_log", [], |row| row.get(0)
|
|
)?;
|
|
let memory_entries: usize = self.conn.query_row(
|
|
"SELECT COUNT(*) FROM memory", [], |row| row.get(0)
|
|
)?;
|
|
let patterns: usize = self.conn.query_row(
|
|
"SELECT COUNT(*) FROM patterns", [], |row| row.get(0)
|
|
)?;
|
|
|
|
// DB-Größe ermitteln
|
|
let page_count: u64 = self.conn.query_row(
|
|
"PRAGMA page_count", [], |row| row.get(0)
|
|
)?;
|
|
let page_size: u64 = self.conn.query_row(
|
|
"PRAGMA page_size", [], |row| row.get(0)
|
|
)?;
|
|
let db_size_kb = (page_count * page_size) / 1024;
|
|
|
|
Ok(DbStats { permissions, audit_entries, memory_entries, patterns, db_size_kb })
|
|
}
|
|
}
|
|
|
|
// ============ Hilfsfunktionen ============
|
|
|
|
fn parse_audit_category(s: &str) -> AuditCategory {
|
|
match s {
|
|
"guardrail" | "guard_rail" => AuditCategory::GuardRail,
|
|
"pattern" => AuditCategory::Pattern,
|
|
"hook" => AuditCategory::Hook,
|
|
"skill" => AuditCategory::Skill,
|
|
"setting" => AuditCategory::Setting,
|
|
"mcp" => AuditCategory::MCP,
|
|
"memory" => AuditCategory::Memory,
|
|
_ => AuditCategory::Setting,
|
|
}
|
|
}
|
|
|
|
fn parse_audit_action(s: &str) -> AuditAction {
|
|
match s {
|
|
"create" => AuditAction::Create,
|
|
"update" => AuditAction::Update,
|
|
"delete" => AuditAction::Delete,
|
|
"enable" => AuditAction::Enable,
|
|
"disable" => AuditAction::Disable,
|
|
_ => AuditAction::Update,
|
|
}
|
|
}
|
|
|
|
fn parse_context_category(s: &str) -> ContextCategory {
|
|
match s {
|
|
"Critical" => ContextCategory::Critical,
|
|
"Pattern" => ContextCategory::Pattern,
|
|
"Preference" => ContextCategory::Preference,
|
|
"GuardRail" => ContextCategory::GuardRail,
|
|
"Hook" => ContextCategory::Hook,
|
|
"Skill" => ContextCategory::Skill,
|
|
_ => ContextCategory::Pattern,
|
|
}
|
|
}
|
|
|
|
// ============ Tauri Commands ============
|
|
|
|
/// Shared database handle: Tauri manages this as app state; every command
/// locks the Mutex for the duration of one call.
pub type DbState = Arc<Mutex<Database>>;
|
|
|
|
/// DB initialisieren (falls Frontend es auslösen will)
|
|
#[tauri::command]
|
|
pub async fn init_database(app: AppHandle) -> Result<DbStats, String> {
|
|
let state = app.state::<DbState>();
|
|
let db = state.lock().unwrap();
|
|
db.stats().map_err(|e| e.to_string())
|
|
}
|
|
|
|
/// DB-Statistiken abrufen
|
|
#[tauri::command]
|
|
pub async fn get_db_stats(app: AppHandle) -> Result<DbStats, String> {
|
|
let state = app.state::<DbState>();
|
|
let db = state.lock().unwrap();
|
|
db.stats().map_err(|e| e.to_string())
|
|
}
|
|
|
|
/// Einstellung lesen
|
|
#[tauri::command]
|
|
pub async fn get_setting(app: AppHandle, key: String) -> Result<Option<String>, String> {
|
|
let state = app.state::<DbState>();
|
|
let db = state.lock().unwrap();
|
|
db.get_setting(&key).map_err(|e| e.to_string())
|
|
}
|
|
|
|
/// Einstellung speichern
|
|
#[tauri::command]
|
|
pub async fn set_setting(app: AppHandle, key: String, value: String) -> Result<(), String> {
|
|
let state = app.state::<DbState>();
|
|
let db = state.lock().unwrap();
|
|
db.set_setting(&key, &value).map_err(|e| e.to_string())
|
|
}
|
|
|
|
/// Alle Einstellungen laden
|
|
#[tauri::command]
|
|
pub async fn get_all_settings(app: AppHandle) -> Result<Vec<(String, String)>, String> {
|
|
let state = app.state::<DbState>();
|
|
let db = state.lock().unwrap();
|
|
db.get_all_settings().map_err(|e| e.to_string())
|
|
}
|
|
|
|
/// Nachricht speichern
|
|
#[tauri::command]
|
|
pub async fn save_message(app: AppHandle, message: ChatMessage) -> Result<(), String> {
|
|
let state = app.state::<DbState>();
|
|
let db = state.lock().unwrap();
|
|
db.save_message(&message).map_err(|e| e.to_string())
|
|
}
|
|
|
|
/// Nachrichten einer Session laden
|
|
#[tauri::command]
|
|
pub async fn load_messages(app: AppHandle, session_id: String) -> Result<Vec<ChatMessage>, String> {
|
|
let state = app.state::<DbState>();
|
|
let db = state.lock().unwrap();
|
|
db.load_messages(&session_id).map_err(|e| e.to_string())
|
|
}
|
|
|
|
/// Alle Nachrichten einer Session löschen
|
|
#[tauri::command]
|
|
pub async fn clear_messages(app: AppHandle, session_id: String) -> Result<(), String> {
|
|
let state = app.state::<DbState>();
|
|
let db = state.lock().unwrap();
|
|
db.clear_messages(&session_id).map_err(|e| e.to_string())
|
|
}
|
|
|
|
/// Block C: Cross-Session-Recall — sucht in alten Sessions nach aehnlichen
|
|
/// Assistant-Antworten. Frontend ruft das beim ersten claude-text auf, damit
|
|
/// User sieht "🕒 Schon mal beantwortet" wenn er etwas Aehnliches frueher fragte.
|
|
#[tauri::command]
|
|
pub async fn search_past_messages(
|
|
app: AppHandle,
|
|
query: String,
|
|
current_session_id: Option<String>,
|
|
limit: Option<usize>,
|
|
) -> Result<Vec<PastMessageMatch>, String> {
|
|
let state = app.state::<DbState>();
|
|
let db = state.lock().unwrap();
|
|
db.search_past_messages(&query, current_session_id.as_deref(), limit.unwrap_or(3))
|
|
.map_err(|e| e.to_string())
|
|
}
|
|
|
|
/// Block C: einmalige Migration — bestehende messages in den FTS5-Index spielen.
|
|
/// Frontend ruft das einmal beim App-Start auf (idempotent — tut nichts wenn schon synced).
|
|
#[tauri::command]
|
|
pub async fn rebuild_messages_fts(app: AppHandle) -> Result<usize, String> {
|
|
let state = app.state::<DbState>();
|
|
let db = state.lock().unwrap();
|
|
db.rebuild_messages_fts().map_err(|e| e.to_string())
|
|
}
|
|
|
|
/// Session kompaktieren — fasst alte Nachrichten zusammen
|
|
#[tauri::command]
|
|
pub async fn compact_session(
|
|
app: AppHandle,
|
|
session_id: String,
|
|
keep_last: Option<usize>,
|
|
) -> Result<usize, String> {
|
|
let keep = keep_last.unwrap_or(30); // Standard: letzte 30 Nachrichten behalten
|
|
let state = app.state::<DbState>();
|
|
let db = state.lock().unwrap();
|
|
let compacted = db.compact_session(&session_id, keep).map_err(|e| e.to_string())?;
|
|
|
|
if compacted > 0 {
|
|
println!("📦 Session {} kompaktiert: {} Nachrichten zusammengefasst", session_id, compacted);
|
|
}
|
|
|
|
Ok(compacted)
|
|
}
|
|
|
|
// ============ Monitor-Events Commands ============
|
|
|
|
/// Monitor-Event speichern
|
|
#[tauri::command]
|
|
pub async fn save_monitor_event(app: AppHandle, event: MonitorEvent) -> Result<(), String> {
|
|
let state = app.state::<DbState>();
|
|
let db = state.lock().unwrap();
|
|
db.save_monitor_event(&event).map_err(|e| e.to_string())
|
|
}
|
|
|
|
/// Monitor-Events laden (neueste zuerst)
|
|
#[tauri::command]
|
|
pub async fn load_monitor_events(app: AppHandle, limit: Option<usize>) -> Result<Vec<MonitorEvent>, String> {
|
|
let limit = limit.unwrap_or(1000); // Standard: letzte 1000 Events
|
|
let state = app.state::<DbState>();
|
|
let db = state.lock().unwrap();
|
|
db.load_monitor_events(limit).map_err(|e| e.to_string())
|
|
}
|
|
|
|
/// Monitor-Events nach Typ laden
|
|
#[tauri::command]
|
|
pub async fn load_monitor_events_by_type(
|
|
app: AppHandle,
|
|
event_type: String,
|
|
limit: Option<usize>,
|
|
) -> Result<Vec<MonitorEvent>, String> {
|
|
let limit = limit.unwrap_or(500);
|
|
let state = app.state::<DbState>();
|
|
let db = state.lock().unwrap();
|
|
db.load_monitor_events_by_type(&event_type, limit).map_err(|e| e.to_string())
|
|
}
|
|
|
|
/// Alle Monitor-Events löschen
|
|
#[tauri::command]
|
|
pub async fn clear_all_monitor_events(app: AppHandle) -> Result<usize, String> {
|
|
let state = app.state::<DbState>();
|
|
let db = state.lock().unwrap();
|
|
let count = db.clear_monitor_events().map_err(|e| e.to_string())?;
|
|
println!("🗑️ {} Monitor-Events gelöscht", count);
|
|
Ok(count)
|
|
}
|
|
|
|
/// Monitor-Event Statistiken
|
|
#[tauri::command]
|
|
pub async fn get_monitor_stats(app: AppHandle) -> Result<Vec<(String, usize)>, String> {
|
|
let state = app.state::<DbState>();
|
|
let db = state.lock().unwrap();
|
|
db.count_monitor_events_by_type().map_err(|e| e.to_string())
|
|
}
|
|
|
|
// ============ Phase 2.0: Fehler-Tracking Commands ============
|
|
|
|
/// Fehler tracken — gibt (count, kb_pattern_id) zurück
|
|
#[tauri::command]
|
|
pub async fn track_error(
|
|
app: AppHandle,
|
|
error_hash: String,
|
|
error_message: String,
|
|
tool: String,
|
|
) -> Result<(i32, Option<i64>), String> {
|
|
let state = app.state::<DbState>();
|
|
let db = state.lock().unwrap();
|
|
db.track_error(&error_hash, &error_message, &tool).map_err(|e| e.to_string())
|
|
}
|
|
|
|
/// KB-Pattern-ID für Fehler setzen
|
|
#[tauri::command]
|
|
pub async fn set_error_kb_pattern(
|
|
app: AppHandle,
|
|
error_hash: String,
|
|
kb_pattern_id: i64,
|
|
) -> Result<(), String> {
|
|
let state = app.state::<DbState>();
|
|
let db = state.lock().unwrap();
|
|
db.set_error_kb_pattern(&error_hash, kb_pattern_id).map_err(|e| e.to_string())
|
|
}
|
|
|
|
/// Fehler-Statistiken laden
|
|
#[tauri::command]
|
|
pub async fn get_error_stats(
|
|
app: AppHandle,
|
|
limit: Option<usize>,
|
|
) -> Result<Vec<(String, String, String, i32, Option<i64>)>, String> {
|
|
let limit = limit.unwrap_or(20);
|
|
let state = app.state::<DbState>();
|
|
let db = state.lock().unwrap();
|
|
db.get_error_stats(limit).map_err(|e| e.to_string())
|
|
}
|
|
|
|
// ============ Projekte ============
|
|
|
|
/// A project entry for quick switching.
#[derive(Debug, Clone, serde::Serialize, serde::Deserialize)]
pub struct Project {
    // Stable identifier (primary key in the `projects` table).
    pub id: String,
    // Display name shown in the UI.
    pub name: String,
    // Working directory used when a session runs in this project.
    pub working_dir: String,
    // Optional path to the project's CLAUDE.md.
    pub claude_md_path: Option<String>,
    // Optional free-text description.
    pub description: Option<String>,
    // Timestamp of last use — projects are listed by this, most recent first.
    pub last_used: String,
    // Creation timestamp.
    pub created_at: String,
    // Number of sessions started in this project (bumped by touch_project).
    pub session_count: i64,
}
|
|
|
|
impl Database {
|
|
/// Alle Projekte laden (zuletzt benutzte zuerst)
|
|
pub fn list_projects(&self) -> SqlResult<Vec<Project>> {
|
|
let mut stmt = self.conn.prepare(
|
|
"SELECT id, name, working_dir, claude_md_path, description, last_used, created_at, session_count
|
|
FROM projects ORDER BY last_used DESC"
|
|
)?;
|
|
let projects = stmt.query_map([], |row| {
|
|
Ok(Project {
|
|
id: row.get(0)?,
|
|
name: row.get(1)?,
|
|
working_dir: row.get(2)?,
|
|
claude_md_path: row.get(3)?,
|
|
description: row.get(4)?,
|
|
last_used: row.get(5)?,
|
|
created_at: row.get(6)?,
|
|
session_count: row.get(7)?,
|
|
})
|
|
})?.collect::<SqlResult<Vec<_>>>()?;
|
|
Ok(projects)
|
|
}
|
|
|
|
/// Projekt speichern (INSERT oder UPDATE)
|
|
pub fn save_project(&self, project: &Project) -> SqlResult<()> {
|
|
self.conn.execute(
|
|
"INSERT OR REPLACE INTO projects (id, name, working_dir, claude_md_path, description, last_used, created_at, session_count)
|
|
VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)",
|
|
params![
|
|
project.id, project.name, project.working_dir,
|
|
project.claude_md_path, project.description,
|
|
project.last_used, project.created_at, project.session_count,
|
|
],
|
|
)?;
|
|
Ok(())
|
|
}
|
|
|
|
/// Projekt löschen
|
|
pub fn delete_project(&self, id: &str) -> SqlResult<()> {
|
|
self.conn.execute("DELETE FROM projects WHERE id = ?1", params![id])?;
|
|
Ok(())
|
|
}
|
|
|
|
/// Projekt als zuletzt benutzt markieren
|
|
pub fn touch_project(&self, id: &str) -> SqlResult<()> {
|
|
self.conn.execute(
|
|
"UPDATE projects SET last_used = datetime('now'), session_count = session_count + 1 WHERE id = ?1",
|
|
params![id],
|
|
)?;
|
|
Ok(())
|
|
}
|
|
}
|
|
|
|
// Tauri-Commands für Projekte
|
|
|
|
#[tauri::command]
|
|
pub async fn list_projects(app: AppHandle) -> Result<Vec<Project>, String> {
|
|
let state = app.state::<DbState>();
|
|
let db = state.lock().unwrap();
|
|
db.list_projects().map_err(|e| e.to_string())
|
|
}
|
|
|
|
#[tauri::command]
|
|
pub async fn save_project(app: AppHandle, project: Project) -> Result<(), String> {
|
|
let state = app.state::<DbState>();
|
|
let db = state.lock().unwrap();
|
|
db.save_project(&project).map_err(|e| e.to_string())
|
|
}
|
|
|
|
#[tauri::command]
|
|
pub async fn delete_project(app: AppHandle, id: String) -> Result<(), String> {
|
|
let state = app.state::<DbState>();
|
|
let db = state.lock().unwrap();
|
|
db.delete_project(&id).map_err(|e| e.to_string())
|
|
}
|
|
|
|
#[tauri::command]
|
|
pub async fn switch_project(app: AppHandle, project_id: String) -> Result<Project, String> {
|
|
let state = app.state::<DbState>();
|
|
let db = state.lock().unwrap();
|
|
|
|
// Projekt als benutzt markieren
|
|
db.touch_project(&project_id).map_err(|e| e.to_string())?;
|
|
|
|
// Projekt-Daten laden
|
|
let projects = db.list_projects().map_err(|e| e.to_string())?;
|
|
let project = projects.into_iter()
|
|
.find(|p| p.id == project_id)
|
|
.ok_or_else(|| format!("Projekt '{}' nicht gefunden", project_id))?;
|
|
|
|
// Sticky-Context aktualisieren: current_project setzen
|
|
let project_json = serde_json::json!({
|
|
"id": project.id,
|
|
"name": project.name,
|
|
"working_dir": project.working_dir,
|
|
}).to_string();
|
|
|
|
db.conn.execute(
|
|
"INSERT OR REPLACE INTO sticky_context (key, value, priority, updated_at)
|
|
VALUES ('project:current', ?1, 10, datetime('now'))",
|
|
params![project_json],
|
|
).map_err(|e| e.to_string())?;
|
|
|
|
Ok(project)
|
|
}
|