Add read_file skill, session persistence, and update script

- New read_file skill: paginated file reading with line ranges,
  path restricted to /workspace, binary detection, directory listing
- Session persistence via SQLite + FTS5: conversation history survives
  agent restarts, last N messages restored into deque on boot,
  auto-prune to 1000 messages
- Update truncation hint to reference read_file instead of run_command
- New scripts/update.sh for patching rootfs + rebuilding snapshot

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
This commit is contained in:
2026-04-08 14:49:54 +00:00
parent 6673210ff0
commit 5b312e34de
6 changed files with 262 additions and 1 deletions

View File

@@ -12,6 +12,7 @@ from collections import deque
from skills import discover_skills, execute_skill, set_logger as set_skills_logger
from tools import load_memory, query_ollama, set_logger as set_tools_logger
from sessions import init_db, save_message, load_recent, set_logger as set_sessions_logger
# ─── Config ──────────────────────────────────────────────────────────
@@ -63,6 +64,7 @@ def log(msg):
# Inject logger into submodules
set_skills_logger(log)
set_tools_logger(log)
set_sessions_logger(log)
# ─── Init ────────────────────────────────────────────────────────────
@@ -70,6 +72,11 @@ AGENT_MEMORY = load_memory(WORKSPACE)
TOOLS, SKILL_SCRIPTS = discover_skills(SKILL_DIRS)
log(f"Loaded {len(TOOLS)} skills: {', '.join(SKILL_SCRIPTS.keys())}")
db_conn = init_db(f"{WORKSPACE}/sessions.db")
for msg in load_recent(db_conn, CONTEXT_SIZE):
recent.append(msg)
log(f"Session: restored {len(recent)} messages")
def reload_memory():
global AGENT_MEMORY
@@ -212,6 +219,7 @@ def handle_message(irc, source_nick, target, text):
reply_to = source_nick if is_dm else target
recent.append({"nick": source_nick, "text": text, "channel": channel})
save_message(db_conn, source_nick, channel, text)
if source_nick == NICK:
return
@@ -249,6 +257,7 @@ def handle_message(irc, source_nick, target, text):
irc.say(reply_to, "\n".join(lines))
recent.append({"nick": NICK, "text": response[:200], "channel": channel})
save_message(db_conn, NICK, channel, response[:200], full_text=response)
except Exception as e:
log(f"Error handling message: {e}")
try:

87
agent/sessions.py Normal file
View File

@@ -0,0 +1,87 @@
"""Session persistence — SQLite + FTS5 for conversation history."""
import sqlite3
import threading
import time
def log(msg):
    """Fallback logger: tag the message and print it unbuffered to stdout.

    The agent replaces this at boot via set_logger(), so this default is
    only used before injection (e.g. during init_db at import time).
    """
    line = f"[sessions] {msg}"
    print(line, flush=True)
def set_logger(fn):
    """Replace the module-level log() with the host's logger.

    The agent calls this once at startup (see set_sessions_logger in the
    agent diff) so session messages flow into the shared log sink instead
    of bare stdout. `fn` must accept a single message argument.
    """
    global log
    log = fn
_write_lock = threading.Lock()
def init_db(db_path):
    """Create or open the session database and return its connection.

    Sets up the messages table, a (channel, ts) index, and an
    external-content FTS5 index, then prunes history down to the
    retention limit. The connection is shared across threads
    (check_same_thread=False); writers coordinate via _write_lock.
    """
    db = sqlite3.connect(db_path, check_same_thread=False)
    # WAL lets readers proceed while a write is in flight.
    db.execute("PRAGMA journal_mode=WAL")
    schema = (
        """
        CREATE TABLE IF NOT EXISTS messages (
            id INTEGER PRIMARY KEY AUTOINCREMENT,
            ts REAL NOT NULL,
            nick TEXT NOT NULL,
            channel TEXT NOT NULL,
            text TEXT NOT NULL,
            full_text TEXT
        )
        """,
        """
        CREATE INDEX IF NOT EXISTS idx_messages_channel_ts
            ON messages(channel, ts)
        """,
        # FTS5 virtual table (external content) for full-text search.
        """
        CREATE VIRTUAL TABLE IF NOT EXISTS messages_fts
            USING fts5(text, content=messages, content_rowid=id)
        """,
    )
    for ddl in schema:
        db.execute(ddl)
    db.commit()
    _prune(db)
    log(f"Session DB ready: {db_path}")
    return db
def save_message(conn, nick, channel, text, full_text=None):
    """Persist a message to the session database.

    Inserts the message row and its FTS index entry in a single
    transaction so the two tables cannot drift apart: previously a
    failure in the FTS insert left the message row sitting in an open
    transaction, to be silently committed by the next unrelated commit
    without a matching index entry.

    Args:
        conn: connection returned by init_db().
        nick: sender nickname.
        channel: channel (or DM target) the message belongs to.
        text: possibly-truncated message text (this is what gets indexed).
        full_text: optional untruncated text, stored but not FTS-indexed.
    """
    with _write_lock:
        # `with conn:` commits on success and rolls back on any
        # exception — atomicity replaces the old explicit commit().
        with conn:
            cur = conn.execute(
                "INSERT INTO messages (ts, nick, channel, text, full_text) VALUES (?, ?, ?, ?, ?)",
                (time.time(), nick, channel, text, full_text),
            )
            conn.execute(
                "INSERT INTO messages_fts(rowid, text) VALUES (?, ?)",
                (cur.lastrowid, text),
            )
def load_recent(conn, limit=20):
    """Load the last `limit` messages for boot recovery.

    Returns a list of {"nick", "text", "channel"} dicts in chronological
    (oldest-first) order, ready to be appended to the in-memory deque.
    """
    cursor = conn.execute(
        "SELECT nick, text, channel FROM messages ORDER BY id DESC LIMIT ?",
        (limit,),
    )
    # The query fetches newest-first; flip to restore chronological order.
    history = [
        {"nick": nick, "text": text, "channel": channel}
        for nick, text, channel in cursor
    ]
    history.reverse()
    return history
def _prune(conn, keep=1000):
    """Trim history to the newest `keep` rows. Runs once at init."""
    total = conn.execute("SELECT COUNT(*) FROM messages").fetchone()[0]
    deleted = total - keep
    if deleted <= 0:
        return
    conn.execute("""
        DELETE FROM messages WHERE id NOT IN (
            SELECT id FROM messages ORDER BY id DESC LIMIT ?
        )
    """, (keep,))
    # External-content FTS tables must be rebuilt after a bulk delete,
    # since the index is maintained manually rather than by triggers.
    conn.execute("INSERT INTO messages_fts(messages_fts) VALUES('rebuild')")
    conn.commit()
    log(f"Pruned {deleted} old messages (kept {keep})")

View File

@@ -166,7 +166,7 @@ def execute_skill(script_path, args, workspace, config):
with open(filepath, "w") as f:
f.write(output)
preview = output[:1500]
return f"{preview}\n\n[output truncated — full result ({len(output)} chars) saved to {filepath}. Use run_command to read it: cat {filepath}]"
return f"{preview}\n\n[output truncated — full result ({len(output)} chars) saved to {filepath}. Use read_file to view it.]"
return output
except subprocess.TimeoutExpired:

81
scripts/update.sh Executable file
View File

@@ -0,0 +1,81 @@
#!/bin/bash
# Update fireclaw agent code and skills in the rootfs.
# Stops the overseer, patches the rootfs, rebuilds snapshot, restarts.
#
# Usage: ./scripts/update.sh
set -euo pipefail

# ─── Pretty-print helpers (ANSI color codes) ────────────────────────
log() { echo -e "\033[1;34m[fireclaw]\033[0m $*"; }
step() { echo -e "\n\033[1;32m━━━ $* ━━━\033[0m"; }
ok() { echo -e " \033[0;32m✓\033[0m $*"; }
err() { echo -e "\033[1;31m[error]\033[0m $*" >&2; exit 1; }

# Repo root: this script lives in scripts/, so resolve one level up.
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)"
FIRECLAW_DIR="$HOME/.fireclaw"
# VM root filesystem image produced by install.sh.
ROOTFS="$FIRECLAW_DIR/agent-rootfs.ext4"
# Temporary mount point for patching the image in place.
MNT="/tmp/fireclaw-update-mnt"

# Refuse to run before install.sh has produced a rootfs.
# (The failing [[ ]] is safe under set -e: it is not the final command
# of the && list, so a false condition just skips err.)
[[ ! -f "$ROOTFS" ]] && err "No rootfs found at $ROOTFS — run install.sh first."

# ─── Stop overseer ──────────────────────────────────────────────────
# The rootfs must not be in use while we mount and modify it.
step "Stop overseer"
if systemctl is-active --quiet fireclaw-overseer 2>/dev/null; then
sudo systemctl stop fireclaw-overseer
ok "Overseer stopped"
else
ok "Overseer not running"
fi
# Wait for any firecracker processes to exit
sleep 1

# ─── Build TypeScript ───────────────────────────────────────────────
step "Build TypeScript"
cd "$SCRIPT_DIR"
npm run build
ok "TypeScript compiled"

# ─── Patch rootfs ───────────────────────────────────────────────────
step "Patch rootfs"
sudo mkdir -p "$MNT"
sudo mount "$ROOTFS" "$MNT" || err "Failed to mount rootfs"
# From here on, guarantee the image is unmounted even if a copy fails;
# errors are suppressed so the trap itself cannot abort the script.
trap 'sudo umount "$MNT" 2>/dev/null; sudo rmdir "$MNT" 2>/dev/null' EXIT
# Sync agent code and skills into the guest's /opt.
sudo cp "$SCRIPT_DIR/agent/"*.py "$MNT/opt/agent/"
sudo chmod +x "$MNT/opt/agent/agent.py"
# Full replace (not overlay) so deleted skills disappear from the guest.
sudo rm -rf "$MNT/opt/skills/"*
sudo cp -r "$SCRIPT_DIR/skills/"* "$MNT/opt/skills/"
sudo chmod +x "$MNT/opt/skills/"*/run.*
sudo umount "$MNT"
sudo rmdir "$MNT"
# Clean unmount succeeded — disarm the safety trap.
trap - EXIT
ok "Agent + skills updated in rootfs"

# ─── Rebuild snapshot ───────────────────────────────────────────────
# The old snapshot memory/state reference the pre-patch rootfs; drop
# them so `fireclaw snapshot create` starts from the updated image.
step "Rebuild snapshot"
rm -f "$FIRECLAW_DIR/snapshot.state" \
"$FIRECLAW_DIR/snapshot.mem" \
"$FIRECLAW_DIR/snapshot-rootfs.ext4"
fireclaw snapshot create
ok "Snapshot rebuilt"

# ─── Restart overseer ──────────────────────────────────────────────
step "Restart overseer"
sudo systemctl start fireclaw-overseer
ok "Overseer started"
echo ""
log "Update complete. Use IRC to test."

17
skills/read_file/SKILL.md Normal file
View File

@@ -0,0 +1,17 @@
---
name: read_file
description: Read a file from the workspace with optional line range. Use this to view large tool outputs, logs, or any file in /workspace.
parameters:
path:
type: string
description: Path to the file to read (must be under /workspace)
required: true
offset:
type: integer
description: Start reading from this line number (default 1)
required: false
limit:
type: integer
description: Maximum number of lines to return (default 200)
required: false
---

67
skills/read_file/run.py Normal file
View File

@@ -0,0 +1,67 @@
#!/usr/bin/env python3
"""Read a file from /workspace with optional line range.

Input: a JSON object on stdin — {"path": str, "offset": int?, "limit": int?}.
Output: numbered lines on stdout. All failures are reported as printed
"[error: ...]" messages with exit status 0, so the agent sees the message
instead of a skill crash.
"""
import json
import os
import sys


def main(args=None):
    """Run the skill. `args` defaults to the JSON object read from stdin."""
    if args is None:
        args = json.loads(sys.stdin.read())
    path = args.get("path", "")
    offset = max(int(args.get("offset", 1)), 1)   # 1-based start line
    limit = max(int(args.get("limit", 200)), 1)   # max lines per call

    # Canonicalize the workspace root so a trailing slash or symlink in
    # the env var cannot defeat the containment check below.
    workspace = os.path.realpath(os.environ.get("WORKSPACE", "/workspace"))

    # Resolve relative paths against the workspace (not the CWD — the
    # skill contract says paths live under /workspace), then canonicalize
    # and ensure the result stays inside it.
    resolved = os.path.realpath(os.path.join(workspace, path))
    if resolved != workspace and not resolved.startswith(workspace + "/"):
        print(f"[error: path must be under {workspace}]")
        return
    if not os.path.exists(resolved):
        print(f"[error: file not found: {path}]")
        return

    # Directories get a bounded listing instead of file contents.
    if os.path.isdir(resolved):
        entries = sorted(os.listdir(resolved))
        print(f"Directory listing of {path} ({len(entries)} entries):")
        for entry in entries[:100]:
            full = os.path.join(resolved, entry)
            kind = "dir" if os.path.isdir(full) else "file"
            print(f" {entry} ({kind})")
        if len(entries) > 100:
            print(f" ... and {len(entries) - 100} more")
        return

    # Binary detection — a NUL byte in the first 512 bytes means binary.
    try:
        with open(resolved, "rb") as f:
            chunk = f.read(512)
        if b"\x00" in chunk:
            size = os.path.getsize(resolved)
            print(f"[binary file: {path} ({size} bytes)]")
            return
    except Exception as e:
        print(f"[error reading file: {e}]")
        return

    # Read the whole file, then slice out the requested window.
    try:
        with open(resolved) as f:
            lines = f.readlines()
    except Exception as e:
        print(f"[error reading file: {e}]")
        return

    total = len(lines)
    start_idx = offset - 1  # convert 1-based offset to 0-based index
    end_idx = min(start_idx + limit, total)
    if start_idx >= total:
        print(f"[file has {total} lines — offset {offset} is beyond end of file]")
        return
    for i in range(start_idx, end_idx):
        # Each line keeps its own trailing newline; prefix 1-based number.
        print(f"{i + 1}\t{lines[i]}", end="")
    if end_idx < total:
        print(f"\n[showing lines {offset}-{end_idx} of {total} — use offset={end_idx + 1} to read more]")


if __name__ == "__main__":
    main()