Add !status command and web search tool

- !status: shows agent count, load, RAM, disk, uptime, Ollama model loaded
- web_search tool: agents can search via SearXNG (searx.mymx.me)
  Works in both the structured and text-based tool call paths (structured call shape sketched below)

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-04-07 14:36:27 +00:00
parent 50b8c7464b
commit 5cc6a38c96
2 changed files with 106 additions and 2 deletions
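
For the structured path, query_ollama dispatches on tool call objects in the model reply; the sketch below shows roughly the shape the new web_search branch handles. This is an illustration based on Ollama's tool-calling format rather than code from this commit, the example query is made up, and web_search() refers to the helper added below.

    # Hypothetical tool call as it appears in an Ollama chat reply; the dispatcher
    # pulls fn_name and fn_args out of each entry and routes "web_search" to the
    # new helper.
    reply_message = {
        "role": "assistant",
        "content": "",
        "tool_calls": [
            {
                "function": {
                    "name": "web_search",
                    "arguments": {"query": "SearXNG JSON API", "num_results": 3},
                }
            }
        ],
    }

    for call in reply_message["tool_calls"]:
        fn_name = call["function"]["name"]
        fn_args = call["function"]["arguments"]
        if fn_name == "web_search":
            result = web_search(fn_args.get("query", ""), fn_args.get("num_results", 5))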

View File

@@ -104,8 +104,31 @@ TOOLS = [
            },
        },
    },
    {
        "type": "function",
        "function": {
            "name": "web_search",
            "description": "Search the web using SearXNG. Returns titles, URLs, and snippets for the top results. Use this when you need current information or facts you're unsure about.",
            "parameters": {
                "type": "object",
                "properties": {
                    "query": {
                        "type": "string",
                        "description": "The search query",
                    },
                    "num_results": {
                        "type": "integer",
                        "description": "Number of results to return (default 5)",
                    },
                },
                "required": ["query"],
            },
        },
    },
]

SEARX_URL = CONFIG.get("searx_url", "https://searx.mymx.me")


def log(msg):
    print(f"[agent:{NICK}] {msg}", flush=True)
@@ -219,6 +242,32 @@ def save_memory(topic, content):
    return f"Memory saved to {filepath}"


def web_search(query, num_results=5):
    """Search the web via SearXNG."""
    log(f"Web search: {query[:60]}")
    try:
        import urllib.parse
        params = urllib.parse.urlencode({"q": query, "format": "json"})
        req = urllib.request.Request(
            f"{SEARX_URL}/search?{params}",
            headers={"User-Agent": "fireclaw-agent"},
        )
        with urllib.request.urlopen(req, timeout=15) as resp:
            data = json.loads(resp.read())
        results = data.get("results", [])[:num_results]
        if not results:
            return "No results found."
        lines = []
        for r in results:
            title = r.get("title", "")
            url = r.get("url", "")
            snippet = r.get("content", "")[:150]
            lines.append(f"- {title}\n {url}\n {snippet}")
        return "\n".join(lines)
    except Exception as e:
        return f"[search error: {e}]"


def try_parse_tool_call(text):
    """Try to parse a text-based tool call from model output.
    Handles formats like:
@@ -298,6 +347,12 @@ def query_ollama(messages):
                log(f"Tool call [{round_num+1}/{MAX_TOOL_ROUNDS}]: save_memory({topic})")
                result = save_memory(topic, content)
                messages.append({"role": "tool", "content": result})
            elif fn_name == "web_search":
                query = fn_args.get("query", "")
                num = fn_args.get("num_results", 5)
                log(f"Tool call [{round_num+1}/{MAX_TOOL_ROUNDS}]: web_search({query[:60]})")
                result = web_search(query, num)
                messages.append({"role": "tool", "content": result})
            else:
                messages.append({
                    "role": "tool",
@@ -324,6 +379,12 @@ def query_ollama(messages):
                log(f"Text tool call [{round_num+1}/{MAX_TOOL_ROUNDS}]: save_memory({topic})")
                result = save_memory(topic, mem_content)
                messages.append({"role": "user", "content": f"{result}\n\nNow respond to the user."})
            elif fn_name == "web_search":
                query = fn_args.get("query", "")
                num = fn_args.get("num_results", 5)
                log(f"Text tool call [{round_num+1}/{MAX_TOOL_ROUNDS}]: web_search({query[:60]})")
                result = web_search(query, num)
                messages.append({"role": "user", "content": f"Search results:\n{result}\n\nNow respond to the user based on these results."})
            payload["messages"] = messages
            continue
@@ -339,7 +400,8 @@ def build_messages(question, channel):
    if TOOLS_ENABLED:
        system += "\n\nYou have access to tools:"
        system += "\n- run_command: Execute shell commands on your system."
        system += "\n- save_memory: Save important information to your persistent workspace (/workspace/memory/). Use this to remember things across restarts — user preferences, learned facts, project context."
        system += "\n- web_search: Search the web for current information."
        system += "\n- save_memory: Save important information to your persistent workspace."
        system += "\nUse tools when needed rather than guessing. Your workspace at /workspace persists across restarts."
    if AGENT_MEMORY and AGENT_MEMORY != "# Agent Memory":
        system += f"\n\nIMPORTANT - Your persistent memory (facts you saved previously, use these to answer questions):\n{AGENT_MEMORY}"
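
For reference, web_search depends on the target SearXNG instance exposing the JSON API (format=json has to be enabled in the instance settings, which is an assumption about searx.mymx.me). A trimmed, illustrative response showing only the fields the helper reads:

    # Illustrative response for GET {SEARX_URL}/search?q=...&format=json;
    # values are made up, field names match what web_search() extracts.
    example_response = {
        "query": "example query",
        "results": [
            {
                "title": "Example result",
                "url": "https://example.org/page",
                "content": "Snippet text, truncated by web_search() to 150 characters.",
            }
        ],
    }

Each result is flattened into a "- title / url / snippet" block, so the model sees a compact plain-text summary rather than raw JSON, and the searx_url config key lets a deployment point at a different instance than the default.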

View File

@@ -153,8 +153,50 @@ export async function runOverseer(config: OverseerConfig) {
        break;
      }
      case "!status": {
        try {
          const os = await import("node:os");
          const { execFileSync } = await import("node:child_process");
          const agents = listAgents();
          const uptime = Math.floor(os.uptime() / 3600);
          const totalMem = (os.totalmem() / 1e9).toFixed(0);
          const freeMem = (os.freemem() / 1e9).toFixed(0);
          const load = os.loadavg()[0].toFixed(2);
          // Disk free
          let diskFree = "?";
          try {
            const dfOut = execFileSync("df", ["-h", "/"], { encoding: "utf-8" });
            const parts = dfOut.split("\n")[1]?.split(/\s+/);
            if (parts) diskFree = `${parts[3]} free / ${parts[1]}`;
          } catch {}
          // Ollama model loaded
          let ollamaModel = "none";
          try {
            const http = await import("node:http");
            const psData = await new Promise<string>((resolve, reject) => {
              http.get("http://localhost:11434/api/ps", (res) => {
                const chunks: Buffer[] = [];
                res.on("data", (c) => chunks.push(c));
                res.on("end", () => resolve(Buffer.concat(chunks).toString()));
              }).on("error", reject);
            });
            const running = JSON.parse(psData).models;
            if (running?.length > 0) {
              ollamaModel = running.map((m: { name: string }) => m.name).join(", ");
            }
          } catch {}
          bot.say(event.target, `Agents: ${agents.length} running | Load: ${load} | RAM: ${freeMem}/${totalMem} GB free | Disk: ${diskFree} | Uptime: ${uptime}h | Ollama: ${ollamaModel}`);
        } catch (e) {
          bot.say(event.target, "Error getting status.");
        }
        break;
      }
      case "!help": {
        bot.say(event.target, "Commands: !invoke <template> [name] | !destroy <name> | !list | !model <name> <model> | !models | !templates | !help");
        bot.say(event.target, "Commands: !invoke <template> [name] | !destroy <name> | !list | !model <name> <model> | !models | !templates | !status | !help");
        break;
      }
    }
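
The Ollama portion of !status polls the /api/ps endpoint, which reports models currently loaded into memory. A quick Python equivalent of that check, assuming a local Ollama on the default port:

    import json
    import urllib.request

    # Same check as the overseer's !status handler: list loaded Ollama models.
    with urllib.request.urlopen("http://localhost:11434/api/ps", timeout=5) as resp:
        models = json.loads(resp.read()).get("models", [])
    print(", ".join(m["name"] for m in models) or "none")

The handler then posts a single channel line of the form Agents: <n> running | Load: <1-min avg> | RAM: <free>/<total> GB free | Disk: <free> free / <size> | Uptime: <hours>h | Ollama: <models>.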