Add skill definitions (SKILL.md + run.py) for all agent tools

This commit is contained in:
2026-04-07 20:35:56 +00:00
parent 42870c7c1f
commit 4483b585a7
8 changed files with 187 additions and 0 deletions

View File

@@ -0,0 +1,9 @@
---
name: fetch_url
description: Fetch a URL and return its text content. HTML is stripped to plain text. Use this to read web pages, documentation, articles, etc.
parameters:
url:
type: string
description: The URL to fetch
required: true
---

51
skills/fetch_url/run.py Normal file
View File

@@ -0,0 +1,51 @@
#!/usr/bin/env python3
import sys
import json
import re
import urllib.request
from html.parser import HTMLParser
# Tool arguments arrive as a single JSON object on stdin (agent-runner convention).
args = json.loads(sys.stdin.read())
# URL to fetch; defaults to "" so a missing argument surfaces as a fetch error below.
url = args.get("url", "")
class TextExtractor(HTMLParser):
    """Accumulate the visible text of an HTML document, dropping script/style content."""

    # Tags whose text content is never user-visible.
    _INVISIBLE = ("script", "style", "noscript")
    # Block-level tags that should introduce a line break in the output.
    _BLOCK = ("p", "br", "div", "h1", "h2", "h3", "h4", "li", "tr")

    def __init__(self):
        super().__init__()
        self.text = []  # collected text fragments, joined by the caller
        self._skip = False  # True while inside an invisible element

    def handle_starttag(self, tag, attrs):
        if tag in self._INVISIBLE:
            self._skip = True

    def handle_endtag(self, tag):
        if tag in self._INVISIBLE:
            self._skip = False
        if tag in self._BLOCK:
            self.text.append("\n")

    def handle_data(self, data):
        if not self._skip:
            self.text.append(data)
# Fetch the page, reduce HTML to readable plain text, and print the result.
try:
    req = urllib.request.Request(url, headers={"User-Agent": "fireclaw-agent"})
    with urllib.request.urlopen(req, timeout=15) as resp:
        content_type = resp.headers.get("Content-Type", "")
        # Honour the charset declared by the server instead of assuming UTF-8;
        # fall back to UTF-8 (with replacement) when it is absent or unknown.
        charset = resp.headers.get_content_charset() or "utf-8"
        body = resp.read(50_000)  # cap the download at 50 kB
        try:
            raw = body.decode(charset, errors="replace")
        except LookupError:  # server declared a codec Python doesn't know
            raw = body.decode("utf-8", errors="replace")
    if "html" in content_type:
        parser = TextExtractor()
        parser.feed(raw)
        text = "".join(parser.text)
    else:
        text = raw
    # Collapse runs of blank lines and cap what is returned to the agent.
    text = re.sub(r"\n{3,}", "\n\n", text).strip()
    if len(text) > 3000:
        text = text[:3000] + "\n[truncated]"
    print(text or "[empty page]")
except Exception as e:
    # Surface any failure (bad URL, DNS, timeout, HTTP error) as text output.
    print(f"[fetch error: {e}]")

View File

@@ -0,0 +1,9 @@
---
name: run_command
description: Execute a shell command on this system and return the output. Use this to check system info, run scripts, fetch URLs, process data, etc.
parameters:
command:
type: string
description: The shell command to execute (bash)
required: true
---

27
skills/run_command/run.py Normal file
View File

@@ -0,0 +1,27 @@
#!/usr/bin/env python3
import subprocess
import sys
import json
# Tool arguments arrive as a single JSON object on stdin.
args = json.loads(sys.stdin.read())
cmd = args.get("command", "")

try:
    proc = subprocess.run(
        ["bash", "-c", cmd],
        capture_output=True,
        text=True,
        timeout=120,
    )
except subprocess.TimeoutExpired:
    print("[command timed out after 120s]")
except Exception as e:
    print(f"[error: {e}]")
else:
    # Assemble stdout, then tag on stderr and a non-zero exit code if present.
    pieces = [proc.stdout]
    if proc.stderr:
        pieces.append(f"\n[stderr] {proc.stderr}")
    if proc.returncode != 0:
        pieces.append(f"\n[exit code: {proc.returncode}]")
    output = "".join(pieces)
    # Keep tool output small enough for the agent's context window.
    if len(output) > 2000:
        output = output[:2000] + "\n[output truncated]"
    print(output.strip() or "[no output]")

View File

@@ -0,0 +1,13 @@
---
name: save_memory
description: Save something important to your persistent memory. Use this to remember facts about users, lessons learned, project context, or anything you want to recall in future conversations. Memories survive restarts.
parameters:
topic:
type: string
description: "Short topic name for the memory file (e.g. 'user_prefs', 'project_x', 'lessons')"
required: true
content:
type: string
description: The memory content to save
required: true
---

33
skills/save_memory/run.py Normal file
View File

@@ -0,0 +1,33 @@
#!/usr/bin/env python3
import sys
import json
import os
# Tool arguments arrive as a single JSON object on stdin.
args = json.loads(sys.stdin.read())
topic = args.get("topic", "note")
content = args.get("content", "")

# Sanitize the topic so it cannot escape the memory directory
# (e.g. topic="../../etc/cron" would otherwise write outside it).
topic = os.path.basename(topic).strip() or "note"

workspace = os.environ.get("WORKSPACE", "/workspace")
mem_dir = os.path.join(workspace, "memory")
os.makedirs(mem_dir, exist_ok=True)

# Write the memory file itself.
filepath = os.path.join(mem_dir, f"{topic}.md")
with open(filepath, "w") as f:
    f.write(content + "\n")

# Keep MEMORY.md as an index of all saved memories.
index_path = os.path.join(workspace, "MEMORY.md")
try:
    with open(index_path) as f:
        existing = f.read()
except FileNotFoundError:
    # Create the index with its header on first use; previously the header
    # was only assumed in memory and never actually written to the file.
    existing = "# Agent Memory\n"
    with open(index_path, "w") as f:
        f.write(existing)

entry = f"- [{topic}](memory/{topic}.md)"
# Test for the full index entry, not the bare topic: a substring check on
# the topic alone would wrongly skip "user" when "user_prefs" is indexed.
if entry not in existing:
    with open(index_path, "a") as f:
        f.write(f"\n{entry}")
print(f"Memory saved to {filepath}")

View File

@@ -0,0 +1,13 @@
---
name: web_search
description: Search the web using SearXNG. Returns titles, URLs, and snippets for the top results. Use this when you need current information or facts you're unsure about.
parameters:
query:
type: string
description: The search query
required: true
num_results:
type: integer
description: Number of results to return (default 5)
required: false
---

32
skills/web_search/run.py Normal file
View File

@@ -0,0 +1,32 @@
#!/usr/bin/env python3
import sys
import json
import urllib.request
import urllib.parse
# Tool arguments arrive as a single JSON object on stdin.
args = json.loads(sys.stdin.read())
query = args.get("query", "")
# num_results comes from the model and may be null / a string; coerce safely.
try:
    num_results = int(args.get("num_results", 5))
except (TypeError, ValueError):
    num_results = 5
# The runner may inject a private SearXNG endpoint via _searx_url.
searx_url = args.get("_searx_url", "https://searx.mymx.me")

try:
    params = urllib.parse.urlencode({"q": query, "format": "json"})
    req = urllib.request.Request(
        f"{searx_url}/search?{params}",
        headers={"User-Agent": "fireclaw-agent"},
    )
    with urllib.request.urlopen(req, timeout=15) as resp:
        data = json.loads(resp.read())
    results = data.get("results", [])[:num_results]
    if not results:
        print("No results found.")
    else:
        lines = []
        for r in results:
            title = r.get("title", "")
            url = r.get("url", "")
            # SearXNG may return null for "content"; guard before slicing,
            # otherwise None[:150] raises and masks the results as an error.
            snippet = (r.get("content") or "")[:150]
            lines.append(f"- {title}\n {url}\n {snippet}")
        print("\n".join(lines))
except Exception as e:
    print(f"[search error: {e}]")