Extract skills.py and tools.py from agent.py
This commit is contained in:
175
agent/skills.py
Normal file
175
agent/skills.py
Normal file
@@ -0,0 +1,175 @@
|
||||
"""Skill discovery, parsing, and execution."""
|
||||
|
||||
import os
|
||||
import re
|
||||
import json
|
||||
import subprocess
|
||||
import time
|
||||
|
||||
|
||||
def log(msg):
    """Fallback stdout logger; agent.py swaps in its own via set_logger()."""
    print("[skills] {}".format(msg), flush=True)
|
||||
|
||||
|
||||
def set_logger(fn):
    """Allow agent.py to inject its logger.

    fn: callable taking a single message argument. Rebinding the
    module-global ``log`` routes all skill messages through the
    agent's logging instead of the import-safe print fallback.
    """
    global log
    log = fn
|
||||
|
||||
|
||||
# Tool output longer than this many characters is spilled to a file under
# <workspace>/tool_outputs; only a 1500-char preview is returned to the model.
LARGE_OUTPUT_THRESHOLD = 2000
# Incremented by execute_skill() per oversized output to produce unique
# spill-file names (output_1.txt, output_2.txt, ...).
_output_counter = 0
|
||||
|
||||
|
||||
def parse_skill_md(path):
    """Translate a SKILL.md frontmatter block into an Ollama tool definition.

    Returns the tool-definition dict, or None when the file is unreadable,
    has no ``---`` frontmatter, or the frontmatter lacks a 'name' field.
    """
    try:
        with open(path) as f:
            raw = f.read()
    except Exception as e:
        log(f"Cannot read {path}: {e}")
        return None

    # Normalize Windows line endings before matching the frontmatter fence.
    raw = raw.replace("\r\n", "\n")

    fm_match = re.match(r"^---\n(.*?)\n---", raw, re.DOTALL)
    if fm_match is None:
        log(f"No frontmatter in {path}")
        return None

    frontmatter = {}
    active_key = None       # last top-level key seen
    active_param = None     # parameter currently collecting properties
    param_specs = {}

    for raw_line in fm_match.group(1).split("\n"):
        text = raw_line.strip()
        if not text or text.startswith("#"):
            continue

        depth = len(raw_line) - len(raw_line.lstrip())

        if depth >= 2 and active_key == "parameters":
            if depth >= 4 and active_param:
                # Property line (e.g. "type: string") under the current param.
                key, _, value = text.partition(":")
                key = key.strip()
                value = value.strip().strip('"').strip("'")
                if key == "required":
                    value = value.lower() in ("true", "yes", "1")
                param_specs[active_param][key] = value
            elif ":" in text:
                # New parameter name at the two-space level.
                pname = text.rstrip(":").strip()
                active_param = pname
                param_specs[pname] = {}
        elif ":" in raw_line and depth == 0:
            # Top-level "key: value" entry.
            key, _, value = raw_line.partition(":")
            key = key.strip()
            value = value.strip().strip('"').strip("'")
            frontmatter[key] = value
            active_key = key
            if key == "parameters":
                active_param = None

    if "name" not in frontmatter:
        log(f"No 'name' field in {path}")
        return None

    if "description" not in frontmatter:
        log(f"Warning: no 'description' in {path}")

    # Build the JSON-schema style parameter spec expected by the tools API.
    properties = {}
    required = []
    known_types = ("string", "integer", "number", "boolean", "array", "object")
    for pname, spec in param_specs.items():
        ptype = spec.get("type", "string")
        if ptype not in known_types:
            log(f"Warning: unknown type '{ptype}' for param '{pname}' in {path}")
        properties[pname] = {
            "type": ptype,
            "description": spec.get("description", ""),
        }
        if spec.get("required", False):
            required.append(pname)

    return {
        "type": "function",
        "function": {
            "name": frontmatter["name"],
            "description": frontmatter.get("description", ""),
            "parameters": {
                "type": "object",
                "properties": properties,
                "required": required,
            },
        },
    }
|
||||
|
||||
|
||||
def discover_skills(skill_dirs):
    """Walk each directory in skill_dirs for <skill>/SKILL.md manifests.

    Returns (tool_defs, script_map) where script_map maps tool name to
    its runner script path. Skills without a runner are not exposed.
    """
    tool_defs = []
    script_map = {}

    for base in skill_dirs:
        if not os.path.isdir(base):
            continue
        for entry in sorted(os.listdir(base)):
            skill_home = os.path.join(base, entry)
            manifest = os.path.join(skill_home, "SKILL.md")
            if not os.path.isfile(manifest):
                continue

            definition = parse_skill_md(manifest)
            if not definition:
                continue

            fn_name = definition["function"]["name"]

            # First runner found wins: run.py takes precedence over run.sh.
            for runner in ("run.py", "run.sh"):
                candidate = os.path.join(skill_home, runner)
                if os.path.isfile(candidate):
                    script_map[fn_name] = candidate
                    break

            # Only advertise tools that actually have an executable script.
            if fn_name in script_map:
                tool_defs.append(definition)

    return tool_defs, script_map
|
||||
|
||||
|
||||
def execute_skill(script_path, args, workspace, config):
    """Run a skill script, feeding args as JSON on stdin.

    Returns the combined stdout/stderr text. Output longer than
    LARGE_OUTPUT_THRESHOLD is written to <workspace>/tool_outputs and a
    1500-char preview is returned instead. Timeouts and any other
    failure are reported as bracketed strings rather than raised.
    """
    global _output_counter

    # Child processes inherit the environment plus workspace/search config.
    child_env = dict(os.environ)
    child_env["WORKSPACE"] = workspace
    child_env["SEARX_URL"] = config.get("searx_url", "https://searx.mymx.me")

    interpreter = "python3" if script_path.endswith(".py") else "bash"

    try:
        proc = subprocess.run(
            [interpreter, script_path],
            input=json.dumps(args),
            capture_output=True,
            text=True,
            timeout=120,
            env=child_env,
        )

        combined = proc.stdout
        if proc.stderr:
            combined += f"\n[stderr] {proc.stderr}"
        combined = combined.strip() or "[no output]"

        if len(combined) <= LARGE_OUTPUT_THRESHOLD:
            return combined

        # Spill oversized output to a numbered file; hand back a preview.
        out_dir = f"{workspace}/tool_outputs"
        os.makedirs(out_dir, exist_ok=True)
        _output_counter += 1
        filepath = f"{out_dir}/output_{_output_counter}.txt"
        with open(filepath, "w") as f:
            f.write(combined)
        preview = combined[:1500]
        return f"{preview}\n\n[output truncated — full result ({len(combined)} chars) saved to {filepath}. Use run_command to read it: cat {filepath}]"
    except subprocess.TimeoutExpired:
        return "[skill timed out after 120s]"
    except Exception as e:
        return f"[skill error: {e}]"
|
||||
132
agent/tools.py
Normal file
132
agent/tools.py
Normal file
@@ -0,0 +1,132 @@
|
||||
"""LLM interaction, tool dispatch, and memory management."""
|
||||
|
||||
import os
|
||||
import re
|
||||
import json
|
||||
import urllib.request
|
||||
import urllib.error
|
||||
|
||||
|
||||
def log(msg):
    """Fallback stdout logger; agent.py swaps in its own via set_logger()."""
    print("[tools] {}".format(msg), flush=True)
|
||||
|
||||
|
||||
def set_logger(fn):
    """Allow agent.py to inject its logger.

    fn: callable taking a single message argument. Rebinding the
    module-global ``log`` routes this module's messages through the
    agent's logging instead of the print fallback above.
    """
    global log
    log = fn
|
||||
|
||||
|
||||
# ─── Memory ──────────────────────────────────────────────────────────
|
||||
|
||||
def load_memory(workspace):
    """Load persistent memory from the workspace.

    Reads <workspace>/MEMORY.md (core memory) plus every
    <workspace>/memory/*.md topic file, appending each topic as its own
    "## <topic>" section. Returns the combined text, or "" when nothing
    is stored.

    Fix: the whole scan previously lived inside one try block, so a
    missing MEMORY.md raised FileNotFoundError before the memory/ dir
    was ever scanned, silently dropping all topic files. The try is now
    narrowed to just the core file.
    """
    sections = []

    # Core memory file — optional.
    try:
        with open(f"{workspace}/MEMORY.md") as f:
            core = f.read().strip()
        if core:
            sections.append(core)
    except FileNotFoundError:
        pass

    # Per-topic memory files — best-effort; an unreadable file is skipped.
    mem_dir = f"{workspace}/memory"
    if os.path.isdir(mem_dir):
        for fname in sorted(os.listdir(mem_dir)):
            if not fname.endswith(".md"):
                continue
            try:
                with open(f"{mem_dir}/{fname}") as f:
                    body = f.read().strip()
            except Exception:
                continue
            topic = fname.replace(".md", "")
            sections.append(f"## {topic}\n{body}")

    return "\n\n".join(sections)
|
||||
|
||||
|
||||
# ─── Tool Call Parsing ───────────────────────────────────────────────
|
||||
|
||||
def try_parse_tool_call(text):
    """Extract a {"name": ..., "arguments": {...}} tool call from raw text.

    Handles models that dump the call as plain JSON, optionally wrapped
    in <tool_call> tags or surrounded by other text. Returns a
    (name, args) tuple, or None when no valid call is found.
    """
    cleaned = re.sub(r"</?tool_call>", "", text).strip()
    for i in range(len(cleaned)):
        if cleaned[i] != "{":
            continue
        # Try the longest candidate first so nested braces don't truncate args.
        j = len(cleaned)
        while j > i:
            if cleaned[j - 1] == "}":
                try:
                    parsed = json.loads(cleaned[i:j])
                except json.JSONDecodeError:
                    pass
                else:
                    tool_name = parsed.get("name")
                    tool_args = parsed.get("arguments", {})
                    if tool_name and isinstance(tool_args, dict):
                        return (tool_name, tool_args)
            j -= 1
    return None
|
||||
|
||||
|
||||
# ─── LLM Interaction ────────────────────────────────────────────────
|
||||
|
||||
def ollama_request(ollama_url, payload):
    """POST payload to Ollama's /api/chat and return the decoded JSON reply.

    Reads at most 2 MB of response body; raises urllib errors on failure
    (callers handle them).
    """
    body = json.dumps(payload).encode("utf-8")
    request = urllib.request.Request(
        f"{ollama_url}/api/chat",
        data=body,
        headers={"Content-Type": "application/json"},
    )
    with urllib.request.urlopen(request, timeout=120) as response:
        return json.loads(response.read(2_000_000))
|
||||
|
||||
|
||||
def query_ollama(messages, runtime, tools, skill_scripts, dispatch_fn, ollama_url, max_rounds):
    """Call Ollama chat API with skill-based tool support.

    Args:
        messages: chat history; mutated in place as tool rounds run.
        runtime: dict with at least a "model" key.
        tools: tool definitions to advertise to the model (may be empty/falsy).
        skill_scripts: maps tool name -> script path; gates text-based calls.
        dispatch_fn: callable(name, args, round_number) -> result string.
        ollama_url: base URL of the Ollama server.
        max_rounds: hard cap on request/tool-execution rounds.

    Returns the model's final text content, an "[error: ...]" string on
    network failure, or "[max tool rounds reached]" if the model keeps
    requesting tools.
    """
    payload = {
        "model": runtime["model"],
        "messages": messages,
        "stream": False,
        "options": {"num_predict": 512},  # cap tokens generated per round
    }

    if tools:
        payload["tools"] = tools

    for round_num in range(max_rounds):
        # Rounds left including this one — drives the wrap-up warning below.
        remaining = max_rounds - round_num
        try:
            data = ollama_request(ollama_url, payload)
        except (urllib.error.URLError, TimeoutError) as e:
            return f"[error: {e}]"

        msg = data.get("message", {})

        # Structured tool calls (native Ollama tool_calls in the reply).
        tool_calls = msg.get("tool_calls")
        if tool_calls:
            # Echo the assistant turn back so the model sees its own calls.
            messages.append(msg)
            for tc in tool_calls:
                fn = tc.get("function", {})
                result = dispatch_fn(
                    fn.get("name", ""),
                    fn.get("arguments", {}),
                    round_num + 1,
                )
                # Nudge the model to finish when the round budget is nearly spent.
                if remaining <= 2:
                    result += f"\n[warning: {remaining - 1} tool rounds remaining — wrap up]"
                messages.append({"role": "tool", "content": result})
            payload["messages"] = messages
            continue

        # Text-based tool calls (model dumped the call JSON as plain content).
        content = msg.get("content", "").strip()
        parsed_tool = try_parse_tool_call(content)
        if parsed_tool:
            fn_name, fn_args = parsed_tool
            # Only dispatch names backed by a known skill script.
            if fn_name in skill_scripts:
                messages.append({"role": "assistant", "content": content})
                result = dispatch_fn(fn_name, fn_args, round_num + 1)
                if remaining <= 2:
                    result += f"\n[warning: {remaining - 1} tool rounds remaining — wrap up]"
                # Fed back as a user turn since the model didn't use native tools.
                messages.append({
                    "role": "user",
                    "content": f"Tool result:\n{result}\n\nNow respond to the user based on this result.",
                })
                payload["messages"] = messages
                continue

        # No tool activity this round: the content is the final answer.
        return content

    return "[max tool rounds reached]"
|
||||
Reference in New Issue
Block a user