Add 5 missing skills to repo for sync coverage

github-repo-search, gooddays-calendar, luxtts,
openclaw-tavily-search, skill-vetter — previously only
in workspace, now tracked in Gitea for full sync.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
2026-03-14 20:36:22 +08:00
parent 6451d73732
commit 8bacc868bd
12 changed files with 1043 additions and 0 deletions

View File

@@ -0,0 +1,7 @@
{
"version": 1,
"registry": "https://clawhub.ai",
"slug": "openclaw-tavily-search",
"installedVersion": "0.1.0",
"installedAt": 1773208165907
}

View File

@@ -0,0 +1,48 @@
---
name: tavily-search
description: "Web search via Tavily API (alternative to Brave). Use when the user asks to search the web / look up sources / find links and Brave web_search is unavailable or undesired. Returns a small set of relevant results (title, url, snippet) and can optionally include short answer summaries."
---
# Tavily Search
Use the bundled script to search the web with Tavily.
## Requirements
- Provide API key via either:
- environment variable: `TAVILY_API_KEY`, or
- `~/.openclaw/.env` line: `TAVILY_API_KEY=...`
## Commands
Run from the OpenClaw workspace:
```bash
# raw JSON (default)
python3 {baseDir}/scripts/tavily_search.py --query "..." --max-results 5
# include short answer (if available)
python3 {baseDir}/scripts/tavily_search.py --query "..." --max-results 5 --include-answer
# stable schema (closer to web_search): {query, results:[{title,url,snippet}], answer?}
python3 {baseDir}/scripts/tavily_search.py --query "..." --max-results 5 --format brave
# human-readable Markdown list
python3 {baseDir}/scripts/tavily_search.py --query "..." --max-results 5 --format md
```
## Output
### raw (default)
- JSON: `query`, optional `answer`, `results: [{title,url,content}]`
### brave
- JSON: `query`, optional `answer`, `results: [{title,url,snippet}]`
### md
- A compact Markdown list with title/url/snippet.
## Notes
- Keep `max-results` small by default (3–5) to reduce token/reading load.
- Prefer returning URLs + snippets; fetch full pages only when needed.

View File

@@ -0,0 +1,6 @@
{
"ownerId": "kn78hhhbxwjs4nrcyn8my5fcw981wmys",
"slug": "openclaw-tavily-search",
"version": "0.1.0",
"publishedAt": 1772121679343
}

View File

@@ -0,0 +1,159 @@
#!/usr/bin/env python3
import argparse
import json
import os
import pathlib
import re
import sys
import urllib.error
import urllib.request
# Tavily REST search endpoint; queried with a JSON POST body below.
TAVILY_URL = "https://api.tavily.com/search"
def load_key():
    """Locate the Tavily API key.

    Checks the TAVILY_API_KEY environment variable first, then falls back
    to a ``TAVILY_API_KEY=...`` line in ``~/.openclaw/.env``. Surrounding
    whitespace and single/double quotes are stripped from the file value.

    Returns:
        The key as a string, or None when no non-empty value is found.
    """
    env_key = os.environ.get("TAVILY_API_KEY")
    if env_key:
        return env_key.strip()
    dotenv = pathlib.Path.home() / ".openclaw" / ".env"
    if not dotenv.exists():
        return None
    try:
        text = dotenv.read_text(encoding="utf-8", errors="ignore")
    except Exception:
        # Best-effort fallback: an unreadable .env is treated as "no key".
        return None
    match = re.search(r"^\s*TAVILY_API_KEY\s*=\s*(.+?)\s*$", text, re.M)
    if not match:
        return None
    value = match.group(1).strip().strip('"').strip("'")
    return value or None
def tavily_search(query: str, max_results: int, include_answer: bool, search_depth: str):
    """POST a search request to the Tavily API and normalize the response.

    Args:
        query: The search query string.
        max_results: Cap on the number of results kept from the response.
        include_answer: When True, request Tavily's short answer summary
            and keep the "answer" key in the returned dict.
        search_depth: "basic" or "advanced" (Tavily's two depth modes).

    Returns:
        dict with "query", "results" (list of {title, url, content}),
        and optionally "answer".

    Raises:
        SystemExit: when no API key is configured, the HTTP request fails,
            or the response body is not valid JSON.
    """
    key = load_key()
    if not key:
        raise SystemExit(
            "Missing TAVILY_API_KEY. Set env var TAVILY_API_KEY or add it to ~/.openclaw/.env"
        )
    payload = {
        "api_key": key,
        "query": query,
        "max_results": max_results,
        "search_depth": search_depth,
        "include_answer": bool(include_answer),
        "include_images": False,
        "include_raw_content": False,
    }
    data = json.dumps(payload).encode("utf-8")
    req = urllib.request.Request(
        TAVILY_URL,
        data=data,
        headers={"Content-Type": "application/json", "Accept": "application/json"},
        method="POST",
    )
    # Surface HTTP-level failures (invalid key, rate limit, ...) as a readable
    # CLI error rather than an unhandled urllib traceback, consistent with the
    # SystemExit style used for the other failure modes in this function.
    try:
        with urllib.request.urlopen(req, timeout=30) as resp:
            body = resp.read().decode("utf-8", errors="replace")
    except urllib.error.HTTPError as e:
        detail = e.read().decode("utf-8", errors="replace")[:300]
        raise SystemExit(f"Tavily HTTP {e.code}: {detail}") from None
    except urllib.error.URLError as e:
        raise SystemExit(f"Tavily request failed: {e.reason}") from None
    try:
        obj = json.loads(body)
    except json.JSONDecodeError:
        raise SystemExit(f"Tavily returned non-JSON: {body[:300]}") from None
    out = {
        "query": query,
        "answer": obj.get("answer"),
        "results": [],
    }
    # Defensive truncation: trust our own cap even if the API returns more.
    for r in (obj.get("results") or [])[:max_results]:
        out["results"].append(
            {
                "title": r.get("title"),
                "url": r.get("url"),
                "content": r.get("content"),
            }
        )
    if not include_answer:
        out.pop("answer", None)
    return out
def to_brave_like(obj: dict) -> dict:
    """Reshape a raw Tavily result dict into a web_search-like schema.

    Each result's "content" field is renamed to "snippet"; the "answer"
    key is carried over only when it is present in *obj* (even if None).
    """
    reshaped = [
        {
            "title": item.get("title"),
            "url": item.get("url"),
            "snippet": item.get("content"),
        }
        for item in obj.get("results", []) or []
    ]
    brave = {"query": obj.get("query"), "results": reshaped}
    if "answer" in obj:
        brave["answer"] = obj.get("answer")
    return brave
def to_markdown(obj: dict) -> str:
    """Render a result dict as a compact numbered Markdown list.

    The optional answer (if truthy) comes first, followed by one entry per
    result: a numbered title line, then url and snippet lines when those
    fields are non-empty. The output always ends with a single newline.
    """
    parts = []
    answer = obj.get("answer")
    if answer:
        parts.append(answer.strip())
        parts.append("")
    for idx, item in enumerate(obj.get("results", []) or [], 1):
        # Fall back from title -> url -> placeholder for the heading.
        heading = (item.get("title") or "").strip() or item.get("url") or "(no title)"
        link = item.get("url") or ""
        body = (item.get("content") or "").strip()
        parts.append(f"{idx}. {heading}")
        if link:
            parts.append(f" {link}")
        if body:
            parts.append(f" - {body}")
    return "\n".join(parts).strip() + "\n"
def main():
    """CLI entry point: parse arguments, run the search, print the result."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--query", required=True)
    parser.add_argument("--max-results", type=int, default=5)
    parser.add_argument("--include-answer", action="store_true")
    parser.add_argument(
        "--search-depth",
        default="basic",
        choices=["basic", "advanced"],
        help="Tavily search depth",
    )
    parser.add_argument(
        "--format",
        default="raw",
        choices=["raw", "brave", "md"],
        help="Output format: raw (default) | brave (title/url/snippet) | md (human-readable)",
    )
    opts = parser.parse_args()

    # Clamp the requested result count into [1, 10] before hitting the API.
    capped = min(max(opts.max_results, 1), 10)
    res = tavily_search(
        query=opts.query,
        max_results=capped,
        include_answer=opts.include_answer,
        search_depth=opts.search_depth,
    )

    if opts.format == "md":
        sys.stdout.write(to_markdown(res))
        return
    payload = to_brave_like(res) if opts.format == "brave" else res
    json.dump(payload, sys.stdout, ensure_ascii=False)
    sys.stdout.write("\n")


if __name__ == "__main__":
    main()