feat: add Kick, Dailymotion, and PeerTube backends to alert plugin

Kick (kk) searches channels and livestreams via public search API.
Dailymotion (dm) queries the public video API sorted by most recent. PeerTube
(pt) searches across 4 federated instances with a per-instance timeout.
All routed through SOCKS5 proxy via _urlopen.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
user
2026-02-15 23:01:21 +01:00
parent 80677343bf
commit 52c49609b3
3 changed files with 179 additions and 7 deletions

View File

@@ -345,12 +345,12 @@ No API credentials needed (uses public GQL endpoint).
!alert history <name> [n] # Show recent results (default 5)
```
Searches keywords across 7 backends: YouTube (yt), Twitch (tw), SearXNG (sx),
Reddit (rd), Mastodon (ft), DuckDuckGo (dg), Google News (gn). Names: lowercase
alphanumeric + hyphens, 1-20 chars. Keywords: 1-100 chars. Max 20 alerts/channel.
Polls every 5min. Format: `[name/yt] Title -- URL`, etc. No API credentials needed.
DuckDuckGo and Google News route through SOCKS5 proxy. Persists across restarts.
History stored in `data/alert_history.db`.
Searches keywords across 10 backends: YouTube (yt), Twitch (tw), SearXNG (sx),
Reddit (rd), Mastodon (ft), DuckDuckGo (dg), Google News (gn), Kick (kk),
Dailymotion (dm), PeerTube (pt). Names: lowercase alphanumeric + hyphens, 1-20
chars. Keywords: 1-100 chars. Max 20 alerts/channel. Polls every 5min.
Format: `[name/yt] Title -- URL`, etc. No API credentials needed. Persists across
restarts. History stored in `data/alert_history.db`.
## SearX

View File

@@ -689,13 +689,16 @@ Platforms searched:
- **Mastodon** (`ft`) -- Public hashtag timeline across 4 instances (no auth required)
- **DuckDuckGo** (`dg`) -- HTML lite search endpoint via SOCKS5 proxy (no auth required)
- **Google News** (`gn`) -- Public RSS feed via SOCKS5 proxy (no auth required)
- **Kick** (`kk`) -- Public search API: channels and livestreams (no auth required)
- **Dailymotion** (`dm`) -- Public video API, sorted by recent (no auth required)
- **PeerTube** (`pt`) -- Federated video search across 4 instances (no auth required)
Polling and announcements:
- Alerts are polled every 5 minutes by default
- On `add`, existing results are recorded without announcing (prevents flood)
- New results announced as `[name/<tag>] Title -- URL` where tag is `yt`, `tw`,
`sx`, `rd`, `ft`, `dg`, or `gn`
`sx`, `rd`, `ft`, `dg`, `gn`, `kk`, `dm`, or `pt`
- Titles are truncated to 80 characters
- Each platform maintains its own seen list (capped at 200 per platform)
- 5 consecutive errors doubles the poll interval (max 1 hour)

View File

@@ -42,6 +42,15 @@ _MASTODON_INSTANCES = [
_MASTODON_TAG_TIMEOUT = 4
_DDG_URL = "https://html.duckduckgo.com/html/"
_GOOGLE_NEWS_RSS = "https://news.google.com/rss/search"
# Kick public search endpoint (returns channels and livestreams; no auth).
_KICK_SEARCH_URL = "https://kick.com/api/search"
# Dailymotion public REST endpoint for video search (no auth).
_DAILYMOTION_API = "https://api.dailymotion.com/videos"
# Federated PeerTube instances queried in order; results are merged and
# de-duplicated by video URL across instances.
_PEERTUBE_INSTANCES = [
    "videos.framasoft.org",
    "tilvids.com",
    "tube.tchncs.de",
    "diode.zone",
]
# Per-instance HTTP timeout in seconds -- kept short so one slow or dead
# instance cannot stall the whole poll cycle.
_PEERTUBE_TIMEOUT = 4
# -- Module-level tracking ---------------------------------------------------
@@ -621,6 +630,163 @@ def _search_google_news(keyword: str) -> list[dict]:
return results
# -- Kick search (blocking) -------------------------------------------------
def _search_kick(keyword: str) -> list[dict]:
    """Search Kick via its public search API. Blocking.

    Returns a list of result dicts with keys ``id``, ``title``, ``url``,
    ``date`` and ``extra``.  Channel hits get an ``id`` prefixed ``ch:`` and
    livestream hits ``live:`` so the two namespaces cannot collide in the
    per-platform seen list.
    """
    import urllib.parse

    params = urllib.parse.urlencode({"searched_word": keyword})
    url = f"{_KICK_SEARCH_URL}?{params}"
    req = urllib.request.Request(url, method="GET")
    req.add_header("Accept", "application/json")
    req.add_header("User-Agent", "Mozilla/5.0 (compatible; derp-bot)")
    resp = _urlopen(req, timeout=_FETCH_TIMEOUT)
    try:
        # Ensure the socket is released even if read() raises mid-transfer.
        raw = resp.read()
    finally:
        resp.close()
    data = json.loads(raw)
    results: list[dict] = []
    # Channel results (the channel itself may currently be live).
    for ch in data.get("channels") or []:
        slug = ch.get("slug", "")
        if not slug:
            continue
        username = (ch.get("user") or {}).get("username", slug)
        is_live = ch.get("isLive", False)
        title = f"{username} (live)" if is_live else username
        results.append({
            "id": f"ch:{ch.get('id', slug)}",
            "title": title,
            "url": f"https://kick.com/{slug}",
            "date": "",  # channels carry no timestamp
            "extra": "",
        })
    # Livestream results.  NOTE(review): streams are read from
    # "livestreams" -> "tags"; assumed to match Kick's response shape --
    # confirm against a live API response.
    livestreams = data.get("livestreams") or {}
    for stream in livestreams.get("tags") or []:
        stream_id = str(stream.get("id", ""))
        if not stream_id:
            continue
        session_title = stream.get("session_title", "")
        channel = stream.get("channel") or {}
        slug = channel.get("slug", "")
        viewers = stream.get("viewer_count", 0)
        title = session_title
        if viewers:
            title += f" ({viewers} viewers)"
        results.append({
            "id": f"live:{stream_id}",
            "title": title,
            "url": f"https://kick.com/{slug}" if slug else "",
            "date": _parse_date(stream.get("start_time", "")),
            "extra": "",
        })
    return results
# -- Dailymotion search (blocking) ------------------------------------------
def _search_dailymotion(keyword: str) -> list[dict]:
    """Search Dailymotion via its public video API. Blocking.

    Requests the 25 most recent matches and returns a list of result dicts
    with keys ``id``, ``title``, ``url``, ``date`` and ``extra``.  The
    ``date`` is derived from the epoch ``created_time`` field and left empty
    when that value is missing or unparsable.
    """
    import urllib.parse

    params = urllib.parse.urlencode({
        "search": keyword,
        "sort": "recent",
        "limit": "25",
        # Only ask the API for the fields we actually consume.
        "fields": "id,title,url,created_time",
    })
    url = f"{_DAILYMOTION_API}?{params}"
    req = urllib.request.Request(url, method="GET")
    req.add_header("User-Agent", "Mozilla/5.0 (compatible; derp-bot)")
    resp = _urlopen(req, timeout=_FETCH_TIMEOUT)
    try:
        # Ensure the socket is released even if read() raises mid-transfer.
        raw = resp.read()
    finally:
        resp.close()
    data = json.loads(raw)
    results: list[dict] = []
    for item in data.get("list") or []:
        video_id = item.get("id", "")
        title = item.get("title", "")
        video_url = item.get("url", "")
        created = item.get("created_time")
        date = ""
        if created:
            try:
                # created_time is a Unix timestamp; render as UTC date.
                date = datetime.fromtimestamp(
                    int(created), tz=timezone.utc,
                ).strftime("%Y-%m-%d")
            except (ValueError, OSError):
                pass  # best-effort: a bad timestamp just leaves date empty
        results.append({
            "id": video_id,
            "title": title,
            "url": video_url,
            "date": date,
            "extra": "",
        })
    return results
# -- PeerTube search (blocking) ---------------------------------------------
def _search_peertube(keyword: str) -> list[dict]:
    """Search PeerTube instances via their public search API. Blocking.

    Queries each instance in ``_PEERTUBE_INSTANCES`` with a short
    per-instance timeout, merges the results, and de-duplicates by video
    URL (federation means the same video can appear on several instances).
    A failing instance is logged at debug level and skipped -- one dead
    instance must not break the whole backend.
    """
    import urllib.parse

    # The query string does not depend on the instance; build it once.
    params = urllib.parse.urlencode({
        "search": keyword, "count": "15", "sort": "-publishedAt",
    })
    results: list[dict] = []
    seen_urls: set[str] = set()
    for instance in _PEERTUBE_INSTANCES:
        api_url = f"https://{instance}/api/v1/search/videos?{params}"
        req = urllib.request.Request(api_url, method="GET")
        req.add_header("User-Agent", "Mozilla/5.0 (compatible; derp-bot)")
        try:
            resp = _urlopen(req, timeout=_PEERTUBE_TIMEOUT)
            try:
                raw = resp.read()
            finally:
                # Release the socket even if read() raises mid-transfer.
                resp.close()
        except Exception as exc:
            _log.debug("peertube %s failed: %s", instance, exc)
            continue
        try:
            data = json.loads(raw)
        except json.JSONDecodeError:
            continue  # malformed payload: skip this instance
        for video in data.get("data") or []:
            video_url = video.get("url", "")
            if not video_url or video_url in seen_urls:
                continue
            seen_urls.add(video_url)
            name = video.get("name", "")
            acct = (video.get("account") or {}).get("displayName", "")
            title = f"{acct}: {name}" if acct else name
            date = _parse_date(video.get("publishedAt", ""))
            results.append({
                "id": video_url,  # URL doubles as the stable identifier
                "title": title,
                "url": video_url,
                "date": date,
                "extra": "",
            })
    return results
# -- Backend registry -------------------------------------------------------
_BACKENDS: dict[str, callable] = {
@@ -631,6 +797,9 @@ _BACKENDS: dict[str, callable] = {
"ft": _search_mastodon,
"dg": _search_duckduckgo,
"gn": _search_google_news,
"kk": _search_kick,
"dm": _search_dailymotion,
"pt": _search_peertube,
}