feat: add Kick, Dailymotion, and PeerTube backends to alert plugin

Kick (kk) searches channels and livestreams via public search API.
Dailymotion (dm) queries video API sorted by recent. PeerTube (pt)
searches across 4 federated instances with per-instance timeout.
All routed through SOCKS5 proxy via _urlopen.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
This commit is contained in:
user
2026-02-15 23:01:21 +01:00
parent 80677343bf
commit 52c49609b3
3 changed files with 179 additions and 7 deletions

View File

@@ -42,6 +42,15 @@ _MASTODON_INSTANCES = [
_MASTODON_TAG_TIMEOUT = 4
_DDG_URL = "https://html.duckduckgo.com/html/"
_GOOGLE_NEWS_RSS = "https://news.google.com/rss/search"
_KICK_SEARCH_URL = "https://kick.com/api/search"  # Kick public channel/livestream search
_DAILYMOTION_API = "https://api.dailymotion.com/videos"  # Dailymotion public video API
# PeerTube is federated: the same search is fanned out to each of these
# instances and the results merged (de-duplicated by video URL).
_PEERTUBE_INSTANCES = [
"videos.framasoft.org",
"tilvids.com",
"tube.tchncs.de",
"diode.zone",
]
# Per-instance request timeout in seconds, so one dead instance cannot
# stall the whole PeerTube search.
_PEERTUBE_TIMEOUT = 4
# -- Module-level tracking ---------------------------------------------------
@@ -621,6 +630,163 @@ def _search_google_news(keyword: str) -> list[dict]:
return results
# -- Kick search (blocking) -------------------------------------------------
def _search_kick(keyword: str) -> list[dict]:
    """Search Kick via public search API. Blocking."""
    import urllib.parse
    query = urllib.parse.urlencode({"searched_word": keyword})
    request = urllib.request.Request(
        f"{_KICK_SEARCH_URL}?{query}", method="GET",
    )
    request.add_header("Accept", "application/json")
    request.add_header("User-Agent", "Mozilla/5.0 (compatible; derp-bot)")
    response = _urlopen(request, timeout=_FETCH_TIMEOUT)
    body = response.read()
    response.close()
    payload = json.loads(body)
    hits: list[dict] = []
    # Matching channels; a channel may or may not be live right now.
    for channel in payload.get("channels") or []:
        slug = channel.get("slug", "")
        if not slug:
            continue
        username = (channel.get("user") or {}).get("username", slug)
        label = f"{username} (live)" if channel.get("isLive", False) else username
        hits.append({
            "id": f"ch:{channel.get('id', slug)}",
            "title": label,
            "url": f"https://kick.com/{slug}",
            "date": "",
            "extra": "",
        })
    # Matching livestreams (nested under the "tags" key of the response).
    for stream in (payload.get("livestreams") or {}).get("tags") or []:
        stream_id = str(stream.get("id", ""))
        if not stream_id:
            continue
        label = stream.get("session_title", "")
        viewers = stream.get("viewer_count", 0)
        if viewers:
            label = f"{label} ({viewers} viewers)"
        slug = (stream.get("channel") or {}).get("slug", "")
        hits.append({
            "id": f"live:{stream_id}",
            "title": label,
            "url": f"https://kick.com/{slug}" if slug else "",
            "date": _parse_date(stream.get("start_time", "")),
            "extra": "",
        })
    return hits
# -- Dailymotion search (blocking) ------------------------------------------
def _search_dailymotion(keyword: str) -> list[dict]:
    """Search Dailymotion via its public video API. Blocking.

    Queries the 25 most recent matches and returns result dicts with the
    backend-standard keys ``id``/``title``/``url``/``date``/``extra``.
    """
    import urllib.parse
    params = urllib.parse.urlencode({
        "search": keyword,
        "sort": "recent",
        "limit": "25",
        "fields": "id,title,url,created_time",
    })
    url = f"{_DAILYMOTION_API}?{params}"
    req = urllib.request.Request(url, method="GET")
    req.add_header("User-Agent", "Mozilla/5.0 (compatible; derp-bot)")
    resp = _urlopen(req, timeout=_FETCH_TIMEOUT)
    raw = resp.read()
    resp.close()
    data = json.loads(raw)
    results: list[dict] = []
    for item in data.get("list") or []:
        created = item.get("created_time")
        date = ""
        if created:
            # created_time is a Unix timestamp. Tolerate malformed values:
            # int() can raise TypeError and fromtimestamp() OverflowError
            # for out-of-range inputs — previously uncaught, so one bad
            # record aborted the whole search.
            try:
                date = datetime.fromtimestamp(
                    int(created), tz=timezone.utc,
                ).strftime("%Y-%m-%d")
            except (TypeError, ValueError, OverflowError, OSError):
                pass
        results.append({
            "id": item.get("id", ""),
            "title": item.get("title", ""),
            "url": item.get("url", ""),
            "date": date,
            "extra": "",
        })
    return results
# -- PeerTube search (blocking) ---------------------------------------------
def _search_peertube(keyword: str) -> list[dict]:
    """Search PeerTube instances via public API. Blocking.

    Fans the query out to each host in ``_PEERTUBE_INSTANCES`` with a
    short per-instance timeout so one slow or dead instance cannot stall
    the whole search. Network/parse failures are best-effort: logged at
    debug level and skipped. Because PeerTube is federated, the same
    video can surface on several instances — results are de-duplicated
    by their canonical ``url``.
    """
    import urllib.parse
    # The query string is identical for every instance — build it once
    # instead of re-encoding it on each loop iteration.
    params = urllib.parse.urlencode({
        "search": keyword, "count": "15", "sort": "-publishedAt",
    })
    results: list[dict] = []
    seen_urls: set[str] = set()
    for instance in _PEERTUBE_INSTANCES:
        api_url = f"https://{instance}/api/v1/search/videos?{params}"
        req = urllib.request.Request(api_url, method="GET")
        req.add_header("User-Agent", "Mozilla/5.0 (compatible; derp-bot)")
        try:
            resp = _urlopen(req, timeout=_PEERTUBE_TIMEOUT)
            raw = resp.read()
            resp.close()
        except Exception as exc:
            # Deliberately broad: any transport error just skips the host.
            _log.debug("peertube %s failed: %s", instance, exc)
            continue
        try:
            data = json.loads(raw)
        except json.JSONDecodeError:
            continue
        for video in data.get("data") or []:
            video_url = video.get("url", "")
            if not video_url or video_url in seen_urls:
                continue
            seen_urls.add(video_url)
            name = video.get("name", "")
            acct = (video.get("account") or {}).get("displayName", "")
            results.append({
                "id": video_url,
                "title": f"{acct}: {name}" if acct else name,
                "url": video_url,
                "date": _parse_date(video.get("publishedAt", "")),
                "extra": "",
            })
    return results
# -- Backend registry -------------------------------------------------------
_BACKENDS: dict[str, callable] = {
@@ -631,6 +797,9 @@ _BACKENDS: dict[str, callable] = {
"ft": _search_mastodon,
"dg": _search_duckduckgo,
"gn": _search_google_news,
"kk": _search_kick,  # Kick channels + livestreams
"dm": _search_dailymotion,  # Dailymotion recent uploads
"pt": _search_peertube,  # federated PeerTube instance search
}