perf: concurrent fetches for multi-instance alert backends
Add a _fetch_many() helper that uses ThreadPoolExecutor to query instances in parallel, and refactor the PeerTube, Mastodon, Lemmy, and SearXNG backends from sequential to concurrent fetches.

Also add a retries parameter to derp.http.urlopen; the multi-instance backends pass retries=1, since instance redundancy already provides resilience. Worst-case wall time per backend drops from N*timeout to 1*timeout.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
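The diff below only touches derp.http.urlopen; the _fetch_many() helper named in the message is not part of this hunk. As a minimal sketch of how such a helper could look (fetch_one, instances, max_workers, and the returned dict shape are assumptions for illustration, not the actual module API):

from concurrent.futures import ThreadPoolExecutor, as_completed


def _fetch_many(fetch_one, instances, max_workers=8):
    """Run fetch_one(instance) for every instance in parallel.

    Returns {instance: result} for the fetches that succeeded; a failing
    instance is skipped, since the remaining instances already provide
    redundancy. Wall time is bounded by the slowest single fetch rather
    than the sum of all of them.
    """
    results = {}
    if not instances:
        return results
    with ThreadPoolExecutor(max_workers=min(max_workers, len(instances))) as pool:
        futures = {pool.submit(fetch_one, inst): inst for inst in instances}
        for fut in as_completed(futures):
            inst = futures[fut]
            try:
                results[inst] = fut.result()
            except Exception:
                # One unreachable instance must not take down the others.
                continue
    return results

Threads are a reasonable fit here because the per-instance work is I/O-bound urllib traffic, which releases the GIL while waiting on the network.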
@@ -52,24 +52,25 @@ class _ProxyHandler(SocksiPyHandler, urllib.request.HTTPSHandler):
         return self.do_open(build, req)


-def urlopen(req, *, timeout=None, context=None):
+def urlopen(req, *, timeout=None, context=None, retries=None):
     """Proxy-aware drop-in for urllib.request.urlopen.

     Retries on transient SSL/connection errors with exponential backoff.
     """
+    max_retries = retries if retries is not None else _MAX_RETRIES
     opener = _get_opener(context)
     kwargs = {}
     if timeout is not None:
         kwargs["timeout"] = timeout
-    for attempt in range(_MAX_RETRIES):
+    for attempt in range(max_retries):
         try:
             return opener.open(req, **kwargs)
         except _RETRY_ERRORS as exc:
-            if attempt + 1 >= _MAX_RETRIES:
+            if attempt + 1 >= max_retries:
                 raise
             delay = 2 ** attempt
             _log.debug("urlopen retry %d/%d after %s: %s",
-                       attempt + 1, _MAX_RETRIES, type(exc).__name__, exc)
+                       attempt + 1, max_retries, type(exc).__name__, exc)
             time.sleep(delay)
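From the caller's side, a hypothetical per-instance fetch might use the new parameter like this. Only the urlopen(req, timeout=..., retries=...) signature comes from the diff above; the endpoint, headers, and JSON handling are illustrative:

import json
import urllib.request

import derp.http


def _fetch_instance(base_url, timeout=10):
    # Hypothetical per-instance fetch for a multi-instance backend.
    req = urllib.request.Request(base_url + "/api/v1/instance",
                                 headers={"Accept": "application/json"})
    # retries=1 means a single attempt (range(1) in the loop above): a
    # failing instance is skipped rather than retried, because the other
    # instances queried in parallel already provide the redundancy.
    with derp.http.urlopen(req, timeout=timeout, retries=1) as resp:
        return json.loads(resp.read().decode("utf-8"))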