Support multiple checktypes with random selection

This commit is contained in:
Username
2025-12-25 20:23:05 +01:00
parent 755abc7f6e
commit 560230988d
2 changed files with 36 additions and 28 deletions

View File

@@ -1691,7 +1691,7 @@ class Proxywatchd():
database, source_file, checktype
"""
old_threads = config.watchd.threads
old_checktype = config.watchd.checktype
old_checktypes = list(config.watchd.checktypes)
try:
config.load()
@@ -1713,9 +1713,9 @@ class Proxywatchd():
self.scaler.max_threads = config.watchd.threads
# Warn about values requiring restart
if config.watchd.checktype != old_checktype:
if config.watchd.checktypes != old_checktypes:
_log('checktype changed (%s -> %s), requires restart to take effect' % (
old_checktype, config.watchd.checktype), 'warn')
','.join(old_checktypes), ','.join(config.watchd.checktypes)), 'warn')
_log('config reloaded: threads=%d timeout=%d checktime=%d max_fail=%d' % (
config.watchd.threads, config.watchd.timeout,
@@ -1787,25 +1787,23 @@ class Proxywatchd():
## enable tor safeguard by default
self.tor_safeguard = config.watchd.tor_safeguard
rows = self.fetch_rows()
checktype = config.watchd.checktype
checktypes = config.watchd.checktypes
num_targets = 1
# select target pool based on checktype
if checktype == 'irc':
target_pool = config.servers
elif checktype == 'judges':
# Filter out judges in cooldown (blocked/rate-limited)
all_judges = list(judges.keys())
target_pool = judge_stats.get_available_judges(all_judges)
if not target_pool:
# All judges in cooldown - use all anyway
target_pool = all_judges
if config.watchd.debug:
_log('all judges in cooldown, using full list', 'debug')
elif checktype == 'ssl':
target_pool = ssl_targets
else: # http
target_pool = list(regexes.keys())
# Build target pools for each checktype
target_pools = {}
for ct in checktypes:
if ct == 'irc':
target_pools[ct] = config.servers
elif ct == 'judges':
# Filter out judges in cooldown (blocked/rate-limited)
all_judges = list(judges.keys())
available = judge_stats.get_available_judges(all_judges)
target_pools[ct] = available if available else all_judges
elif ct == 'ssl':
target_pools[ct] = ssl_targets
else: # http/head
target_pools[ct] = list(regexes.keys())
# create all jobs first, then shuffle for interleaving
all_jobs = []
@@ -1820,6 +1818,10 @@ class Proxywatchd():
)
new_states.append(state)
# randomly select checktype for this proxy
checktype = random.choice(checktypes)
target_pool = target_pools[checktype]
# select random targets for this proxy
targets = random.sample(target_pool, min(num_targets, len(target_pool)))
@@ -2070,7 +2072,8 @@ class Proxywatchd():
stats_data['min_threads'] = self.scaler.min_threads
stats_data['max_threads'] = self.scaler.max_threads
stats_data['queue_size'] = self.job_queue.qsize()
stats_data['checktype'] = config.watchd.checktype
stats_data['checktype'] = ','.join(config.watchd.checktypes)
stats_data['checktypes'] = config.watchd.checktypes
stats_data['use_ssl'] = config.watchd.use_ssl
stats_data['profiling'] = getattr(config.args, 'profile', False) if hasattr(config, 'args') else False
stats_data['pass_rate'] = try_div(self.stats.passed, elapsed)
@@ -2113,7 +2116,7 @@ class Proxywatchd():
}
# Judge stats (when using judges checktype)
if config.watchd.checktype == 'judges':
if 'judges' in config.watchd.checktypes:
js = judge_stats.get_stats()
stats_data['judges'] = {
'total': js.get('total', 0),
@@ -2154,7 +2157,7 @@ class Proxywatchd():
def _run(self):
_log('starting...', 'watchd')
_log('config: db=%s checktype=%s threads=%d checktime=%d perfail=%d max_fail=%d' % (
config.watchd.database, config.watchd.checktype, config.watchd.threads,
config.watchd.database, ','.join(config.watchd.checktypes), config.watchd.threads,
config.watchd.checktime, config.watchd.perfail_checktime, config.watchd.max_fail), 'watchd')
# Log database status at startup
@@ -2251,7 +2254,7 @@ class Proxywatchd():
if pool:
_log(pool.status_line(), 'stats')
# Report judge stats (when using judges checktype)
if config.watchd.checktype == 'judges':
if 'judges' in config.watchd.checktypes:
_log(judge_stats.status_line(), 'stats')
# Report scaler status
_log(self.scaler.status_line(len(self.threads), self.job_queue.qsize()), 'scaler')