spaces to tabs
config.py | 64
@@ -30,16 +30,16 @@ class Config(ComboParser):
		self.add_item(section, 'tor_safeguard', bool, True, 'enable tor safeguard (default: True)', False)

		section = 'httpd'
		self.add_item(section, 'listenip', str, '127.0.0.1', 'address for the httpd to listen to (default: 127.0.0.1)', True)
		self.add_item(section, 'port', int, 8081, 'port for the httpd to listen to (default: 8081)', True)
		self.add_item(section, 'enabled', bool, False, 'start httpd (default: False)', True)

		section = 'ppf'
		self.add_item(section, 'debug', bool, False, 'whether to print additional debug info', False)
		self.add_item(section, 'search', bool, True, 'whether to use searx search engine to find new proxy lists', False)
		self.add_item(section, 'timeout', float, 15, 'timeout for blocking operations (connect/recv/...) for proxy checks in seconds', False)
		self.add_item(section, 'http_retries', int, 1, 'number of retries for http connects', False)
		self.add_item(section, 'threads', int, 1, 'number of threads to run (default: 1)', False)
		self.add_item(section, 'checktime', int, 3600, 'base checking interval for urls in db in seconds', False)
		self.add_item(section, 'perfail_checktime', int, 3600, 'additional checking interval for urls in db in seconds per resultless check', False)
		self.add_item(section, 'max_fail', int, 5, 'number of fails after which an url is considered dead', False)
@@ -52,32 +52,32 @@ class Config(ComboParser):

		self.aparser.add_argument("--file", help="import a single file containing proxy addrs", type=str, default='', required=False)

		section = 'flood'
		self.add_item(section, 'server', str, None, 'irc server address', False)
		self.add_item(section, 'target', str, None, 'target to flood', False)
		self.add_item(section, 'nickserv', str, 'nickserv', "nickserv's nickname", False)
		self.add_item(section, 'message', str, None, 'message', False)
		self.add_item(section, 'threads', int, 1, '# of threads', False)
		self.add_item(section, 'register', int, 0, 'register nickname when required', False)

		self.add_item(section, 'wait', int, 0, 'wait before sending messages', False)
		self.add_item(section, 'once', int, 0, 'quit as soon as possible', False)
		self.add_item(section, 'hilight', int, 0, 'try to hilight all nicks?', False)
		self.add_item(section, 'waitonsuccess', int, 0, 'wait for a while on success', False)
		self.add_item(section, 'debug', int, 0, 'use debug', False)
		self.add_item(section, 'duration', int, 180, 'maximum time to run', False)
		self.add_item(section, 'delay', str, 14400, 'if waitonsuccess, wait for $delay before sending other bots', False)
		self.add_item(section, 'nick', str, None, 'specify nickname to use', False)
		self.add_item(section, 'use_ssl', int, 2, 'Use ssl? (0: false, 1: true, 2: random)', False)
		self.add_item(section, 'cycle', int, 0, 'cycle flood', False)
		self.add_item(section, 'change_nick', int, 0, 'Change nick between messages (useful when flooding privates)', False)
		self.add_item(section, 'use_timeout', int, 0, 'make connections quit through timeout', False)
		self.add_item(section, 'clones', int, 1, 'Number of connection repeats to run', False)
		self.add_item(section, 'query', bool, False, 'also flood in query', False)
		self.add_item(section, 'noquerybefore', int, 10, 'do not send query before x secs being connected', False)
		self.add_item(section, 'oper', bool, False, 'piss off opers', False)
		self.add_item(section, 'whois', bool, False, 'piss off opers with /whois', False)
		self.add_item(section, 'modex', bool, False, 'make +/- x mode', False)
		self.add_item(section, 'os', bool, False, 'piss off opers with /os', False)
		self.add_item(section, 'file', str, None, 'read flood content from file', False)
		self.add_item(section, 'failid', str, None, 'generate nickserv warn. about IDENTIFY attempts', False)
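For reference, a minimal sketch of the add_item pattern used throughout this class. This is not the real ComboParser; the attribute-style access is an assumption based on how config.ppf.timeout and friends are read in fetch.py and ppf.py below, and ini/argv parsing is omitted entirely.

# sketch only: shows how (section, name, type, default) plausibly map onto
# attribute access like config.ppf.timeout
class _Section(object):
	pass

class SketchConfig(object):
	def add_item(self, section, name, typ, default, helptext, required):
		sec = getattr(self, section, None)
		if sec is None:
			sec = _Section()
			setattr(self, section, sec)
		# store the typed default; a real parser would overlay ini/argv values
		setattr(sec, name, typ(default) if default is not None else None)

cfg = SketchConfig()
cfg.add_item('ppf', 'timeout', float, 15, 'timeout for blocking operations', False)
print(cfg.ppf.timeout)	# -> 15.0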
fetch.py | 178
@@ -6,119 +6,119 @@ from misc import _log

config = None
def set_config(cfg):
	global config
	config = cfg

cleanhtml_re = [
	re.compile('<.*?>'),
	re.compile(r'\s+'),
	re.compile('::+'),
]
def cleanhtml(raw_html):
	html = raw_html.replace('&nbsp;', ' ')
	html = re.sub(cleanhtml_re[0], ':', html)
	html = re.sub(cleanhtml_re[1], ':', html)
	html = re.sub(cleanhtml_re[2], ':', html)
	return html
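A quick worked example of what cleanhtml produces; this is a standalone copy of the function above so it can be run in isolation.

# tags and whitespace both become ':', then runs of ':' collapse
import re

cleanhtml_re = [re.compile('<.*?>'), re.compile(r'\s+'), re.compile('::+')]

def cleanhtml(raw_html):
	html = raw_html.replace('&nbsp;', ' ')
	html = re.sub(cleanhtml_re[0], ':', html)	# tags -> ':'
	html = re.sub(cleanhtml_re[1], ':', html)	# whitespace -> ':'
	html = re.sub(cleanhtml_re[2], ':', html)	# collapse ':' runs
	return html

print(cleanhtml('<td>1.2.3.4</td>&nbsp;<td>8080</td>'))
# -> ':1.2.3.4:8080:'

Note the effect: an ip and a port sitting in adjacent table cells end up joined by a single ':', which is exactly the shape the extraction regex below looks for.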

retry_messages = ('Engines cannot retrieve results', 'Rate limit exceeded')
def fetch_contents(url, head = False):
	host, port, ssl, uri = _parse_url(url)
	headers = [
		'Accept-Language: en-US,en;q=0.8',
		'Cache-Control: max-age=0',
	]
	if config.ppf.debug:
		_log("connecting to %s... (header: %s)" % (url, str(head)), "debug")
	while True:
		proxies = [rocksock.RocksockProxyFromURL('socks4://%s' % random.choice(config.torhosts))]
		http = RsHttp(host, ssl=ssl, port=port, keep_alive=True, timeout=config.ppf.timeout, max_tries=config.ppf.http_retries, follow_redirects=True, auto_set_cookies=True, proxies=proxies, user_agent='Mozilla/5.0 (Windows NT 6.1; rv:60.0) Gecko/20100101 Firefox/60.0')
		if not http.connect():
			_log("failed to connect to %s" % url, "ppf")
			e = http.get_last_rocksock_exception()
			if not e:
				return ''
			et = e.get_errortype()
			ee = e.get_error()
			ef = e.get_failedproxy()
			if et == rocksock.RS_ET_OWN and ee == rocksock.RS_E_TARGET_CONN_REFUSED and ef == 0:
				_log("could not connect to proxy 0 - check your connection", "error")
				time.sleep(5)
				continue
			return ''
		break

	## only request header
	if head:
		hdr = http.head(uri, headers)
		return hdr

	hdr, res = http.get(uri, headers)
	res = res.encode('utf-8') if isinstance(res, unicode) else res
	for retry_message in retry_messages:
		if retry_message in res: return ''

	return res

def valid_port(port):
	# 0 is never valid; 65535 is the last usable tcp port
	return port > 0 and port <= 65535

def is_usable_proxy(proxy):
	ip, port = proxy.split(':')
	if not valid_port(int(port)): return False

	octets = ip.split('.')
	A = int(octets[0])
	B = int(octets[1])
	C = int(octets[2])
	D = int(octets[3])

	if (A < 1 or A > 254 or \
		B > 255 or C > 255 or D > 255) or \
		(A == 10 or A == 127) or \
		(A == 192 and B == 168) or \
		(A == 172 and B >= 16 and B <= 31): return False
	return True

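Worked examples of the filter above, assuming the two definitions are in scope: it rejects malformed octets, invalid ports, and anything in loopback or RFC 1918 private space.

for p in ('1.2.3.4:8080', '10.0.0.1:8080', '192.168.1.5:3128', '172.20.0.9:1080', '8.8.8.8:0'):
	print('%s -> %s' % (p, is_usable_proxy(p)))
# 1.2.3.4:8080 -> True; the others are private/loopback ranges or a bad port -> False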
_known_proxies = {}
def extract_proxies(content, proxydb):
	# the trailing (?:\D|$) requires a non-digit delimiter or end of input
	# after the port ('$' inside a character class would be a literal dollar)
	matches = re.findall(r'([0-9]+(?:\.[0-9]+){3}:[0-9]{2,5})(?:\D|$)', cleanhtml(content))

	uniques_dict = {}
	for p in matches:
		uniques_dict[p] = True

	uniques = []
	for p in uniques_dict.keys():
		if is_usable_proxy(p): uniques.append(p)

	global _known_proxies
	if len(_known_proxies) == 0:
		known = proxydb.execute('SELECT proxy FROM proxylist').fetchall()
		for k in known:
			_known_proxies[k[0]] = True

	new = []
	for p in uniques:
		if not p in _known_proxies:
			new.append(p)
			_known_proxies[p] = True

	return len(uniques), new

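The extraction regex in isolation, run against the ':'-delimited text that cleanhtml produces:

import re

# same pattern as extract_proxies above; the trailing group keeps the port
# from being glued onto whatever digits follow
pat = re.compile(r'([0-9]+(?:\.[0-9]+){3}:[0-9]{2,5})(?:\D|$)')
text = ':1.2.3.4:8080:proxy:5.6.7.8:3128'
print(pat.findall(text))	# -> ['1.2.3.4:8080', '5.6.7.8:3128']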
def extract_urls(content, urls = None, urignore=None):
	urls = [] if not urls else urls
	urignore = [] if not urignore else urignore
	soup = soupify(content)
	for a in soup.body.find_all('a'):
		if not 'rel' in a.attrs or not 'noreferrer' in a.attrs['rel'] or a.attrs['href'] in urls: continue
		bad = False
		href = a.attrs['href']
		for i in urignore:
			if re.findall(i, href):
				bad = True
				break
		if not bad: urls.append(href)
	return urls

ppf.py | 594
@@ -17,370 +17,370 @@ config = Config()
_known_proxies = {}

def import_from_file(fn, sqlite):
	with open(fn, 'r') as f:
		urls = [ url for url in f.read().split('\n') if url != '' ]
	cinc = 0
	while True:
		chunk = urls[cinc:cinc+200]
		# use the passed-in name/handle instead of hardcoded globals
		if len(chunk): dbs.insert_urls(chunk, fn, sqlite)
		else: break
		cinc = cinc + 200


def get_content_type(url):
	hdr = fetch.fetch_contents(url, head=True)

	for h in hdr.split('\n'):
		if h.lower().startswith('content-type: '): return h.lower().split(':')[1].strip()

	return ''

def is_good_content_type(string):
	allowed_ct = [ 'text/html', 'text/plain', 'atom+xml' ]
	for ct in allowed_ct:
		if ct.lower() in string.lower(): return True
	return False
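How the whitelist above behaves on typical header values (assuming the helper is in scope):

print(is_good_content_type('text/html; charset=utf-8'))	# True
print(is_good_content_type('application/atom+xml'))		# True (substring match)
print(is_good_content_type('application/octet-stream'))	# False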

def proxyleech(proxydb, urldb, url, stale_count, error, retrievals, proxies_added, content_type):
	if not content_type: content_type = get_content_type(url)

	if is_good_content_type(content_type):
		try: content = fetch.fetch_contents(url)
		except KeyboardInterrupt as e: raise e
		except: content = ''
	else:
		content = ''

	unique_count, new = fetch.extract_proxies(content, proxydb)

	if retrievals == 0: # new site
		if content != '' and unique_count == 0: # site works but has zero proxy addresses
			error = 99999
	else:
		if len(new) == 0:
			stale_count += 1
		else:
			stale_count = 0
		if content == '':
			error += 1
		else:
			retrievals += 1
			error = 0
		if unique_count:
			extract_urls(content, url)

	urldb.execute('UPDATE uris SET error=?,stale_count=?,check_time=?,retrievals=?,proxies_added=?,content_type=? where url=?', (error, stale_count, int(time.time()), retrievals, proxies_added+len(new), content_type, url))
	urldb.commit()

	if not len(new): return

	dbs.insert_proxies(proxydb, new, url)


def is_bad_url(uri, domain=None, samedomain=False):
	# if the uri needs to be from the same domain and the domains mismatch
	if samedomain and str(uri.split('/')[2]).lower() != str(domain).lower():
		return True
	for u in urignore:
		if re.findall(u, uri): return True
	return False

def extract_urls(html, url):
	mytime = int(time.time())
	proto = url.split(':')[0]
	domain = url.split('/')[2]
	urls = []

	soup = BeautifulSoup(html, features='lxml')

	for a in soup.find_all('a', href=True):
		item = a['href'].encode('utf-8') if isinstance(a['href'], unicode) else a['href']
		item = item.strip()

		if item.startswith('www.'):
			item = 'http://%s' % item
		elif not item.startswith('http'):
			if not item.startswith('/'): item = '/%s' % item
			item = '%s://%s%s' % (proto, domain, item)

		elif is_bad_url(item, domain=domain, samedomain=config.ppf.extract_samedomain):
			continue
		if not item in urls: urls.append(item)

	if len(urls): dbs.insert_urls(urls, url, urldb) #insert_if_not_exists(urls)

def import_proxies_from_file(proxydb, fn):
	content = open(fn, 'r').read()
	unique_count, new = fetch.extract_proxies(content, proxydb)
	if len(new):
		dbs.insert_proxies(proxydb, new, fn)
		return 0
	return 1

def serve_loop(hs, done):
	client_threads = []
	while not done.is_set():
		c = hs.wait_client()

		evt_done = threading.Event()
		cthread = threading.Thread(target=httpsrv_client_thread, args=(c, evt_done))
		cthread.daemon = True
		cthread.start()

		ctrm = []
		for ct, ct_done in client_threads:
			if ct_done.is_set():
				ctrm.append((ct, ct_done))
				ct.join()

		if len(ctrm):
			client_threads = [ x for x in client_threads if not x in ctrm ]

		client_threads.append((cthread, evt_done))

def forbidden_page():
	return (
		'<!DOCTYPE html>\n'
		' <head>\n'
		' <style>div.e{position:fixed;top:25%;bottom:25%;left:25%;right:25%;font-size:150px;text-align:center;}</style>\n'
		' <title>Forbidden</title>\n'
		' </head>\n'
		' <body>\n'
		' <div class="e">🖕</div>\n'
		' </body>\n'
		'</html>')

def httpsrv_client_thread(c, evt_done):
	req = c.read_request()
	if req is None: pass
	elif len(watchlist) == 0:
		c.redirect('/config.html')
	elif os.path.isdir(req['url'][1:]):
		c.send(403, 'Forbidden', forbidden_page())
	elif req['url'] == '/':
		c.redirect('/index.html')
	elif req['url'].startswith('/index.html'):
		variables = variables_from_request(req)
		r, redir = render_site(variables)
		if redir != "":
			c.redirect(redir)
		else:
			if r == '': r = render_empty(variables=variables)
			c.send(200, "OK", r)
	elif not '..' in req['url'] and file_exists(os.getcwd() + req['url']):
		c.serve_file(os.getcwd() + req['url'])
	elif req['url'] == '/robots.txt':
		c.send(200, "OK", "User-agent: *\nDisallow: /")

	elif req['url'].startswith('/config.html'):
		if args.config > 0:
			variables = variables_from_request(req)
			r, redir = configpage(req, variables)
		else:
			redir = '/index.html'
		if redir != "":
			c.redirect(redir)
		else:
			if r == '': r = render_empty(variables=variables)
			c.send(200, "OK", r)

	else:
		c.send(404, "not exist", "the requested file does not exist!!!1")
	c.disconnect()
	evt_done.set()

def start_server(ip, port):
	done = threading.Event()
	from httpsrv import HttpSrv
	hs = HttpSrv(ip, port)
	try:
		hs.setup()
	except socket.error as e:
		if e.errno == errno.EADDRINUSE:
			sys.stderr.write((
				"ERROR: server socket address in use\n"
				"wait a couple seconds and try again.\n"
				"in case you're in pdb, you need to quit it\n"))
			sys.exit(1)
		else:
			raise e

	t = threading.Thread(target=serve_loop, args=(hs, done))
	t.daemon = True
	t.start()
	return t, done
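Usage sketch for start_server (names taken from this file; the sleep loop stands in for real work). One caveat: serve_loop only re-checks the done flag after hs.wait_client() returns, so a clean shutdown may need one last incoming connection.

t, done = start_server(config.httpd.listenip, config.httpd.port)
try:
	while True: time.sleep(1)
except KeyboardInterrupt:
	done.set()	# picked up by serve_loop between clients
	t.join()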

def extract_proxies(content):
	# same (?:\D|$) delimiter as the fetch.py variant above
	matches = re.findall(r'([0-9]+(?:\.[0-9]+){3}:[0-9]{2,5})(?:\D|$)', fetch.cleanhtml(content))
	uniques_dict = {}
	for p in matches:
		uniques_dict[p] = True

	uniques = []
	for p in uniques_dict.keys():
		if fetch.is_usable_proxy(p): uniques.append(p)

	return uniques


class Leechered(threading.Thread):
	#def __init__(self, proxydb, urldb, url, stale_count, error, retrievals, proxies_added, content_type):
	def __init__(self, url, stale_count, error, retrievals, proxies_added, content_type):
		self.status = 'nok'
		self.proxylist = []
		self.running = True
		self.url = url
		self.stale_count = stale_count
		self.error = error
		self.retrievals = retrievals
		self.proxies_added = proxies_added
		self.content_type = content_type
		self.execute = ''
		threading.Thread.__init__(self)

	def retrieve(self):
		return self.url, self.proxylist, self.stale_count, self.error, self.retrievals, self.content_type, self.proxies_added, self.execute
	# note: the instance attribute self.status set in __init__ shadows this
	# method; callers read thread.status directly as a string
	def status(self):
		return self.status

	def run(self):
		self.status = 'nok'

		if not self.content_type: self.content_type = get_content_type(self.url)

		if is_good_content_type(self.content_type):
			try: content = fetch.fetch_contents(self.url)
			except KeyboardInterrupt as e: raise e
			except: content = ''
		else:
			content = ''

		unique = extract_proxies(content)
		self.proxylist = [ proxy for proxy in unique if not proxy in _known_proxies ]
		proxy_count = len(self.proxylist)

		if self.retrievals == 0: # new site
			if content != '' and len(self.proxylist) == 0: # site works but has zero proxy addresses
				self.error += 1
				self.stale_count += 1
			elif proxy_count > 0:
				self.error = 0
				self.stale_count = 0
			else:
				self.error += 2
				self.stale_count += 2
		else: # not a new site
			# proxylist is empty
			if proxy_count == 0:
				self.stale_count += 1
			# proxylist is not empty: site is working
			else:
				self.stale_count = 0
				self.error = 0
			# site has no content
			if content == '':
				self.error += 1
				self.stale_count += 1
			#else:
			#	self.retrievals += 1
			#	self.error = 0
			#	self.stale_count = 0
			# site has proxies
			if proxy_count:
				self.error = 0
				self.stale_count = 0
				extract_urls(content, self.url)

		self.execute = (self.error, self.stale_count, int(time.time()), self.retrievals, self.proxies_added+len(self.proxylist), self.content_type, self.url)
		self.status = 'ok'


if __name__ == '__main__':
	config.load()
	fetch.set_config(config)


	proxydb = mysqlite.mysqlite(config.watchd.database, str)
	dbs.create_table_if_not_exists(proxydb, 'proxylist')
	known = proxydb.execute('SELECT proxy FROM proxylist').fetchall()
	for k in known:
		_known_proxies[k[0]] = True

	with open('urignore.txt', 'r') as f:
		urignore = [ i.strip() for i in f.read().split('\n') if len(i.strip()) ]

	urldb = mysqlite.mysqlite(config.ppf.database, str)
	dbs.create_table_if_not_exists(urldb, 'uris')
	import_from_file('import.txt', urldb)
	if len(sys.argv) == 3 and sys.argv[1] == "--file":
		sys.exit(import_proxies_from_file(proxydb, sys.argv[2]))

	# start proxy watcher
	if config.watchd.threads > 0:
		watcherd = proxywatchd.Proxywatchd()
		watcherd.start()
	else:
		watcherd = None

	#start_server(config.httpd.listenip, config.httpd.port)

	#qurl = 'SELECT url,stale_count,error,retrievals,proxies_added,content_type FROM uris WHERE error < ? and (check_time+?+((error+stale_count)*?) <?) ORDER BY RANDOM() LIMIT 25'
	qurl = 'SELECT url,stale_count,error,retrievals,proxies_added,content_type FROM uris WHERE error < ? and (check_time+?+((error+stale_count)*?) <?) ORDER BY RANDOM()'
	#qurl = 'SELECT url,stale_count,error,retrievals,proxies_added,content_type FROM uris WHERE stale_count < ? and (check_time+?+((error+stale_count)*?) <?) ORDER BY RANDOM()'
	threads = []
	rows = []
	rinc = 0
	reqtime = time.time() - 3600
	statusmsg = time.time()
	while True:
		try:
			time.sleep(random.random()/10)
			if (time.time() - statusmsg) > 180:
				_log('running %d thread(s) over %d' % (len(threads), config.ppf.threads), 'ppf')
				statusmsg = time.time()
			if not len(rows):
				if (time.time() - reqtime) > 3:
					rows = urldb.execute(qurl, (config.ppf.max_fail, config.ppf.checktime, config.ppf.perfail_checktime, int(time.time()))).fetchall()
					reqtime = time.time()
					if len(rows) < config.ppf.threads:
						time.sleep(60)
						rows = []
					else:
						_log('handing %d job(s) to %d thread(s)' % ( len(rows), config.ppf.threads ), 'ppf')
						#nao = time.time()
						#args = [ (nao, row[0]) for row in rows ]
						#urldb.executemany('UPDATE uris SET check_time=? where url=?', args)
						#urldb.commit()


			for thread in threads:
				if thread.status == 'ok':
					url, proxylist, stale_count, error, retrievals, content_type, proxies_added, execute = thread.retrieve()
					new = []
					for p in proxylist:
						if not p in _known_proxies:
							new.append(p)
							_known_proxies[p] = 1
					execute = (error, stale_count, int(time.time()), retrievals, proxies_added+len(new), content_type, url)
					urldb.execute('UPDATE uris SET error=?,stale_count=?,check_time=?,retrievals=?,proxies_added=?,content_type=? where url=?', execute)
					urldb.commit()
					if len(new): dbs.insert_proxies(proxydb, new, url)

			threads = [ thread for thread in threads if thread.is_alive() ]
			if len(threads) < config.ppf.threads and len(rows):
				row = random.choice(rows)
				urldb.execute('UPDATE uris SET check_time=? where url=?', (time.time(), row[0]))
				urldb.commit()
				rows.remove(row)
				t = Leechered(row[0], row[1], row[2], row[3], row[4], row[5])
				threads.append(t)
				t.start()
				#time.sleep(random.random()/100)

		except KeyboardInterrupt:
			if watcherd:
				watcherd.stop()
				watcherd.finish()
			break

	print '\r',
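The qurl WHERE clause implements a simple linear backoff: a url becomes due again once check_time + checktime + (error+stale_count)*perfail_checktime lies in the past. A worked example with the config.py defaults above:

# checktime=3600, perfail_checktime=3600
error, stale_count = 1, 1
wait = 3600 + (error + stale_count) * 3600
print(wait)	# -> 10800 seconds: a url that failed/staled twice waits 3 hours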
proxywatchd.py | 718
@@ -3,10 +3,10 @@
import threading
import time, random, string, re, copy
try:
	from geoip import geolite2
	geolite = True
except:
	geolite = False

from config import Config

@@ -20,415 +20,415 @@ _run_standalone = False
cached_dns = dict()

def try_div(a, b):
	if b != 0: return a/float(b)
	return 0

def socks4_resolve(srvname, server_port):
	srv = srvname
	if srv in cached_dns:
		srv = cached_dns[srvname]
		if config.watchd.debug:
			_log("using cached ip (%s) for %s" % (srv, srvname), "debug")
	else:
		dns_fail = False
		try:
			af, sa = rocksock.resolve(rocksock.RocksockHostinfo(srvname, server_port), want_v4=True)
			if sa is not None:
				cached_dns[srvname] = sa[0]
				srv = sa[0]
			else: dns_fail = True
		except rocksock.RocksockException as e:
			assert(e.get_errortype() == rocksock.RS_ET_GAI)
			dns_fail = True
		if dns_fail:
			fail_inc = 0
			_log("could not resolve connection target %s" % srvname, "ERROR")
			return False
	return srv

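The same cache-then-resolve shape with the standard library, for illustration only; socket.getaddrinfo is a hedged stand-in for rocksock.resolve, which the real code keeps so that failures map onto RS_ET_GAI.

import socket

_dns_cache = {}

def resolve_v4_cached(host, port):
	if host in _dns_cache:
		return _dns_cache[host]
	try:
		infos = socket.getaddrinfo(host, port, socket.AF_INET, socket.SOCK_STREAM)
	except socket.gaierror:
		return None			# caller skips socks4 on failure, as above
	ip = infos[0][4][0]		# first A record
	_dns_cache[host] = ip
	return ip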

class WorkerJob():
	def __init__(self, proxy, proto, failcount, success_count, total_duration, country, oldies = False):
		self.proxy = proxy
		self.proto = proto
		self.failcount = failcount
		self.checktime = None
		self.success_count = success_count
		self.total_duration = total_duration
		self.country = country
		self.isoldies = oldies

	def connect_socket(self):
		srvname = random.choice(config.servers).strip()
		protos = ['http', 'socks5', 'socks4'] if self.proto is None else [self.proto]
		use_ssl = random.choice([0,1]) if config.watchd.use_ssl == 2 else config.watchd.use_ssl
		server_port = 6697 if use_ssl else 6667

		fail_inc = 1

		for proto in protos:
			torhost = random.choice(config.torhosts)
			# socks4 (without 4a) requires a raw ip address
			# rocksock automatically resolves if needed, but it's more
			# efficient to cache the result.
			if proto == 'socks4': srv = socks4_resolve(srvname, server_port)
			else: srv = srvname
			## skip socks4 failed resolution
			if not srv: continue

			duration = time.time()
			#rocksock.RocksockProxyFromURL('socks4://%s' % torhost),
			proxies = [
				rocksock.RocksockProxyFromURL('socks5://%s' % torhost),
				rocksock.RocksockProxyFromURL('%s://%s' % (proto, self.proxy)),
			]

			try:
				sock = rocksock.Rocksock(host=srv, port=server_port, ssl=use_ssl, proxies=proxies, timeout=config.watchd.timeout)
				sock.connect()
				sock.send('NICK\n')
				return sock, proto, duration, torhost, srvname, 0
			except rocksock.RocksockException as e:
				if config.watchd.debug:
					_log("proxy failed: %s://%s: %s" % (proto, self.proxy, e.get_errormessage()), 'debug')

				et = e.get_errortype()
				err = e.get_error()
				fp = e.get_failedproxy()

				sock.disconnect()

				if et == rocksock.RS_ET_OWN:
					# both a disconnect and a timeout at proxy 1 mean the proxy itself is down
					if fp == 1 and (err == rocksock.RS_E_REMOTE_DISCONNECTED or err == rocksock.RS_E_HIT_TIMEOUT):
						# proxy is not online, so don't waste time trying all possible protocols
						break
					elif fp == 0 and err == rocksock.RS_E_TARGET_CONN_REFUSED:
						fail_inc = 0
						if random.randint(0, (config.watchd.threads-1)/2) == 0:
							_log("could not connect to proxy 0, sleep 5s", "ERROR")
							time.sleep(5)
				elif et == rocksock.RS_ET_GAI:
					assert(0)
					fail_inc = 0
					_log("could not resolve connection target %s" % srvname, "ERROR")
					break

			except KeyboardInterrupt as e:
				raise(e)

		return None, None, None, None, None, fail_inc

	def run(self):
		self.checktime = int(time.time())

		sock, proto, duration, tor, srv, failinc = self.connect_socket()
		if not sock:
			self.failcount += failinc
			return
		try:
			recv = sock.recv(6)
			#recv = sock.recvline()

			# good data
			if re.match('^(:|NOTICE|ERROR)', recv, re.IGNORECASE):
				duration = (time.time() - duration)

				# only consult geolite2 when the module was importable and the country is unknown
				if geolite and (not self.country or self.country == 'unknown' or self.country == 'N/A'):
					match = geolite2.lookup(self.proxy.split(':')[0])
					if match is not None: self.country = match.country
					else: self.country = 'N/A'

				self.proto = proto
				self.failcount = 0
				self.success_count = self.success_count + 1
				self.total_duration += int(duration*1000)
				torstats = "" if len(config.torhosts) == 1 else ' tor: %s;' % tor
				recvstats = "".join([x if x in string.printable and ord(x) > 32 else '.' for x in recv])
				_log('%s://%s (%s) d: %.2f sec(s);%s srv: %s; recv: %s' % (proto, self.proxy, self.country, duration, torstats, srv, recvstats), 'xxxxx')
			else:
				self.failcount += 1
		except KeyboardInterrupt as e:
			raise e
		except rocksock.RocksockException as e:
			self.failcount += 1
		finally:
			sock.disconnect()


class WorkerThread():
	def __init__ (self, id):
		self.id = id
		self.done = threading.Event()
		self.thread = None
		self.workqueue = []
		self.workdone = []
		self.lock = threading.Lock()
	def stop(self):
		self.done.set()
	def term(self):
		if self.thread: self.thread.join()
	def add_jobs(self, jobs):
		with self.lock:
			self.workqueue.extend(jobs)
	def return_jobs(self):
		with self.lock:
			jobs = self.workqueue
			self.workqueue = []
			return jobs
	def jobcount(self):
		return len(self.workqueue)
	def collect(self):
		wd = copy.copy(self.workdone)
		self.workdone = []
		return wd
	def start_thread(self):
		self.thread = threading.Thread(target=self.workloop)
		self.thread.start()
	def pop_if_possible(self):
		with self.lock:
			if len(self.workqueue):
				job = self.workqueue.pop()
			else:
				job = None
			return job
	def workloop(self):
		success_count = 0
		job_count = 0
		duration_total = 0
		duration_success_total = 0
		while True:
			job = self.pop_if_possible()
			if job:
				nao = time.time()
				job.run()
				spent = time.time() - nao
				if job.failcount == 0:
					duration_success_total += spent
					success_count += 1
				job_count += 1
				duration_total += spent
				self.workdone.append(job)
			elif not self.thread:
				break
			if self.done.is_set(): break
			time.sleep(random.random() / 100)
		if self.thread:
			succ_rate = try_div(success_count, job_count)*100
			avg_succ_t = try_div(duration_success_total, success_count)
			avg_fail_t = try_div(duration_total-duration_success_total, job_count-success_count)
			avg_t = try_div(duration_total, job_count)
			_log("terminated, %d/%d (%.2f%%), avg.time S/F/T %.2f, %.2f, %.2f" \
				% (success_count, job_count, succ_rate, avg_succ_t, avg_fail_t, avg_t) \
				, self.id)

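A minimal driver showing how the pieces above compose, mirroring how Proxywatchd below fans jobs out over WorkerThread instances and harvests the finished ones:

workers = [WorkerThread(i) for i in range(4)]
for w in workers:
	w.start_thread()
# ... w.add_jobs([...]) as rows come in, see prepare_jobs below ...
done_jobs = []
for w in workers:
	done_jobs.extend(w.collect())	# finished WorkerJob objects
for w in workers:
	w.stop()	# sets the done event, workloop exits
for w in workers:
	w.term()	# joins the thread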
class Proxywatchd():
|
class Proxywatchd():
|
||||||
|
|
||||||
def stop(self):
|
def stop(self):
|
||||||
_log('halting... (%d thread(s))' % len([item for item in self.threads if True]), 'watchd')
|
_log('halting... (%d thread(s))' % len([item for item in self.threads if True]), 'watchd')
|
||||||
self.stopping.set()
|
self.stopping.set()
|
||||||
|
|
||||||
def _cleanup(self):
|
def _cleanup(self):
|
||||||
for wt in self.threads:
|
for wt in self.threads:
|
||||||
wt.stop()
|
wt.stop()
|
||||||
for wt in self.threads:
|
for wt in self.threads:
|
||||||
wt.term()
|
wt.term()
|
||||||
self.collect_work()
|
self.collect_work()
|
||||||
self.submit_collected()
|
self.submit_collected()
|
||||||
self.stopped.set()
|
self.stopped.set()
|
||||||
|
|
||||||
def finish(self):
|
def finish(self):
|
||||||
if not self.in_background: self._cleanup()
|
if not self.in_background: self._cleanup()
|
||||||
while not self.stopped.is_set(): time.sleep(0.1)
|
while not self.stopped.is_set(): time.sleep(0.1)
|
||||||
success_rate = try_div(self.totals['success'], self.totals['submitted']) * 100
|
success_rate = try_div(self.totals['success'], self.totals['submitted']) * 100
|
||||||
_log("total results: %d/%d (%.2f%%)"%(self.totals['success'], self.totals['submitted'], success_rate), "watchd")
|
_log("total results: %d/%d (%.2f%%)"%(self.totals['success'], self.totals['submitted'], success_rate), "watchd")
|
||||||
|
|
||||||
def _prep_db(self):
|
def _prep_db(self):
|
||||||
self.mysqlite = mysqlite.mysqlite(config.watchd.database, str)
|
self.mysqlite = mysqlite.mysqlite(config.watchd.database, str)
|
||||||
def _close_db(self):
|
def _close_db(self):
|
||||||
if self.mysqlite:
|
if self.mysqlite:
|
||||||
self.mysqlite.close()
|
self.mysqlite.close()
|
||||||
self.mysqlite = None
|
self.mysqlite = None
|
||||||
def __init__(self):
|
def __init__(self):
|
||||||
config.load()
|
config.load()
|
||||||
self.in_background = False
|
self.in_background = False
|
||||||
self.threads = []
|
self.threads = []
|
||||||
self.stopping = threading.Event()
|
self.stopping = threading.Event()
|
||||||
self.stopped = threading.Event()
|
self.stopped = threading.Event()
|
||||||
|
|
||||||
# create table if needed
|
# create table if needed
|
||||||
self._prep_db()
|
self._prep_db()
|
||||||
		self.mysqlite.execute('CREATE TABLE IF NOT EXISTS proxylist (proxy BLOB, country BLOB, added INT, failed INT, tested INT, source BLOB, dronebl INT, proto TEXT, success_count INT, total_duration INT)')
		self.mysqlite.commit()
		self._close_db()

		self.submit_after = config.watchd.submit_after # number of collected jobs before writing db
		self.jobs = []
		self.collected = []
		self.totals = {
			'submitted':0,
			'success':0,
		}

	def fetch_rows(self):
		self.isoldies = False
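		# a proxy is due once tested + checktime + failed*perfail_checktime < now,
		# so every failed check pushes the next attempt further into the future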
		q = 'SELECT proxy,proto,failed,success_count,total_duration,country FROM proxylist WHERE failed >= ? and failed < ? and (tested + ? + (failed * ?)) < ? ORDER BY RANDOM()'
		rows = self.mysqlite.execute(q, (0, config.watchd.max_fail, config.watchd.checktime, config.watchd.perfail_checktime, time.time())).fetchall()
		# too few due rows to keep the threads busy - check oldies?
		if len(rows) < config.watchd.threads:
			rows = []
			if config.watchd.oldies:
				self.isoldies = True
				## disable tor safeguard for old proxies
				if self.tor_safeguard: self.tor_safeguard = False
				rows = self.mysqlite.execute(q, (config.watchd.max_fail, config.watchd.max_fail + round( config.watchd.max_fail/2), config.watchd.checktime, config.watchd.oldies_checktime, time.time())).fetchall()
		return rows

	def prepare_jobs(self):
		self._prep_db()
		## enable tor safeguard by default
		self.tor_safeguard = config.watchd.tor_safeguard
		rows = self.fetch_rows()
		#print('preparing jobs, oldies: %s' % str(self.isoldies))
		for row in rows:
			job = WorkerJob(row[0], row[1], row[2], row[3], row[4], row[5], self.isoldies)
			self.jobs.append(job)
		self._close_db()

	def collect_work(self):
		for wt in self.threads:
			self.collected.extend(wt.collect())

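	# collect_work() drains finished results into self.collected;
	# collect_unfinished() takes back jobs the threads have not processed yet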
	def collect_unfinished(self):
		for wt in self.threads:
			jobs = wt.return_jobs()
			self.jobs.extend(jobs)
		if len(self.jobs):
			_log("collected %d unfinished jobs"%len(self.jobs), "watchd")

	def submit_collected(self):
		if len(self.collected) == 0: return True
		sc = 0
		args = []
		for job in self.collected:
			if job.failcount == 0: sc += 1
			args.append( (job.failcount, job.checktime, 1, job.country, job.proto, job.success_count, job.total_duration, job.proxy) )

		success_rate = (float(sc) / len(self.collected)) * 100
		ret = True
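		# tor safeguard: a batch of >= 100 results with a success rate at or below
		# outage_threshold most likely means the tor circuit is broken rather than
		# the proxies, so only the successful checks get written back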
		if len(self.collected) >= 100 and success_rate <= config.watchd.outage_threshold and self.tor_safeguard:
			_log("WATCHD %.2f%% SUCCESS RATE - tor circuit blocked? won't submit fails"%success_rate, "ERROR")
			if sc == 0: return False
			args = []
			for job in self.collected:
				if job.failcount == 0:
					args.append( (job.failcount, job.checktime, 1, job.country, job.proto, job.success_count, job.total_duration, job.proxy) )
			ret = False

		_log("updating %d DB entries (success rate: %.2f%%)"%(len(self.collected), success_rate), 'watchd')
		self._prep_db()
		query = 'UPDATE proxylist SET failed=?,tested=?,dronebl=?,country=?,proto=?,success_count=?,total_duration=? WHERE proxy=?'
		self.mysqlite.executemany(query, args)
		self.mysqlite.commit()
		self._close_db()
		self.collected = []
		self.totals['submitted'] += len(args)
		self.totals['success'] += sc
		return ret

	def start(self):
		if config.watchd.threads == 1 and _run_standalone:
			return self._run()
		else:
			return self._run_background()

	def run(self):
		if self.in_background:
			while 1: time.sleep(1)

	def _run_background(self):
		self.in_background = True
		t = threading.Thread(target=self._run)
		t.start()

	def _run(self):
		_log('starting...', 'watchd')

		for i in range(config.watchd.threads):
			threadid = ''.join( [ random.choice(string.letters) for x in range(5) ] )
			wt = WorkerThread(threadid)
			if self.in_background:
				wt.start_thread()
			self.threads.append(wt)
			#time.sleep( (random.random()/100) )
			time.sleep( (random.random()/10) )

		sleeptime = 0
		while True:

			if self.stopping.is_set():
				print('stopping is_set')
				if self.in_background: self._cleanup()
				break

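			# sleeptime throttles the loop: while it is non-zero the loop just
			# sleeps it off one second per iteration and skips the work below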
			if sleeptime == 0:
				sleeptime = 1
			else:
				time.sleep(1)
				sleeptime -= 1
				continue

			if random.choice(self.threads).jobcount() == 0:
				self.collect_unfinished()
				if not len(self.jobs):
					self.collect_work()
					if not self.submit_collected() and self.tor_safeguard:
						_log("zzZzZzzZ sleeping 1 minute(s) due to tor issues - consider decreasing thread number!", "watchd")
						self.collect_unfinished()
						sleeptime = 1*60
					else:
						self.prepare_jobs()
			else:
				if len(self.jobs) < len(self.threads):
					# allow threads enough time to consume the jobs
					sleeptime = 10

			#if len(self.jobs) >= len(self.threads):
			if len(self.jobs):
				_log("handing out %d jobs to %d thread(s)"% (len(self.jobs), len(self.threads)), 'watchd')
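				# jobs per thread = ceil(len(jobs) / len(threads)), computed by hand:
				# integer-divide, then add one if there is a remainder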
				jpt = len(self.jobs)/len(self.threads)
				if len(self.jobs)/float(len(self.threads)) - jpt > 0.0: jpt += 1
				for tid in xrange(len(self.threads)):
					self.threads[tid].add_jobs(self.jobs[tid*jpt:tid*jpt+jpt])
				self.jobs = []

			if not self.in_background: # single_thread scenario
				self.threads[0].workloop()

			self.collect_work()

			if len(self.collected) > self.submit_after:
				if not self.submit_collected() and self.tor_safeguard:
					_log("zzZzZzzZ sleeping 1 minute(s) due to tor issues - consider decreasing thread number!", "watchd")
					self.collect_unfinished()
					sleeptime = 1*60

			time.sleep(1)
			sleeptime -= 1


if __name__ == '__main__':
	_run_standalone = True

	config.load()

	w = Proxywatchd()
	try:
		w.start()
		w.run()
	except KeyboardInterrupt as e:
		pass
	finally:
		w.stop()
		w.finish()
118 scraper.py
@@ -13,83 +13,83 @@ import sys
config = Config()

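# searx.instances is expected to hold one searx instance URL per line;
# lines not starting with http(s) are skipped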
with open('searx.instances') as h:
	searx_instances = [ line.strip() for line in h.readlines() if line.lower().startswith('http') ]
print(searx_instances)

def proxyfind(sqlite = None, urignore=None):
	search = ''
	random.shuffle(searx_instances)
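	# build a randomized query: the 'p'/'w'/'s' flags in config.scraper.query
	# select the ingredients (known-good proxies, a site: filter from a seen url,
	# a canned search term); 'random.random() < random.random()' is a ~50% coin
	# flip that keeps the queries varied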

	## search by working proxy
	if 'p' in config.scraper.query:
		proxydb = mysqlite.mysqlite(config.watchd.database,str)
		proxies = [ i[0] for i in proxydb.execute('SELECT proxy FROM proxylist WHERE failed=0 ORDER BY RANDOM() LIMIT 10').fetchall() ]
		if len(proxies) and random.random() < random.random():
			search = ' '.join( random.sample(proxies, random.randint(1,2)))

	## search by relative url
	if ('w' in config.scraper.query and not len(search)) or random.random() < random.random():
		if not sqlite: sqlite = mysqlite.mysqlite(config.ppf.database,str)
		uris = [ i[0] for i in sqlite.execute('SELECT url FROM uris WHERE error=0 and url not like "%github%" ORDER BY RANDOM() LIMIT 10').fetchall() ]
		if len(uris) > 0 and random.random() < random.random():
			if len(search): search = '%s OR ' % search
			search = search + 'site:%s' % random.choice(uris).split('/')[2]

	## build string
	if ('s' in config.scraper.query and not len(search)) or random.random() < random.random():
		if len(search): search = '%s OR ' % search
		search = search + random.choice(search_terms)

	if not len(search): return
	#search_args = [ 'category=general', 'time_range=%s' % random.choice(['day','week','month','year']), 'q=%s' % urllib.quote_plus(search) ]
	search_args = [ 'category=general', 'time_range=%s' % random.choice(['day','week']), 'q=%s' % urllib.quote_plus(search) ]
	random.shuffle(search_args)
	search_arg = '&'.join(search_args)
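	# e.g. search_arg -> 'q=free+proxy+list&category=general&time_range=week'
	# (argument order is shuffled per request; the term here is only illustrative)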

	if config.scraper.debug:
		print('search_arg: %s' % search_arg)

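	# walk the instances and page through results until a page yields no urls;
	# everything found is stored via dbs.insert_urls()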
	for srx in searx_instances:
		x = 0
		while 1:
			urls = []
			if x > 0: content = fetch.fetch_contents('%s/?%s&pageno=%d' % (srx,search_arg,x))
			else: content = fetch.fetch_contents('%s/?%s' % (srx,search_arg))
			if content: urls = fetch.extract_urls(content, urls, urignore)

			if not len(urls): break
			dbs.insert_urls(urls, '%s/?%s (pageno: %d)' % (srx.split('/')[2],search_arg,x) , sqlite)
			x = x + 1


def load_urignore():
	## load bad terms
	with open('urignore.txt', 'r') as f:
		urignore = [ i.strip() for i in f.read().split('\n') if len(i.strip()) ]
	## add searx instances as bad terms (avoid loops)
	for i in searx_instances:
		urignore.append(i.split('/')[2])
	return urignore


if __name__ == '__main__':
	config.load()
	fetch.set_config(config)

	proxydb = mysqlite.mysqlite(config.watchd.database, str)
	dbs.create_table_if_not_exists(proxydb, 'proxylist')

	urldb = mysqlite.mysqlite(config.ppf.database, str)
	dbs.create_table_if_not_exists(urldb, 'uris')

	## load search terms
	with open('search_terms.txt', 'r') as f:
		search_terms = [ i.strip() for i in f.read().split('\n') if len(i.strip()) ]

	urignore = load_urignore()

	while True:
		try: proxyfind(urldb, urignore)
		except KeyboardInterrupt: break

		print '\r',