#!/usr/bin/env python

import dbs
import time
import mysqlite
import proxywatchd
from misc import _log
from config import Config
import fetch
import sys
# stdlib modules used further down (os in the HTTP handler, socket/errno in start_server)
import os
import socket
import errno
from bs4 import BeautifulSoup
import re
import threading
import random

config = Config()
_known_proxies = {}

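# dbs, mysqlite, fetch, proxywatchd, misc/config and the httpsrv module imported
# later in start_server() are project-local helpers; the calls below follow the
# way this script already uses them, not a published third-party API.
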
def import_from_file(fn, sqlite):
    # Read newline-separated URLs from fn and insert them in chunks of 200,
    # using the passed-in db handle and filename rather than module-level globals.
    with open(fn, 'r') as f:
        urls = [ url for url in f.read().split('\n') if url != '' ]
    cinc = 0
    while True:
        chunk = urls[cinc:cinc+200]
        if len(chunk): dbs.insert_urls(chunk, fn, sqlite)
        else: break
        cinc += 200

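# Usage (mirrors __main__ below): import_from_file('import.txt', urldb) loads
# the newline-separated URLs from import.txt into the uris table, 200 per call
# to dbs.insert_urls().
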
def get_content_type(url):
    # Issue a HEAD request and pull the Content-Type value out of the raw response headers.
    hdr = fetch.fetch_contents(url, head=True)

    for h in hdr.split('\n'):
        if h.lower().startswith('content-type: '): return h.lower().split(':')[1].strip()

    return ''

def is_good_content_type(string):
    # Only text-like documents are worth parsing for proxy addresses.
    allowed_ct = [ 'text/html', 'text/plain', 'atom+xml' ]
    for ct in allowed_ct:
        if ct.lower() in string.lower(): return True
    return False

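# Example for the two helpers above (illustrative only; the exact header text
# depends on what fetch.fetch_contents(url, head=True) returns): a HEAD response
# containing "Content-Type: text/html; charset=utf-8" makes get_content_type()
# return 'text/html; charset=utf-8', which is_good_content_type() accepts
# because it contains 'text/html'.
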
def proxyleech(proxydb, urldb, url, stale_count, error, retrievals, proxies_added, content_type):
    # Synchronous variant of the crawl step; the threaded Leechered class below
    # implements the same flow and is what __main__ actually uses.
    if not content_type: content_type = get_content_type(url)

    if is_good_content_type(content_type):
        try: content = fetch.fetch_contents(url)
        except KeyboardInterrupt as e: raise e
        except: content = ''
    else:
        content = ''

    unique_count, new = fetch.extract_proxies(content, proxydb)

    if retrievals == 0: # new site
        if content != '' and unique_count == 0: # site works but has zero proxy addresses
            error = 99999
    else:
        if len(new) == 0:
            stale_count += 1
        else:
            stale_count = 0
        if content == '':
            error += 1
        else:
            retrievals += 1
            error = 0
        if unique_count:
            extract_urls(content, url)

    urldb.execute('UPDATE uris SET error=?,stale_count=?,check_time=?,retrievals=?,proxies_added=?,content_type=? where url=?', (error, stale_count, int(time.time()), retrievals, proxies_added+len(new), content_type, url))
    urldb.commit()

    if not len(new): return

    dbs.insert_proxies(proxydb, new, url)

def is_bad_url(uri, domain=None, samedomain=False):
    # if the uri needs to be from the same domain and the domains mismatch
    if samedomain and str(uri.split('/')[2]).lower() != str(domain).lower():
        return True
    for u in urignore:
        if re.findall(u, uri): return True
    return False

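# Example for is_bad_url(): with uri = 'http://example.com/list.html' (an
# illustrative address), uri.split('/')[2] is 'example.com', so samedomain=True
# keeps the link only when that matches the referring page's domain; the
# urignore regexes loaded from urignore.txt in __main__ can reject it as well.
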
def extract_urls(html, url):
    mytime = int(time.time())
    proto = url.split(':')[0]
    domain = url.split('/')[2]
    urls = []

    soup = BeautifulSoup(html, features='lxml')

    for a in soup.find_all('a', href=True):
        item = a['href'].encode('utf-8') if isinstance(a['href'], unicode) else a['href']
        item = item.strip()

        if item.startswith('www.'):
            item = 'http://%s' % item
        elif not item.startswith('http'):
            if not item.startswith('/'): item = '/%s' % item
            item = '%s://%s%s' % (proto, domain, item)

        elif is_bad_url(item, domain=domain, samedomain=config.ppf.extract_samedomain):
            continue
        if item not in urls: urls.append(item)

    if len(urls): dbs.insert_urls(urls, url, urldb) #insert_if_not_exists(urls)

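# Normalization example for the loop above (assuming the page being parsed is
# url = 'http://example.com/list.html'; example.com is illustrative):
#   href='www.proxysite.org'  -> 'http://www.proxysite.org'
#   href='page2.html'         -> 'http://example.com/page2.html'
#   href='/other/list'        -> 'http://example.com/other/list'
# Absolute http(s) links are dropped whenever is_bad_url() rejects them.
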
def import_proxies_from_file(proxydb, fn):
    content = open(fn, 'r').read()
    unique_count, new = fetch.extract_proxies(content, proxydb)
    if len(new):
        dbs.insert_proxies(proxydb, new, fn)
        return 0
    return 1

def serve_loop(hs, done):
    client_threads = []
    while not done.is_set():
        c = hs.wait_client()

        evt_done = threading.Event()
        cthread = threading.Thread(target=httpsrv_client_thread, args=(c, evt_done))
        cthread.daemon = True
        cthread.start()

        # reap client threads that have signalled completion
        ctrm = []
        for ct, ct_done in client_threads:
            if ct_done.is_set():
                ctrm.append((ct, ct_done))
                ct.join()

        if len(ctrm):
            client_threads = [ x for x in client_threads if x not in ctrm ]

        client_threads.append((cthread, evt_done))

def forbidden_page():
    return (
        '<!DOCTYPE html>\n'
        '<html>\n'
        '  <head>\n'
        '    <style>div.e{position:fixed;top:25%;bottom:25%;left:25%;right:25%;font-size:150px;text-align:center;}</style>\n'
        '    <title>Forbidden</title>\n'
        '  </head>\n'
        '  <body>\n'
        '    <div class="e">🖕</div>\n'
        '  </body>\n'
        '</html>')

def httpsrv_client_thread(c, evt_done):
    # NOTE: several helpers used here (watchlist, args, file_exists,
    # variables_from_request, render_site, render_empty, configpage) are not
    # defined in this file; the HTTP server is currently disabled in __main__
    # (start_server() is commented out), so this handler is effectively dormant.
    req = c.read_request()
    if req is None: pass
    elif len(watchlist) == 0:
        c.redirect('/config.html')
    elif os.path.isdir(req['url'][1:]):
        c.send(403, 'Forbidden', forbidden_page())
    elif req['url'] == '/':
        c.redirect('/index.html')
    elif req['url'].startswith('/index.html'):
        variables = variables_from_request(req)
        r, redir = render_site(variables)
        if redir != '':
            c.redirect(redir)
        else:
            if r == '': r = render_empty(variables=variables)
            c.send(200, "OK", r)
    elif '..' not in req['url'] and file_exists(os.getcwd() + req['url']):
        c.serve_file(os.getcwd() + req['url'])
    elif req['url'] == '/robots.txt':
        c.send(200, "OK", "User-agent: *\nDisallow: /")

    elif req['url'].startswith('/config.html'):
        if args.config > 0:
            variables = variables_from_request(req)
            r, redir = configpage(req, variables)
        else:
            redir = '/index.html'
        if redir != '':
            c.redirect(redir)
        else:
            if r == '': r = render_empty(variables=variables)
            c.send(200, "OK", r)

    else:
        c.send(404, "Not Found", "the requested file does not exist")
    c.disconnect()
    evt_done.set()

def start_server(ip, port):
    done = threading.Event()
    from httpsrv import HttpSrv
    hs = HttpSrv(ip, port)
    try:
        hs.setup()
    except socket.error as e:
        if e.errno == errno.EADDRINUSE:
            sys.stderr.write((
                "ERROR: server socket address in use\n"
                "wait a couple seconds and try again.\n"
                "in case you're in pdb, you need to quit it\n"))
            sys.exit(1)
        else:
            raise e

    t = threading.Thread(target=serve_loop, args=(hs, done))
    t.daemon = True
    t.start()
    return t, done

def extract_proxies(content):
    # Match IPv4:port pairs; the zero-width lookahead ensures the port is not
    # followed by another digit and also accepts a match at the very end of the text.
    matches = re.findall(r'([0-9]+(?:\.[0-9]+){3}:[0-9]{2,5})(?=\D|$)', fetch.cleanhtml(content))
    uniques_dict = {}
    for p in matches:
        uniques_dict[p] = True

    uniques = []
    for p in uniques_dict.keys():
        if fetch.is_usable_proxy(p): uniques.append(p)

    return uniques

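# Example for extract_proxies() above: in text such as
#   "Fast proxy 93.184.216.34:8080 updated today"
# the regex captures '93.184.216.34:8080'; duplicates are collapsed via the
# dict, and fetch.is_usable_proxy() (project-specific) filters the rest.
# The address shown here is only an illustration.
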
class Leechered(threading.Thread):
    #def __init__(self, proxydb, urldb, url, stale_count, error, retrievals, proxies_added, content_type):
    def __init__(self, url, stale_count, error, retrievals, proxies_added, content_type):
        self.status = 'nok'
        self.proxylist = []
        self.running = True
        self.url = url
        self.stale_count = stale_count
        self.error = error
        self.retrievals = retrievals
        self.proxies_added = proxies_added
        self.content_type = content_type
        self.execute = ''
        threading.Thread.__init__(self)

    def retrieve(self):
        return self.url, self.proxylist, self.stale_count, self.error, self.retrievals, self.content_type, self.proxies_added, self.execute

    # NOTE: the self.status attribute set in __init__ shadows this method, so
    # callers (the main loop below) read thread.status directly as a string.
    def status(self):
        return self.status

    def run(self):
        self.status = 'nok'

        if not self.content_type: self.content_type = get_content_type(self.url)

        if is_good_content_type(self.content_type):
            try: content = fetch.fetch_contents(self.url)
            except KeyboardInterrupt as e: raise e
            except: content = ''
        else:
            content = ''

        unique = extract_proxies(content)
        self.proxylist = [ proxy for proxy in unique if proxy not in _known_proxies ]
        proxy_count = len(self.proxylist)

        if self.retrievals == 0: # new site
            if content != '' and len(self.proxylist) == 0: # site works but has zero proxy addresses
                self.error += 1
                self.stale_count += 1
            elif proxy_count > 0:
                self.error = 0
                self.stale_count = 0
            else:
                self.error += 2
                self.stale_count += 2
        else: # not a new site
            # proxylist is empty
            if proxy_count == 0:
                self.stale_count += 1
            # proxylist is not empty: site is working
            else:
                self.stale_count = 0
                self.error = 0
            # site has no content
            if content == '':
                self.error += 1
                self.stale_count += 1
            #else:
            #    self.retrievals += 1
            #    self.error = 0
            #    self.stale_count = 0
            # site has proxies
            if proxy_count:
                self.error = 0
                self.stale_count = 0
                extract_urls(content, self.url)

        self.execute = (self.error, self.stale_count, int(time.time()), self.retrievals, self.proxies_added+len(self.proxylist), self.content_type, self.url)
        self.status = 'ok'

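# Usage sketch for Leechered (the main loop below does exactly this): construct
# it from a row of the uris table, start it, wait until thread.status is 'ok',
# then unpack thread.retrieve() and write the result back to the database.
#   t = Leechered(url, stale_count, error, retrievals, proxies_added, content_type)
#   t.start()
#   ... later, once t.status == 'ok':
#   url, proxylist, stale_count, error, retrievals, content_type, proxies_added, _ = t.retrieve()
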
if __name__ == '__main__':
    config.load()
    fetch.set_config(config)

    proxydb = mysqlite.mysqlite(config.watchd.database, str)
    dbs.create_table_if_not_exists(proxydb, 'proxylist')
    known = proxydb.execute('SELECT proxy FROM proxylist').fetchall()
    for k in known:
        _known_proxies[k[0]] = True

    with open('urignore.txt', 'r') as f:
        urignore = [ i.strip() for i in f.read().split('\n') if len(i.strip()) ]

    urldb = mysqlite.mysqlite(config.ppf.database, str)
    dbs.create_table_if_not_exists(urldb, 'uris')
    import_from_file('import.txt', urldb)
    if len(sys.argv) == 3 and sys.argv[1] == "--file":
        sys.exit(import_proxies_from_file(proxydb, sys.argv[2]))

    # start proxy watcher
    if config.watchd.threads > 0:
        watcherd = proxywatchd.Proxywatchd()
        watcherd.start()
    else:
        watcherd = None

    #start_server(config.httpd.listenip, config.httpd.port)

    #qurl = 'SELECT url,stale_count,error,retrievals,proxies_added,content_type FROM uris WHERE error < ? and (check_time+?+((error+stale_count)*?) <?) ORDER BY RANDOM() LIMIT 25'
    qurl = 'SELECT url,stale_count,error,retrievals,proxies_added,content_type FROM uris WHERE error < ? and (check_time+?+((error+stale_count)*?) <?) ORDER BY RANDOM()'
    #qurl = 'SELECT url,stale_count,error,retrievals,proxies_added,content_type FROM uris WHERE stale_count < ? and (check_time+?+((error+stale_count)*?) <?) ORDER BY RANDOM()'
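    # Row-selection logic in qurl above: a URL becomes due again once
    #   check_time + checktime + (error + stale_count) * perfail_checktime < now,
    # i.e. every accumulated error or stale check pushes the next visit further out.
    # With illustrative values checktime=3600 and perfail_checktime=600 (the real
    # numbers come from the config file), a URL with error+stale_count == 3 is
    # rechecked no sooner than 3600 + 3*600 = 5400 seconds after its last check.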
    threads = []
    rows = []
    rinc = 0
    reqtime = time.time() - 3600
    statusmsg = time.time()

    while True:
        try:
            time.sleep(random.random()/10)
            if (time.time() - statusmsg) > 180:
                _log('running %d thread(s) over %d' % (len(threads), config.ppf.threads), 'ppf')
                statusmsg = time.time()

            # refill the work queue when it runs dry, but at most every 3 seconds
            if not len(rows):
                if (time.time() - reqtime) > 3:
                    rows = urldb.execute(qurl, (config.ppf.max_fail, config.ppf.checktime, config.ppf.perfail_checktime, int(time.time()))).fetchall()
                    reqtime = time.time()
                    if len(rows) < config.ppf.threads:
                        time.sleep(60)
                        rows = []
                    else:
                        _log('handing %d job(s) to %d thread(s)' % ( len(rows), config.ppf.threads ), 'ppf')
                        #nao = time.time()
                        #args = [ (nao, row[0]) for row in rows ]
                        #urldb.executemany('UPDATE uris SET check_time=? where url=?', args)
                        #urldb.commit()

            # collect results from finished workers and write them back
            for thread in threads:
                if thread.status == 'ok':
                    url, proxylist, stale_count, error, retrievals, content_type, proxies_added, execute = thread.retrieve()
                    new = []
                    for p in proxylist:
                        if p not in _known_proxies:
                            new.append(p)
                            _known_proxies[p] = 1
                    execute = (error, stale_count, int(time.time()), retrievals, proxies_added+len(new), content_type, url)
                    urldb.execute('UPDATE uris SET error=?,stale_count=?,check_time=?,retrievals=?,proxies_added=?,content_type=? where url=?', execute)
                    urldb.commit()
                    if len(new): dbs.insert_proxies(proxydb, new, url)

            # drop finished threads and start new workers while capacity remains
            threads = [ thread for thread in threads if thread.is_alive() ]
            if len(threads) < config.ppf.threads and len(rows):
                row = random.choice(rows)
                urldb.execute('UPDATE uris SET check_time=? where url=?', (time.time(), row[0]))
                urldb.commit()
                rows.remove(row)
                t = Leechered(row[0], row[1], row[2], row[3], row[4], row[5])
                threads.append(t)
                t.start()
                #time.sleep(random.random()/100)

        except KeyboardInterrupt:
            if watcherd:
                watcherd.stop()
                watcherd.finish()
            break

    print '\r',