ppf: make scraper use extra proxies if available
 ppf.py | 28 +++++++++++++++++-----------
 1 file changed, 17 insertions(+), 11 deletions(-)

--- a/ppf.py
+++ b/ppf.py
@@ -27,8 +27,8 @@ def import_from_file(fn, sqlite):
         cinc = cinc + 200
 
 
-def get_content_type(url):
-    hdr = fetch.fetch_contents(url, head=True)
+def get_content_type(url, proxy):
+    hdr = fetch.fetch_contents(url, head=True, proxy=proxy)
 
     for h in hdr.split('\n'):
         if h.lower().startswith('content-type: '): return h.lower().split(':')[1].strip()
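For context: the fetch module itself is not part of this diff. Below is a minimal sketch of what a proxy-aware fetch_contents() could look like, assuming a urllib-based helper and proxy strings of the form 'proto://host:port'. Everything in it is a hypothetical reconstruction, not code from this repository:

import urllib.request

def fetch_contents(url, head=False, proxy=None, timeout=30):
    # Hypothetical sketch; the real fetch module is not shown in this commit.
    handlers = []
    if proxy:
        # Route both plain and TLS requests through the given proxy.
        handlers.append(urllib.request.ProxyHandler({'http': proxy, 'https': proxy}))
    opener = urllib.request.build_opener(*handlers)
    req = urllib.request.Request(url, method='HEAD' if head else 'GET')
    with opener.open(req, timeout=timeout) as resp:
        if head:
            # Return the raw header block; get_content_type() scans it line by line.
            return str(resp.headers)
        return resp.read().decode('utf-8', errors='replace')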
@@ -41,13 +41,14 @@ def is_good_content_type(string):
         if ct.lower() in string.lower(): return True
     return False
 
-def proxyleech(proxydb, urldb, url, stale_count, error, retrievals, proxies_added, content_type):
-    if not content_type: content_type = get_content_type(url)
+def proxyleech(proxydb, urldb, url, stale_count, error, retrievals, proxies_added, content_type, proxy):
+    if not content_type: content_type = get_content_type(url, proxy=proxy)
 
-    if is_good_content_type(content_type):
-        try: content = fetch.fetch_contents(url)
+    if is_good_content_type(content_type, proxy=proxy):
+        try: content = fetch.fetch_contents(url, proxy=proxy)
         except KeyboardInterrupt as e: raise e
-        except: content = ''
+        except: raise
+        #except: content = ''
     else:
         content = ''
 
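With the new signature, callers of proxyleech() pass the proxy through explicitly; presumably passing None keeps the old direct-connection behaviour. A hypothetical call, with placeholder database handles and URL:

# Hypothetical usage; proxydb, urldb and the URL are placeholders.
proxyleech(proxydb, urldb, 'http://example.com/proxies.txt',
           stale_count=0, error=0, retrievals=0, proxies_added=0,
           content_type=None, proxy='http://10.0.0.1:3128')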
@@ -225,7 +226,7 @@ def extract_proxies(content):
 
 class Leechered(threading.Thread):
     #def __init__(self, proxydb, urldb, url, stale_count, error, retrievals, proxies_added, content_type):
-    def __init__(self, url, stale_count, error, retrievals, proxies_added, content_type):
+    def __init__(self, url, stale_count, error, retrievals, proxies_added, content_type, proxy):
         self.status = 'nok'
         self.proxylist = []
         self.running = True
@@ -235,6 +236,7 @@ class Leechered(threading.Thread):
         self.retrievals = retrievals
         self.proxies_added = proxies_added
         self.content_type = content_type
+        self.proxy = proxy
         self.execute = ''
         threading.Thread.__init__(self)
 
@@ -246,10 +248,10 @@ class Leechered(threading.Thread):
     def run(self):
         self.status = 'nok'
 
-        if not self.content_type: self.content_type = get_content_type(self.url)
+        if not self.content_type: self.content_type = get_content_type(self.url, self.proxy)
 
         if is_good_content_type(self.content_type):
-            try: content = fetch.fetch_contents(self.url)
+            try: content = fetch.fetch_contents(self.url, proxy=self.proxy)
             except KeyboardInterrupt as e: raise e
             except: content = ''
         else:
@@ -353,6 +355,9 @@ if __name__ == '__main__':
         #urldb.commit()
 
+
+        _proxylist = [ '%s://%s' % (p[0], p[1]) for p in proxydb.execute('SELECT proto,proxy from proxylist where failed=0').fetchall() ]
+        if len(_proxylist) == 0: _proxylist = None
 
         for thread in threads:
             if thread.status == 'ok':
                 url, proxylist, stale_count, error, retrievals, content_type, proxies_added, execute = thread.retrieve()
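The proxy pool is rebuilt from the proxylist table on every pass of the main loop, skipping proxies already marked as failed. A small self-contained illustration of the assumed table layout and the resulting _proxylist value follows; the schema is inferred from the SELECT above and is not shown in this commit:

import sqlite3

proxydb = sqlite3.connect(':memory:')
proxydb.execute('CREATE TABLE proxylist (proto TEXT, proxy TEXT, failed INTEGER)')
proxydb.execute("INSERT INTO proxylist VALUES ('http', '10.0.0.1:3128', 0)")
proxydb.execute("INSERT INTO proxylist VALUES ('socks5', '10.0.0.2:1080', 1)")

_proxylist = [ '%s://%s' % (p[0], p[1]) for p in
               proxydb.execute('SELECT proto,proxy from proxylist where failed=0').fetchall() ]
print(_proxylist)  # ['http://10.0.0.1:3128'] -- the failed proxy is excluded
if len(_proxylist) == 0: _proxylist = None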
@@ -368,11 +373,12 @@ if __name__ == '__main__':
 
         threads = [ thread for thread in threads if thread.is_alive() ]
         if len(threads) < config.ppf.threads and len(rows):
+            p = random.sample(_proxylist, 5) if _proxylist is not None else None
             row = random.choice(rows)
             urldb.execute('UPDATE uris SET check_time=? where url=?', (time.time(), row[0]))
             urldb.commit()
             rows.remove(row)
-            t = Leechered(row[0], row[1], row[2], row[3], row[4], row[5])
+            t = Leechered(row[0], row[1], row[2], row[3], row[4], row[5], p)
             threads.append(t)
             t.start()
             #time.sleep(random.random()/100)
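Each new Leechered thread receives a random sample of five proxies from the pool. One property of random.sample() worth keeping in mind when reading this hunk: it raises ValueError when the population holds fewer items than the requested sample size. A short illustration, where the capped variant is an assumption rather than part of this commit:

import random

_proxylist = ['http://10.0.0.1:3128', 'http://10.0.0.2:3128']

# random.sample(_proxylist, 5) would raise ValueError here: 2 < 5.
# A capped variant (hypothetical, not in this commit):
p = random.sample(_proxylist, min(5, len(_proxylist))) if _proxylist else None
print(p)  # both proxies, in random order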