2018-08-09 15:42:21 +00:00
|
|
|
#!/usr/bin/env python3
|
|
|
|
# -*-coding:UTF-8 -*
|
|
|
|
|
|
|
|
import os
|
|
|
|
import sys
|
2018-08-13 07:23:14 +00:00
|
|
|
import re
|
2018-08-09 15:42:21 +00:00
|
|
|
import redis
|
|
|
|
import datetime
|
|
|
|
import time
|
|
|
|
import subprocess
|
2018-08-16 15:24:39 +00:00
|
|
|
import requests
|
2018-08-09 15:42:21 +00:00
|
|
|
|
2019-01-29 11:00:14 +00:00
|
|
|
from pyfaup.faup import Faup
|
|
|
|
|
2018-08-09 15:42:21 +00:00
|
|
|
sys.path.append(os.environ['AIL_BIN'])
|
|
|
|
from Helper import Process
|
|
|
|
from pubsublogger import publisher
|
|
|
|
|
2019-02-07 16:22:44 +00:00
|
|
|
# ======== GLOBAL VARIABLES ========

# pubsublogger configuration: publish log messages on this redis port/channel
publisher.port = 6380
publisher.channel = "Script"

# section of the AIL configuration file read by this module
config_section = 'Crawler'

# Setup the I/O queues
# Process wires this script into the AIL module pipeline (config + in/out queues)
p = Process(config_section)
|
|
|
|
|
|
|
|
# Crawler types this script may run as.
accepted_services = ['onion', 'regular']

# URL-extraction regexes, one per hidden-service type.
# Capture groups: (1) full url, (2) scheme, (3) userinfo, (4) password,
# (5) host (IPv4 / localhost / *.onion or *.i2p), then optional port and path.
# Callers use tuple index 4 from re.findall (= group 5, the host).
# BUG FIX: patterns are now raw strings — the previous plain strings relied on
# Python passing invalid escape sequences (\:, \-, \,, ...) through unchanged,
# which raises DeprecationWarning/SyntaxWarning on modern interpreters and is
# slated to become a SyntaxError. The characters reaching the regex engine are
# semantically unchanged.
dic_regex = {}
dic_regex['onion'] = r"((http|https|ftp)?(?:\://)?([a-zA-Z0-9\.\-]+(\:[a-zA-Z0-9\.&%\$\-]+)*@)*((25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9])\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[0-9])|localhost|([a-zA-Z0-9\-]+\.)*[a-zA-Z0-9\-]+\.onion)(\:[0-9]+)*(/($|[a-zA-Z0-9\.\,\?\'\\\+&%\$#\=~_\-]+))*)"
re.compile(dic_regex['onion'])   # fail fast at import time if the pattern is invalid
dic_regex['i2p'] = r"((http|https|ftp)?(?:\://)?([a-zA-Z0-9\.\-]+(\:[a-zA-Z0-9\.&%\$\-]+)*@)*((25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9])\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[1-9]|0)\.(25[0-5]|2[0-4][0-9]|[0-1]{1}[0-9]{2}|[1-9]{1}[0-9]{1}|[0-9])|localhost|([a-zA-Z0-9\-]+\.)*[a-zA-Z0-9\-]+\.i2p)(\:[0-9]+)*(/($|[a-zA-Z0-9\.\,\?\'\\\+&%\$#\=~_\-]+))*)"
re.compile(dic_regex['i2p'])

# NOTE(review): 'regular' reuses the i2p pattern, which only matches
# IP/localhost/*.i2p hosts — confirm this is intended for regular crawling.
dic_regex['regular'] = dic_regex['i2p']
|
|
|
|
|
|
|
|
# faup: URL parser used to split crawled URLs into scheme/host/port/etc.
faup = Faup()

# Root folder where pastes live; queue messages carry absolute paste paths and
# this prefix is stripped to obtain the relative paste name.
PASTES_FOLDER = os.path.join(os.environ['AIL_HOME'], p.config.get("Directories", "pastes"))
|
|
|
|
|
|
|
|
# ARDB: persistent paste metadata (parent/children relations, super_father)
r_serv_metadata = redis.StrictRedis(
    host=p.config.get("ARDB_Metadata", "host"),
    port=p.config.getint("ARDB_Metadata", "port"),
    db=p.config.getint("ARDB_Metadata", "db"),
    decode_responses=True)

# Redis cache: short-lived per-crawler status published for the web UI
r_cache = redis.StrictRedis(
    host=p.config.get("Redis_Cache", "host"),
    port=p.config.getint("Redis_Cache", "port"),
    db=p.config.getint("Redis_Cache", "db"),
    decode_responses=True)

# ARDB: hidden-service crawl queues, blacklists, history and domain metadata
r_onion = redis.StrictRedis(
    host=p.config.get("ARDB_Onion", "host"),
    port=p.config.getint("ARDB_Onion", "port"),
    db=p.config.getint("ARDB_Onion", "db"),
    decode_responses=True)
|
|
|
|
|
|
|
|
# ======== FUNCTIONS ========
|
2019-02-05 16:16:44 +00:00
|
|
|
def decode_val(value):
    """Return *value* as a str (or None).

    pyfaup may return url components either as bytes or as already-decoded
    str depending on its version; ``None`` (missing component) is passed
    through unchanged.

    BUG FIX: the previous implementation called ``.decode()`` unconditionally,
    raising AttributeError when the component was already a str.
    """
    if isinstance(value, bytes):
        value = value.decode()
    return value
|
|
|
|
|
|
|
|
def load_type_blacklist(type_service):
    """Load the domain blacklist for *type_service* into redis.

    Reads AIL_BIN/torcrawler/blacklist_<type_service>.txt and replaces the
    ``blacklist_<type_service>`` redis set with its lines.  A missing or
    unreadable file is silently ignored (best effort: no blacklist loaded).
    """
    # load domains blacklist
    try:
        # BUG FIX: the second component passed to os.path.join() must be
        # relative — an absolute component ('/torcrawler/...') makes join()
        # discard everything before it, so AIL_BIN was ignored entirely.
        blacklist_file = os.path.join(os.environ['AIL_BIN'], 'torcrawler/blacklist_{}.txt'.format(type_service))
        with open(blacklist_file, 'r') as f:
            # # TODO: # FIXME: remove this
            r_onion.delete('blacklist_{}'.format(type_service))
            lines = f.read().splitlines()
            for line in lines:
                r_onion.sadd('blacklist_{}'.format(type_service), line)
    except Exception:
        # best effort: keep whatever blacklist is already in redis
        pass
|
|
|
|
|
|
|
|
|
2018-09-27 13:43:03 +00:00
|
|
|
def on_error_send_message_back_in_queue(type_hidden_service, domain, message):
    """Requeue a crawl request whose processing failed.

    Puts *domain* back into the per-type domain queue (if absent) and replays
    the original *message* through the priority queue so it is retried first.
    """
    domain_queue = '{}_domain_crawler_queue'.format(type_hidden_service)
    priority_queue = '{}_crawler_priority_queue'.format(type_hidden_service)

    # send this msg back in the queue
    if not r_onion.sismember(domain_queue, domain):
        r_onion.sadd(domain_queue, domain)
    r_onion.sadd(priority_queue, message)
|
2018-09-27 13:43:03 +00:00
|
|
|
|
2018-09-18 14:20:13 +00:00
|
|
|
def crawl_onion(url, domain, date, date_month, message):
    """Crawl *url* through the splash docker via torcrawler/tor_crawler.py.

    Relies on module globals bound in __main__: splash_port, splash_url,
    type_hidden_service and paste (the paste that contained the url).
    Publishes crawler status to r_cache for the web UI, and terminates the
    whole process on unrecoverable errors (proxy refused, crawler failure,
    splash not answering 200).
    """
    # expose what this crawler instance is currently doing (web UI status)
    r_cache.hset('metadata_crawler:{}'.format(splash_port), 'crawling_domain', domain)
    r_cache.hset('metadata_crawler:{}'.format(splash_port), 'started_time', datetime.datetime.now().strftime("%Y/%m/%d - %H:%M.%S"))

    #if not r_onion.sismember('full_onion_up', domain) and not r_onion.sismember('onion_down:'+date , domain):
    # root paste of the crawl tree; falls back to the triggering paste itself
    super_father = r_serv_metadata.hget('paste_metadata:'+paste, 'super_father')
    if super_father is None:
        super_father = paste

    # wait until the splash docker answers; report it down every 6 failures
    retry = True
    nb_retry = 0
    while retry:
        try:
            r = requests.get(splash_url, timeout=30.0)
            retry = False
        except Exception:
            # TODO: relaunch docker or send error message
            nb_retry += 1

            if nb_retry == 6:
                on_error_send_message_back_in_queue(type_hidden_service, domain, message)
                publisher.error('{} SPASH DOWN'.format(splash_url))
                print('--------------------------------------')
                print(' \033[91m DOCKER SPLASH DOWN\033[0m')
                print(' {} DOWN'.format(splash_url))
                r_cache.hset('metadata_crawler:{}'.format(splash_port), 'status', 'SPLASH DOWN')
                # BUG FIX: was `nb_retry == 0`, a no-op comparison — the
                # counter never reset, so the SPLASH DOWN report fired only
                # once instead of every 6 failed attempts
                nb_retry = 0

            print(' \033[91m DOCKER SPLASH NOT AVAILABLE\033[0m')
            print(' Retry({}) in 10 seconds'.format(nb_retry))
            time.sleep(10)

    if r.status_code == 200:
        r_cache.hset('metadata_crawler:{}'.format(splash_port), 'status', 'Crawling')
        # run the scrapy-splash crawler as a subprocess and poll until it exits
        process = subprocess.Popen(["python", './torcrawler/tor_crawler.py', splash_url, type_hidden_service, url, domain, paste, super_father],
                                   stdout=subprocess.PIPE)
        while process.poll() is None:
            time.sleep(1)

        if process.returncode == 0:
            output = process.stdout.read().decode()
            print(output)
            # error: splash:Connection to proxy refused
            if 'Connection to proxy refused' in output:
                on_error_send_message_back_in_queue(type_hidden_service, domain, message)
                publisher.error('{} SPASH, PROXY DOWN OR BAD CONFIGURATION'.format(splash_url))
                print('------------------------------------------------------------------------')
                print(' \033[91m SPLASH: Connection to proxy refused')
                print('')
                print(' PROXY DOWN OR BAD CONFIGURATION\033[0m'.format(splash_url))
                print('------------------------------------------------------------------------')
                r_cache.hset('metadata_crawler:{}'.format(splash_port), 'status', 'Error')
                exit(-2)
        else:
            # crawler subprocess failed: dump its output and stop this crawler
            print(process.stdout.read())
            exit(-1)
    else:
        # splash answered but not with 200: requeue the request and stop
        on_error_send_message_back_in_queue(type_hidden_service, domain, message)
        print('--------------------------------------')
        print(' \033[91m DOCKER SPLASH DOWN\033[0m')
        print(' {} DOWN'.format(splash_url))
        # NOTE(review): status is set to 'Crawling' here even though the crawl
        # failed — 'Error'/'SPLASH DOWN' looks intended; confirm before changing
        r_cache.hset('metadata_crawler:{}'.format(splash_port), 'status', 'Crawling')
        exit(1)
|
2018-08-13 07:23:14 +00:00
|
|
|
|
2019-02-07 16:22:44 +00:00
|
|
|
# ======== MAIN ========
if __name__ == '__main__':

    if len(sys.argv) != 3:
        #print('usage:', 'Crawler.py', 'type_hidden_service (onion or i2p or regular)', 'splash_port')
        print('usage:', 'Crawler.py', 'mode (manual or automatic)', 'splash_port')
        exit(1)

    # 'manual' or 'automatic' queue consumption
    mode = sys.argv[1]
    # port of the splash docker this crawler talks to (also keys its status)
    splash_port = sys.argv[2]

    # BUG FIX: type_hidden_service was only assigned when mode == 'automatic',
    # leaving the name unbound (NameError below) in manual mode; both modes
    # currently crawl onion services.
    type_hidden_service = 'onion'

    # verify crawler type (type_hidden_service)
    if type_hidden_service not in accepted_services:
        print('incorrect crawler type: {}'.format(type_hidden_service))
        exit(0)
    else:
        publisher.info("Script Crawler started")

    # load domains blacklist
    load_type_blacklist(type_hidden_service)

    splash_url = '{}:{}'.format( p.config.get("Crawler", "splash_url_onion"), splash_port)
    print('splash url: {}'.format(splash_url))

    crawler_depth_limit = p.config.getint("Crawler", "crawler_depth_limit")

    # Crawler status
    r_cache.sadd('all_crawler:{}'.format(type_hidden_service), splash_port)
    r_cache.hset('metadata_crawler:{}'.format(splash_port), 'status', 'Waiting')
    r_cache.hset('metadata_crawler:{}'.format(splash_port), 'started_time', datetime.datetime.now().strftime("%Y/%m/%d - %H:%M.%S"))

    while True:

        if mode == 'automatic':
            # Priority Queue - Recovering the streamed message informations.
            message = r_onion.spop('{}_crawler_priority_queue'.format(type_hidden_service))

            if message is None:
                # Recovering the streamed message informations.
                message = r_onion.spop('{}_crawler_queue'.format(type_hidden_service))
        else:
            # BUG FIX: manual mode previously left `message` unbound
            # (NameError on first loop iteration); treat it as "no work".
            # NOTE(review): confirm where manual-mode messages should be
            # popped from — this branch was an empty `pass` before.
            message = None

        if message is not None:

            splitted = message.split(';')
            if len(splitted) == 2:
                url, paste = splitted
                # queue messages carry absolute paste paths; keep them relative
                paste = paste.replace(PASTES_FOLDER+'/', '')

                # extract data from url
                faup.decode(url)
                url_unpack = faup.get()
                url = decode_val(url_unpack['url'])
                port = decode_val(url_unpack['port'])
                scheme = decode_val(url_unpack['scheme'])
                domain = decode_val(url_unpack['domain'])
                host = decode_val(url_unpack['domain'])

                # Add Scheme to url
                if scheme is None:
                    url = 'http://{}'.format(url)
                # domain_url must be defined for every url: it is used below
                # even when the url already carried a scheme
                domain_url = 'http://{}'.format(domain)

                # remove url to crawl from queue
                r_onion.srem('{}_domain_crawler_queue'.format(type_hidden_service), domain)

                print()
                print()
                print('\033[92m------------------START CRAWLER------------------\033[0m')
                print('crawler type: {}'.format(type_hidden_service))
                print('\033[92m-------------------------------------------------\033[0m')
                print('url: {}'.format(url))
                print('domain: {}'.format(domain))
                print('domain_url: {}'.format(domain_url))

                if not r_onion.sismember('blacklist_{}'.format(type_hidden_service), domain):

                    date = datetime.datetime.now().strftime("%Y%m%d")
                    date_month = datetime.datetime.now().strftime("%Y%m")

                    # crawl only if the domain was not seen up this month and
                    # is not already marked down today
                    if not r_onion.sismember('month_{}_up:{}'.format(type_hidden_service, date_month), domain) and not r_onion.sismember('{}_down:{}'.format(type_hidden_service, date), domain):

                        # first seen
                        if not r_onion.hexists('{}_metadata:{}'.format(type_hidden_service, domain), 'first_seen'):
                            r_onion.hset('{}_metadata:{}'.format(type_hidden_service, domain), 'first_seen', date)

                        # last_father
                        r_onion.hset('{}_metadata:{}'.format(type_hidden_service, domain), 'paste_parent', paste)

                        # last check
                        r_onion.hset('{}_metadata:{}'.format(type_hidden_service, domain), 'last_check', date)

                        # Launch Scrapy-Splash Crawler
                        crawl_onion(url, domain, date, date_month, message)

                        # Crawl Domain
                        if url != domain_url:
                            #Crawl Domain with port number
                            if port is not None:
                                print('{}:{}'.format(domain_url, port))
                                crawl_onion('{}:{}'.format(domain_url, port), domain, date, date_month, message)
                            #Crawl without port number
                            print(domain_url)
                            crawl_onion(domain_url, domain, date, date_month, message)

                        # update last check
                        r_onion.hset('{}_metadata:{}'.format(type_hidden_service, domain), 'last_check', date)

                    # save down onion
                    if not r_onion.sismember('{}_up:{}'.format(type_hidden_service, date), domain):
                        r_onion.sadd('{}_down:{}'.format(type_hidden_service, date), domain)
                    else:
                        #r_onion.hincrby('{}_link_up'.format(type_hidden_service), url, 1)
                        # domain is up this month: tag the parent paste once
                        # children pastes exist for it
                        if r_onion.sismember('month_{}_up:{}'.format(type_hidden_service, date_month), domain) and r_serv_metadata.exists('paste_children:'+paste):
                            msg = 'infoleak:automatic-detection="{}";{}'.format(type_hidden_service, paste)
                            p.populate_set_out(msg, 'Tags')

                    # add onion screenshot history
                    # add crawled days
                    if r_onion.lindex('{}_history:{}'.format(type_hidden_service, domain), 0) != date:
                        r_onion.lpush('{}_history:{}'.format(type_hidden_service, domain), date)
                    # add crawled history by date
                    r_onion.lpush('{}_history:{}:{}'.format(type_hidden_service, domain, date), paste)

                    if mode == 'automatic':
                        # check external onions links (full_crawl)
                        external_domains = set()
                        for link in r_onion.smembers('domain_{}_external_links:{}'.format(type_hidden_service, domain)):
                            external_domain = re.findall(dic_regex[type_hidden_service], link)
                            # BUG FIX: was re.findall(url_i2p, link) — url_i2p
                            # is defined nowhere in this file and raised
                            # NameError at runtime; use the i2p regex instead
                            external_domain.extend(re.findall(dic_regex['i2p'], link))
                            if len(external_domain) > 0:
                                # tuple index 4 = regex group 5 = the host
                                external_domain = external_domain[0][4]
                            else:
                                continue
                            if '.onion' in external_domain and external_domain != domain:
                                external_domains.add(external_domain)
                            elif '.i2p' in external_domain and external_domain != domain:
                                external_domains.add(external_domain)
                        # many distinct external hidden services => likely a
                        # link directory / potential source
                        if len(external_domains) >= 10:
                            r_onion.sadd('{}_potential_source'.format(type_hidden_service), domain)
                        r_onion.delete('domain_{}_external_links:{}'.format(type_hidden_service, domain))
                        print(r_onion.smembers('domain_{}_external_links:{}'.format(type_hidden_service, domain)))

                    # update list, last crawled sites
                    r_onion.lpush('last_{}'.format(type_hidden_service), domain)
                    r_onion.ltrim('last_{}'.format(type_hidden_service), 0, 15)

                    #update crawler status
                    r_cache.hset('metadata_crawler:{}'.format(splash_port), 'status', 'Waiting')
                    r_cache.hdel('metadata_crawler:{}'.format(splash_port), 'crawling_domain')
                else:
                    print(' Blacklisted Site')
                    print()
                    print()

            else:
                # malformed message (not 'url;paste'): drop it
                continue
        else:
            # nothing to crawl: idle before polling the queues again
            time.sleep(1)
|