frontend python 3.5 upgrade

This commit is contained in:
Terrtia 2018-04-17 16:06:32 +02:00
parent 51d453be13
commit 8571a86344
14 changed files with 165 additions and 76 deletions

View file

@@ -85,8 +85,6 @@ class Paste(object):
         # in a month folder which is itself in a year folder.
         # /year/month/day/paste.gz
 
-        # TODO use bytes ?
-
         var = self.p_path.split('/')
         self.p_date = Date(var[-4], var[-3], var[-2])
         self.p_source = var[-5]
@@ -280,7 +278,11 @@ class Paste(object):
     def _get_p_duplicate(self):
         self.p_duplicate = self.store.hget(self.p_path, "p_duplicate")
-        return self.p_duplicate if self.p_duplicate is not None else '[]'
+        if self.p_duplicate is not None:
+            self.p_duplicate = self.p_duplicate.decode('utf8')
+            return self.p_duplicate
+        else:
+            return '[]'
 
     def save_all_attributes_redis(self, key=None):
         """

View file

@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3.5
 # -*-coding:UTF-8 -*
 '''

View file

@@ -1,8 +1,8 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3.5
 # -*-coding:UTF-8 -*
 
 import redis
-import ConfigParser
+import configparser
 import json
 import datetime
 import time
@@ -72,7 +72,7 @@ with open('templates/header_base.html', 'r') as f:
 modified_header = complete_header
 
 #Add the header in the supplied order
-for module_name, txt in to_add_to_header_dico.items():
+for module_name, txt in list(to_add_to_header_dico.items()):
     to_replace = '<!--{}-->'.format(module_name)
     if to_replace in complete_header:
         modified_header = modified_header.replace(to_replace, txt)
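The list() wrapper around to_add_to_header_dico.items() is standard 2to3 output: in Python 3, items() returns a live view, and a view cannot be iterated while the dict changes size. This loop never mutates the dict, so a bare items() would also work here; a short sketch of the difference:

    # list() snapshots the items up front; iterating a bare view while
    # mutating would raise "RuntimeError: dictionary changed size
    # during iteration".
    headers = {'modA': '<li>A</li>', 'modB': '<li>B</li>'}
    for name, txt in list(headers.items()):
        headers.pop(name)
    print(headers)  # {}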

View file

@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3.5
 # -*-coding:UTF-8 -*
 "Hepler to create a new webpage associated with a module."

View file

@@ -1,10 +1,10 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3.5
 # -*-coding:UTF-8 -*
 
 '''
 Flask global variables shared accross modules
 '''
-import ConfigParser
+import configparser
 import redis
 import os
@@ -18,7 +18,7 @@ if not os.path.exists(configfile):
         Did you set environment variables? \
         Or activate the virtualenv.')
 
-cfg = ConfigParser.ConfigParser()
+cfg = configparser.ConfigParser()
 cfg.read(configfile)
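ConfigParser was renamed to configparser in Python 3 (PEP 3108), so the import and the cfg constructor change together. Since the shebang pins python3.5, the plain rename is enough; a sketch of a dual-version import, in case 2/3 compatibility were ever wanted (hypothetical, not what the commit does):

    try:
        import configparser                  # Python 3 name
    except ImportError:
        import ConfigParser as configparser  # Python 2 fallback

    cfg = configparser.ConfigParser()
    cfg.read('config.cfg')  # hypothetical path; the real one comes from
                            # the environment variables mentioned above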

View file

@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3.5
 # -*-coding:UTF-8 -*
 '''
@@ -55,9 +55,10 @@ def event_stream_getImportantPasteByModule(module_name, year):
     index = 0
     all_pastes_list = getPastebyType(r_serv_db[year], module_name)
     for path in all_pastes_list:
+        path = path.decode('utf8')
         index += 1
         paste = Paste.Paste(path)
-        content = paste.get_p_content().decode('utf8', 'ignore')
+        content = paste.get_p_content()
         content_range = max_preview_char if len(content)>max_preview_char else len(content)-1
         curr_date = str(paste._get_p_date())
         curr_date = curr_date[0:4]+'/'+curr_date[4:6]+'/'+curr_date[6:]
@@ -92,9 +93,12 @@ def importantPasteByModule():
     allPastes = getPastebyType(r_serv_db[currentSelectYear], module_name)
     for path in allPastes[0:10]:
+        path = path.decode('utf8')
         all_path.append(path)
+        #print(path)
+        #print(type(path))
         paste = Paste.Paste(path)
-        content = paste.get_p_content().decode('utf8', 'ignore')
+        content = paste.get_p_content()
         content_range = max_preview_char if len(content)>max_preview_char else len(content)-1
         all_content.append(content[0:content_range].replace("\"", "\'").replace("\r", " ").replace("\n", " "))
         curr_date = str(paste._get_p_date())
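Both hunks above follow the same recipe: paths coming out of Redis are decoded once at the top of the loop, and the .decode('utf8', 'ignore') on get_p_content() is dropped, which implies get_p_content() itself returns str after this upgrade. A tiny helper in the same spirit (hypothetical, not part of the commit):

    def to_str(value, encoding='utf8'):
        # Redis replies arrive as bytes under Python 3; leave str untouched.
        if isinstance(value, bytes):
            return value.decode(encoding)
        return value

    print(to_str(b'/2018/04/17/example.gz'))  # '/2018/04/17/example.gz'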

View file

@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3.5
 # -*-coding:UTF-8 -*
 '''
@@ -26,20 +26,44 @@ def event_stream():
     pubsub = r_serv_log.pubsub()
     pubsub.psubscribe("Script" + '.*')
     for msg in pubsub.listen():
-        level = msg['channel'].split('.')[1]
+        # bytes conversion
+        try:
+            type = msg['type'].decode('utf8')
+        except:
+            type = msg['type']
+        try:
+            pattern = msg['pattern'].decode('utf8')
+        except:
+            pattern = msg['pattern']
+        try:
+            channel = msg['channel'].decode('utf8')
+        except:
+            channel = msg['channel']
+        try:
+            data = msg['data'].decode('utf8')
+        except:
+            data = msg['data']
+
+        msg = {'channel': channel, 'type': type, 'pattern': pattern, 'data': data}
+        level = (msg['channel']).split('.')[1]
         if msg['type'] == 'pmessage' and level != "DEBUG":
             yield 'data: %s\n\n' % json.dumps(msg)
 
 def get_queues(r):
     # We may want to put the llen in a pipeline to do only one query.
     newData = []
-    for queue, card in r.hgetall("queues").iteritems():
+    for queue, card in r.hgetall("queues").items():
+        queue = queue.decode('utf8')
+        card = card.decode('utf8')
         key = "MODULE_" + queue + "_"
         keySet = "MODULE_TYPE_" + queue
 
         for moduleNum in r.smembers(keySet):
             moduleNum = moduleNum.decode('utf8')
-            value = ( r.get(key + str(moduleNum)) ).decode('utf8')
+            value = r.get(key + str(moduleNum))
 
             if value is not None:
                 timestamp, path = value.split(", ")
 
                 if timestamp is not None:
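The four try/except blocks guard against mixed types in pubsub messages: redis-py delivers 'data' as an int for subscribe confirmations and as bytes for real messages, so an unconditional .decode would raise AttributeError. A more compact sketch of the same guard (hypothetical helper name):

    def maybe_decode(value):
        # Only bytes need decoding; ints and str pass through unchanged.
        return value.decode('utf8') if isinstance(value, bytes) else value

    print(maybe_decode(b'Script.INFO'))  # 'Script.INFO'
    print(maybe_decode(1))               # 1 (psubscribe confirmation count)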

View file

@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3.5
 # -*-coding:UTF-8 -*
 '''

View file

@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3.5
 # -*-coding:UTF-8 -*
 '''
@@ -108,7 +108,7 @@ def search():
     for path in r_serv_pasteName.smembers(q[0]):
         r.append(path)
         paste = Paste.Paste(path)
-        content = paste.get_p_content().decode('utf8', 'ignore')
+        content = paste.get_p_content()
         content_range = max_preview_char if len(content)>max_preview_char else len(content)-1
         c.append(content[0:content_range])
         curr_date = str(paste._get_p_date())
@@ -126,7 +126,7 @@ def search():
         for x in results:
             r.append(x.items()[0][1])
             paste = Paste.Paste(x.items()[0][1])
-            content = paste.get_p_content().decode('utf8', 'ignore')
+            content = paste.get_p_content()
             content_range = max_preview_char if len(content)>max_preview_char else len(content)-1
             c.append(content[0:content_range])
             curr_date = str(paste._get_p_date())
@@ -175,7 +175,7 @@ def get_more_search_result():
        for x in results:
            path_array.append(x.items()[0][1])
            paste = Paste.Paste(x.items()[0][1])
-           content = paste.get_p_content().decode('utf8', 'ignore')
+           content = paste.get_p_content()
           content_range = max_preview_char if len(content)>max_preview_char else len(content)-1
           preview_array.append(content[0:content_range])
           curr_date = str(paste._get_p_date())
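All three search routes repeat the same preview expression. Note that the else branch slices to len(content)-1, so pastes shorter than max_preview_char lose their final character; a sketch of the pattern factored out (hypothetical helper, preserving that behaviour):

    def preview(content, max_preview_char=250):  # 250 is an assumed default
        content_range = max_preview_char if len(content) > max_preview_char else len(content)-1
        return content[0:content_range]

    print(preview('x' * 10))  # 9 characters, not 10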

View file

@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3.5
 # -*-coding:UTF-8 -*
 '''
@@ -57,16 +57,30 @@ def sentiment_analysis_getplotdata():
     if getAllProviders == 'True':
         if allProvider == "True":
             range_providers = r_serv_charts.smembers('all_provider_set')
-            return jsonify(list(range_providers))
+
+            range_providers_str = []
+            for domain in range_providers:
+                m = domain.decode('utf8')
+                range_providers_str.append(m)
+
+            return jsonify(list(range_providers_str))
         else:
             range_providers = r_serv_charts.zrevrangebyscore('providers_set_'+ get_date_range(0)[0], '+inf', '-inf', start=0, num=8)
             # if empty, get yesterday top providers
             range_providers = r_serv_charts.zrevrangebyscore('providers_set_'+ get_date_range(1)[1], '+inf', '-inf', start=0, num=8) if range_providers == [] else range_providers
             # if still empty, takes from all providers
             if range_providers == []:
-                print 'today provider empty'
+                print('today provider empty')
                 range_providers = r_serv_charts.smembers('all_provider_set')
-            return jsonify(list(range_providers))
+
+            # decode bytes
+            range_providers_str = []
+            for domain in range_providers:
+                m = domain.decode('utf8')
+                range_providers_str.append(m)
+
+            return jsonify(list(range_providers_str))
 
     elif provider is not None:
         to_return = {}
@@ -78,7 +92,7 @@ def sentiment_analysis_getplotdata():
             list_value = []
             for cur_id in r_serv_sentiment.smembers(cur_set_name):
-                cur_value = r_serv_sentiment.get(cur_id)
+                cur_value = (r_serv_sentiment.get(cur_id)).decode('utf8')
                 list_value.append(cur_value)
             list_date[cur_timestamp] = list_value
         to_return[provider] = list_date
@@ -101,7 +115,7 @@ def sentiment_analysis_plot_tool_getdata():
     if getProviders == 'True':
         providers = []
         for cur_provider in r_serv_charts.smembers('all_provider_set'):
-            providers.append(cur_provider)
+            providers.append(cur_provider.decode('utf8'))
         return jsonify(providers)
 
     else:
@@ -130,7 +144,7 @@ def sentiment_analysis_plot_tool_getdata():
             list_value = []
             for cur_id in r_serv_sentiment.smembers(cur_set_name):
-                cur_value = r_serv_sentiment.get(cur_id)
+                cur_value = (r_serv_sentiment.get(cur_id)).decode('utf8')
                 list_value.append(cur_value)
             list_date[cur_timestamp] = list_value
         to_return[cur_provider] = list_date
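The decode loops added above could equally be comprehensions; note also that r_serv_sentiment.get() returns None for a missing key, so the new (...).decode('utf8') would raise AttributeError in that case. A self-contained sketch of the loop as a comprehension:

    range_providers = [b'pastebin.com', b'slexy.org']  # stand-in for smembers()

    range_providers_str = [domain.decode('utf8') for domain in range_providers]
    print(range_providers_str)  # ['pastebin.com', 'slexy.org']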

View file

@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3.5
 # -*-coding:UTF-8 -*
 '''
@@ -38,7 +38,7 @@ def showpaste(content_range):
     p_size = paste.p_size
     p_mime = paste.p_mime
     p_lineinfo = paste.get_lines_info()
-    p_content = paste.get_p_content().decode('utf-8', 'ignore')
+    p_content = paste.get_p_content()
     p_duplicate_full_list = json.loads(paste._get_p_duplicate())
     p_duplicate_list = []
     p_simil_list = []
@@ -52,7 +52,7 @@ def showpaste(content_range):
         else:
             dup_list[2] = int(dup_list[2])
 
-    p_duplicate_full_list.sort(lambda x,y: cmp(x[2], y[2]), reverse=True)
+    #p_duplicate_full_list.sort(lambda x,y: cmp(x[2], y[2]), reverse=True)
 
     # Combine multiple duplicate paste name and format for display
     new_dup_list = []
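The sort is commented out rather than ported: Python 3 removed both the cmp() builtin and the cmp= argument of list.sort(). The direct translation uses key=; a sketch of what the line would look like if re-enabled, assuming a numeric similarity at index 2 as the int(dup_list[2]) above suggests:

    p_duplicate_full_list = [['sha1', 'path_b', 85], ['sha1', 'path_a', 97]]

    # Python 3 equivalent of sort(lambda x, y: cmp(x[2], y[2]), reverse=True)
    p_duplicate_full_list.sort(key=lambda x: x[2], reverse=True)
    print(p_duplicate_full_list)  # highest similarity score first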
@@ -111,7 +111,7 @@ def showpreviewpaste():
 def getmoredata():
     requested_path = request.args.get('paste', '')
     paste = Paste.Paste(requested_path)
-    p_content = paste.get_p_content().decode('utf-8', 'ignore')
+    p_content = paste.get_p_content()
     to_return = p_content[max_preview_modal-1:]
     return to_return
@@ -126,8 +126,8 @@ def showDiff():
     if maxLengthLine1 > DiffMaxLineLength or maxLengthLine2 > DiffMaxLineLength:
         return "Can't make the difference as the lines are too long."
     htmlD = difflib.HtmlDiff()
-    lines1 = p1.get_p_content().decode('utf8', 'ignore').splitlines()
-    lines2 = p2.get_p_content().decode('utf8', 'ignore').splitlines()
+    lines1 = p1.get_p_content().splitlines()
+    lines2 = p2.get_p_content().splitlines()
     the_html = htmlD.make_file(lines1, lines2)
     return the_html
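With get_p_content() returning str, its output feeds difflib directly. A self-contained sketch of the same HtmlDiff flow used by showDiff():

    import difflib

    lines1 = 'alpha\nbeta\ngamma'.splitlines()
    lines2 = 'alpha\nBETA\ngamma'.splitlines()

    # Build a complete HTML page containing a side-by-side diff table.
    htmlD = difflib.HtmlDiff()
    the_html = htmlD.make_file(lines1, lines2)
    print(the_html[:60])  # start of the generated HTML document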

View file

@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3.5
 # -*-coding:UTF-8 -*
 '''
@@ -72,7 +72,7 @@ def Term_getValueOverRange(word, startDate, num_day, per_paste=""):
     curr_to_return = 0
     for timestamp in range(startDate, startDate - max(num_day)*oneDay, -oneDay):
         value = r_serv_term.hget(per_paste+str(timestamp), word)
-        curr_to_return += int(value) if value is not None else 0
+        curr_to_return += int(value.decode('utf8')) if value is not None else 0
         for i in num_day:
             if passed_days == i-1:
                 to_return.append(curr_to_return)
@@ -251,13 +251,13 @@ def terms_management_query_paste():
     # check if regex or not
     if term.startswith('/') and term.endswith('/'):
         set_paste_name = "regex_" + term
-        track_list_path = r_serv_term.smembers(set_paste_name)
+        track_list_path = (r_serv_term.smembers(set_paste_name)).decode('utf8')
     elif term.startswith('\\') and term.endswith('\\'):
         set_paste_name = "set_" + term
-        track_list_path = r_serv_term.smembers(set_paste_name)
+        track_list_path = (r_serv_term.smembers(set_paste_name)).decode('utf8')
     else:
         set_paste_name = "tracked_" + term
-        track_list_path = r_serv_term.smembers(set_paste_name)
+        track_list_path = (r_serv_term.smembers(set_paste_name)).decode('utf8')
 
     for path in track_list_path:
         paste = Paste.Paste(path)
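One caveat in the hunk above: smembers() returns a set of bytes, and a set object has no .decode method, so all three new track_list_path lines raise AttributeError as soon as the route runs. A sketch of the per-member decode presumably intended:

    members = {b'/2018/04/17/a.gz', b'/2018/04/17/b.gz'}  # stand-in for smembers()

    # Decode each member, not the set itself:
    track_list_path = [p.decode('utf8') for p in members]
    print(sorted(track_list_path))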
@@ -268,7 +268,7 @@ def terms_management_query_paste():
         p_size = paste.p_size
         p_mime = paste.p_mime
         p_lineinfo = paste.get_lines_info()
-        p_content = paste.get_p_content().decode('utf-8', 'ignore')
+        p_content = paste.get_p_content()
         if p_content != 0:
             p_content = p_content[0:400]
         paste_info.append({"path": path, "date": p_date, "source": p_source, "encoding": p_encoding, "size": p_size, "mime": p_mime, "lineinfo": p_lineinfo, "content": p_content})
@@ -310,7 +310,7 @@ def terms_management_action():
     term = request.args.get('term')
     notificationEmailsParam = request.args.get('emailAddresses')
 
-    if action is None or term is None:
+    if action is None or term is None or notificationEmailsParam is None:
         return "None"
     else:
         if section == "followTerm":
@@ -386,7 +386,7 @@ def terms_management_action():
             r_serv_term.hdel(TrackedRegexDate_Name, term)
         elif term.startswith('\\') and term.endswith('\\'):
             r_serv_term.srem(TrackedSetSet_Name, term)
-            print(term)
+            #print(term)
             r_serv_term.hdel(TrackedSetDate_Name, term)
         else:
             r_serv_term.srem(TrackedTermsSet_Name, term.lower())
@@ -499,7 +499,7 @@ def terms_plot_top_data():
            curr_value_range = int(value) if value is not None else 0
            value_range.append([timestamp, curr_value_range])
 
-        to_return.append([term, value_range, tot_value, position])
+        to_return.append([term.decode('utf8'), value_range, tot_value, position])
 
     return jsonify(to_return)
@@ -534,7 +534,7 @@ def credentials_management_query_paste():
 @terms.route("/credentials_management_action/", methods=['GET'])
 def cred_management_action():
 
-    supplied = request.args.get('term').encode('utf-8')
+    supplied = request.args.get('term')
     action = request.args.get('action')
     section = request.args.get('section')
     extensive = request.args.get('extensive')
@@ -557,6 +557,7 @@ def cred_management_action():
     iter_num = 0
     tot_iter = len(AllUsernameInRedis)*len(possibilities)
     for tempUsername in AllUsernameInRedis:
+        tempUsername = tempUsername.decode('utf8')
         for poss in possibilities:
             #FIXME print progress
             if(iter_num % int(tot_iter/20) == 0):
@@ -565,7 +566,7 @@ def cred_management_action():
             iter_num += 1
             if poss in tempUsername:
-                num = r_serv_cred.hget(REDIS_KEY_ALL_CRED_SET, tempUsername)
+                num = (r_serv_cred.hget(REDIS_KEY_ALL_CRED_SET, tempUsername)).decode('utf8')
                 if num is not None:
                     uniq_num_set.add(num)
         for num in r_serv_cred.smembers(tempUsername):
@@ -574,7 +575,7 @@ def cred_management_action():
     data = {'usr': [], 'path': [], 'numPaste': [], 'simil': []}
     for Unum in uniq_num_set:
         levenRatio = 2.0
-        username = r_serv_cred.hget(REDIS_KEY_ALL_CRED_SET_REV, Unum)
+        username = (r_serv_cred.hget(REDIS_KEY_ALL_CRED_SET_REV, Unum)).decode('utf8')
 
         # Calculate Levenshtein distance, ignore negative ratio
         supp_splitted = supplied.split()
@@ -585,9 +586,21 @@ def cred_management_action():
             levenRatioStr = "{:.1%}".format(levenRatio)
         data['usr'].append(username)
+
+        try:
+            Unum = Unum.decode('utf8')
+        except:
+            pass
+
         allPathNum = list(r_serv_cred.smembers(REDIS_KEY_MAP_CRED_TO_PATH+'_'+Unum))
-        data['path'].append(allPathNum)
-        data['numPaste'].append(len(allPathNum))
+
+        # decode bytes
+        allPathNum_str = []
+        for p in allPathNum:
+            allPathNum_str.append(p.decode('utf8'))
+
+        data['path'].append(allPathNum_str)
+        data['numPaste'].append(len(allPathNum_str))
         data['simil'].append(levenRatioStr)
 
     to_return = {}
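Two details in cred_management_action are worth noting: hget() returns None for a missing field, and the new num = (...hget(...)).decode('utf8') decodes before the "if num is not None" test, so that guard can no longer fire; and Unum needs the try/except because it can arrive as bytes or str depending on its origin. A combined sketch that keeps the None check meaningful (hypothetical helper):

    def decode_or_none(value):
        # hget() yields None when the field is absent; decode only real replies.
        if value is None:
            return None
        return value.decode('utf8') if isinstance(value, bytes) else value

    print(decode_or_none(b'42'))  # '42'
    print(decode_or_none(None))   # None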

View file

@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3.5
 # -*-coding:UTF-8 -*
 '''
@@ -28,6 +28,7 @@ def get_date_range(num_day):
     for i in range(0, num_day+1):
         date_list.append(date.substract_day(i))
     return date_list
+
@@ -47,6 +48,8 @@ def progressionCharts():
     # Retreive all data from the last num_day
     for date in date_range:
         curr_value = r_serv_charts.hget(attribute_name, date)
+        if curr_value is not None:
+            curr_value = curr_value.decode('utf8')
         bar_values.append([date[0:4]+'/'+date[4:6]+'/'+date[6:8], int(curr_value if curr_value is not None else 0)])
     bar_values.insert(0, attribute_name)
     return jsonify(bar_values)
@@ -54,7 +57,14 @@ def progressionCharts():
     else:
         redis_progression_name = "z_top_progression_" + trending_name
         keyw_value = r_serv_charts.zrevrangebyscore(redis_progression_name, '+inf', '-inf', withscores=True, start=0, num=10)
-        return jsonify(keyw_value)
+
+        # decode bytes
+        keyw_value_str = []
+        for domain, value in keyw_value:
+            m = domain.decode('utf8'), value
+            keyw_value_str.append(m)
+
+        return jsonify(keyw_value_str)
 
 @trendings.route("/wordstrending/")
 def wordstrending():
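With withscores=True, zrevrangebyscore returns (member, score) pairs in which only the member is bytes; the loop above decodes the member and keeps the float score, and the next module below repeats the identical pattern. A comprehension sketch:

    keyw_value = [(b'keyword1', 12.0), (b'keyword2', 7.0)]  # stand-in reply

    keyw_value_str = [(domain.decode('utf8'), score) for domain, score in keyw_value]
    print(keyw_value_str)  # [('keyword1', 12.0), ('keyword2', 7.0)]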

View file

@@ -1,4 +1,4 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python3.5
 # -*-coding:UTF-8 -*
 '''
@@ -28,11 +28,18 @@ def get_top_relevant_data(server, module_name):
     for date in get_date_range(15):
         redis_progression_name_set = 'top_'+ module_name +'_set_' + date
         member_set = server.zrevrangebyscore(redis_progression_name_set, '+inf', '-inf', withscores=True)
-        if len(member_set) == 0: #No data for this date
+
+        member_set_str = []
+        # decode bytes
+        for domain, value in member_set:
+            m = domain.decode('utf8'), value
+            member_set_str.append(m)
+
+        if len(member_set_str) == 0: #No data for this date
             days += 1
         else:
-            member_set.insert(0, ("passed_days", days))
-            return member_set
+            member_set_str.insert(0, ("passed_days", days))
+            return member_set_str
 
 def get_date_range(num_day):
@@ -85,9 +92,17 @@ def providersChart():
     date_range = get_date_range(num_day)
     # Retreive all data from the last num_day
     for date in date_range:
-        curr_value_size = r_serv_charts.hget(keyword_name+'_'+'size', date)
+        curr_value_size = ( r_serv_charts.hget(keyword_name+'_'+'size', date) )
+        if curr_value_size is not None:
+            curr_value_size = curr_value_size.decode('utf8')
+
         curr_value_num = r_serv_charts.hget(keyword_name+'_'+'num', date)
         curr_value_size_avg = r_serv_charts.hget(keyword_name+'_'+'avg', date)
+        if curr_value_size_avg is not None:
+            curr_value_size_avg = curr_value_size_avg.decode('utf8')
+
         if module_name == "size":
             curr_value = float(curr_value_size_avg if curr_value_size_avg is not None else 0)
         else:
@@ -103,10 +118,17 @@ def providersChart():
         redis_provider_name_set = redis_provider_name_set + get_date_range(0)[0]
 
     member_set = r_serv_charts.zrevrangebyscore(redis_provider_name_set, '+inf', '-inf', withscores=True, start=0, num=8)
+
+    # decode bytes
+    member_set_str = []
+    for domain, value in member_set:
+        m = domain.decode('utf8'), value
+        member_set_str.append(m)
+
     # Member set is a list of (value, score) pairs
-    if len(member_set) == 0:
-        member_set.append(("No relevant data", float(100)))
-    return jsonify(member_set)
+    if len(member_set_str) == 0:
+        member_set_str.append(("No relevant data", float(100)))
+    return jsonify(member_set_str)
 
 @trendingmodules.route("/moduletrending/")