diff --git a/bin/Helper.py b/bin/Helper.py
index 66d7766a..e7338ceb 100755
--- a/bin/Helper.py
+++ b/bin/Helper.py
@@ -108,6 +108,7 @@ class Process(object):
self.modules = ConfigParser.ConfigParser()
self.modules.read(modulesfile)
self.subscriber_name = conf_section
+
self.pubsub = None
if self.modules.has_section(conf_section):
self.pubsub = PubSub()
@@ -118,6 +119,15 @@ class Process(object):
port=self.config.get('RedisPubSub', 'port'),
db=self.config.get('RedisPubSub', 'db'))
+ self.moduleNum = 1
+ for i in range(1, 50):
+ curr_num = self.r_temp.get("MODULE_"+self.subscriber_name + "_" + str(i))
+ if curr_num is None:
+ self.moduleNum = i
+ break
+
+
+
def populate_set_in(self):
# monoproc
src = self.modules.get(self.subscriber_name, 'subscribe')
@@ -142,15 +152,18 @@ class Process(object):
else:
try:
- path = message.split(".")[-2].split("/")[-1]
+ if ".gz" in message:
+ path = message.split(".")[-2].split("/")[-1]
+ else:
+ path = "?"
value = str(timestamp) + ", " + path
- self.r_temp.set("MODULE_"+self.subscriber_name, value)
+ self.r_temp.set("MODULE_"+self.subscriber_name + "_" + str(self.moduleNum), value)
return message
except:
path = "?"
value = str(timestamp) + ", " + path
- self.r_temp.set("MODULE_"+self.subscriber_name, value)
+ self.r_temp.set("MODULE_"+self.subscriber_name + "_" + str(self.moduleNum), value)
return message
def populate_set_out(self, msg, channel=None):
diff --git a/bin/LAUNCH.sh b/bin/LAUNCH.sh
index 024b22e4..b50a75d6 100755
--- a/bin/LAUNCH.sh
+++ b/bin/LAUNCH.sh
@@ -112,6 +112,8 @@ function launching_scripts {
echo -e $GREEN"\t* Launching ZMQ scripts"$DEFAULT
+ screen -S "Script" -X screen -t "ModuleInformation" bash -c './ModuleInformation.py -k 0 -c 1; read x'
+ sleep 0.1
screen -S "Script" -X screen -t "Global" bash -c './Global.py; read x'
sleep 0.1
screen -S "Script" -X screen -t "Duplicates" bash -c './Duplicates.py; read x'
@@ -159,8 +161,6 @@ function launching_scripts {
screen -S "Script" -X screen -t "Browse_warning_paste" bash -c './Browse_warning_paste.py; read x'
sleep 0.1
screen -S "Script" -X screen -t "SentimentAnalysis" bash -c './SentimentAnalysis.py; read x'
- sleep 0.1
- screen -S "Script" -X screen -t "ModuleInformation" bash -c './ModuleInformation.py -k 0; read x'
}
diff --git a/bin/ModuleInformation.py b/bin/ModuleInformation.py
index 8fcd8c78..d783418f 100755
--- a/bin/ModuleInformation.py
+++ b/bin/ModuleInformation.py
@@ -1,6 +1,18 @@
#!/usr/bin/env python2
# -*-coding:UTF-8 -*
+'''
+
+This module can be used to see information about running modules.
+This information is logged in "logs/moduleInfo.log"
+
+It can also try to manage them by killing inactive ones.
+However, it does not support multiple occurrences of the same module
+(it will kill the first one obtained by get)
+
+
+'''
+
import time
import datetime
import redis
@@ -29,6 +41,9 @@ def getPid(module):
else:
return None
+def clearRedisModuleInfo():
+ for k in server.keys("MODULE_*"):
+ server.delete(k)
def kill_module(module):
print ''
@@ -62,6 +77,7 @@ if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Show info concerning running modules and log suspected stucked modules. May be use to automatically kill and restart stucked one.')
parser.add_argument('-r', '--refresh', type=int, required=False, default=1, help='Refresh rate')
parser.add_argument('-k', '--autokill', type=int, required=True, default=1, help='Enable auto kill option (1 for TRUE, anything else for FALSE)')
+ parser.add_argument('-c', '--clear', type=int, required=False, default=1, help='Clear the current module information (Used to clear data from old launched modules)')
args = parser.parse_args()
@@ -80,76 +96,103 @@ if __name__ == "__main__":
port=cfg.getint("Redis_Queues", "port"),
db=cfg.getint("Redis_Queues", "db"))
- while True:
-
- num = 0
- printarray1 = []
- printarray2 = []
- for queue, card in server.hgetall("queues").iteritems():
- key = "MODULE_" + queue
- value = server.get(key)
- if value is not None:
- timestamp, path = value.split(", ")
- if timestamp is not None and path is not None:
- num += 1
- startTime_readable = datetime.datetime.fromtimestamp(int(timestamp))
- processed_time_readable = str((datetime.datetime.now() - startTime_readable)).split('.')[0]
-
- if int(card) > 0:
- if int((datetime.datetime.now() - startTime_readable).total_seconds()) > threshold_stucked_module:
- log = open(log_filename, 'a')
- log.write(json.dumps([queue, card, str(startTime_readable), str(processed_time_readable), path]) + "\n")
- if args.autokill == 1:
- kill_module(queue)
-
- printarray1.append([str(num), str(queue), str(card), str(startTime_readable), str(processed_time_readable), str(path)])
-
- else:
- printarray2.append([str(num), str(queue), str(card), str(startTime_readable), str(processed_time_readable), str(path)])
-
- printarray1.sort(lambda x,y: cmp(x[4], y[4]), reverse=True)
- printarray2.sort(lambda x,y: cmp(x[4], y[4]), reverse=True)
- printarray1.insert(0,["#", "Queue", "Amount", "Paste start time", "Processing time for current paste (H:M:S)", "Paste hash"])
- printarray2.insert(0,["#", "Queue", "Amount", "Paste start time", "Time since idle (H:M:S)", "Last paste hash"])
-
- os.system('clear')
- t1 = AsciiTable(printarray1, title="Working queues")
- t1.column_max_width(1)
- if not t1.ok:
- longest_col = t1.column_widths.index(max(t1.column_widths))
- max_length_col = t1.column_max_width(longest_col)
- if max_length_col > 0:
- for i, content in enumerate(t1.table_data):
- if len(content[longest_col]) > max_length_col:
- temp = ''
- for l in content[longest_col].splitlines():
- if len(l) > max_length_col:
- temp += '\n'.join(textwrap.wrap(l, max_length_col)) + '\n'
- else:
- temp += l + '\n'
- content[longest_col] = temp.strip()
- t1.table_data[i] = content
-
- t2 = AsciiTable(printarray2, title="Idling queues")
- t2.column_max_width(1)
- if not t2.ok:
- longest_col = t2.column_widths.index(max(t2.column_widths))
- max_length_col = t2.column_max_width(longest_col)
- if max_length_col > 0:
- for i, content in enumerate(t2.table_data):
- if len(content[longest_col]) > max_length_col:
- temp = ''
- for l in content[longest_col].splitlines():
- if len(l) > max_length_col:
- temp += '\n'.join(textwrap.wrap(l, max_length_col)) + '\n'
- else:
- temp += l + '\n'
- content[longest_col] = temp.strip()
- t2.table_data[i] = content
+ if args.clear == 1:
+ clearRedisModuleInfo()
- print t1.table
- print '\n'
- print t2.table
+ module_file_array = set()
+ with open('../doc/all_modules.txt', 'r') as module_file:
+ for line in module_file:
+ module_file_array.add(line[:-1])
- time.sleep(args.refresh)
+ while True:
+
+ all_queue = set()
+ curr_range = 50
+ printarray1 = []
+ printarray2 = []
+ printarray3 = []
+ for queue, card in server.hgetall("queues").iteritems():
+ all_queue.add(queue)
+ key = "MODULE_" + queue + "_"
+ for i in range(1, 50):
+ curr_num = server.get("MODULE_"+ queue + "_" + str(i))
+ if curr_num is None:
+ curr_range = i
+ break
+
+ for moduleNum in range(1, curr_range):
+ value = server.get(key + str(moduleNum))
+ if value is not None:
+ timestamp, path = value.split(", ")
+ if timestamp is not None and path is not None:
+ startTime_readable = datetime.datetime.fromtimestamp(int(timestamp))
+ processed_time_readable = str((datetime.datetime.now() - startTime_readable)).split('.')[0]
+
+ if int(card) > 0:
+ if int((datetime.datetime.now() - startTime_readable).total_seconds()) > threshold_stucked_module:
+ log = open(log_filename, 'a')
+ log.write(json.dumps([queue, card, str(startTime_readable), str(processed_time_readable), path]) + "\n")
+ if args.autokill == 1:
+ kill_module(queue)
+
+ printarray1.append([str(queue), str(moduleNum), str(card), str(startTime_readable), str(processed_time_readable), str(path)])
+
+ else:
+ printarray2.append([str(queue), str(moduleNum), str(card), str(startTime_readable), str(processed_time_readable), str(path)])
+
+ for curr_queue in module_file_array:
+ if curr_queue not in all_queue:
+ printarray3.append([curr_queue, "Not running"])
+
+ printarray1.sort(lambda x,y: cmp(x[4], y[4]), reverse=True)
+ printarray2.sort(lambda x,y: cmp(x[4], y[4]), reverse=True)
+ printarray1.insert(0,["Queue", "#", "Amount", "Paste start time", "Processing time for current paste (H:M:S)", "Paste hash"])
+ printarray2.insert(0,["Queue", "#","Amount", "Paste start time", "Time since idle (H:M:S)", "Last paste hash"])
+ printarray3.insert(0,["Queue", "State"])
+
+ os.system('clear')
+ t1 = AsciiTable(printarray1, title="Working queues")
+ t1.column_max_width(1)
+ if not t1.ok:
+ longest_col = t1.column_widths.index(max(t1.column_widths))
+ max_length_col = t1.column_max_width(longest_col)
+ if max_length_col > 0:
+ for i, content in enumerate(t1.table_data):
+ if len(content[longest_col]) > max_length_col:
+ temp = ''
+ for l in content[longest_col].splitlines():
+ if len(l) > max_length_col:
+ temp += '\n'.join(textwrap.wrap(l, max_length_col)) + '\n'
+ else:
+ temp += l + '\n'
+ content[longest_col] = temp.strip()
+ t1.table_data[i] = content
+
+ t2 = AsciiTable(printarray2, title="Idling queues")
+ t2.column_max_width(1)
+ if not t2.ok:
+ longest_col = t2.column_widths.index(max(t2.column_widths))
+ max_length_col = t2.column_max_width(longest_col)
+ if max_length_col > 0:
+ for i, content in enumerate(t2.table_data):
+ if len(content[longest_col]) > max_length_col:
+ temp = ''
+ for l in content[longest_col].splitlines():
+ if len(l) > max_length_col:
+ temp += '\n'.join(textwrap.wrap(l, max_length_col)) + '\n'
+ else:
+ temp += l + '\n'
+ content[longest_col] = temp.strip()
+ t2.table_data[i] = content
+
+ t3 = AsciiTable(printarray3, title="Not running queues")
+ t3.column_max_width(1)
+
+ print t1.table
+ print '\n'
+ print t2.table
+ print '\n'
+ print t3.table
+
+ time.sleep(args.refresh)
diff --git a/bin/WebStats.py b/bin/WebStats.py
index 20c1f489..d8ff0876 100755
--- a/bin/WebStats.py
+++ b/bin/WebStats.py
@@ -66,25 +66,7 @@ def compute_progression(server, field_name, num_day, url_parsed):
# filter
if (keyword_total_sum > threshold_total_sum) and (keyword_increase > threshold_increase):
- if server.sismember(redis_progression_name_set, keyword): #if keyword is in the set
- server.hset(redis_progression_name, keyword, keyword_increase) #update its value
-
- elif (server.scard(redis_progression_name_set) < max_set_cardinality):
- server.sadd(redis_progression_name_set, keyword)
-
- else: #not in the set
- #Check value for all members
- member_set = []
- for keyw in server.smembers(redis_progression_name_set):
- member_set.append((keyw, int(server.hget(redis_progression_name, keyw))))
- member_set.sort(key=lambda tup: tup[1])
- if member_set[0][1] < keyword_increase:
- print 'removing', member_set[0][0] + '('+str(member_set[0][1])+')', 'and adding', keyword, str(keyword_increase)
- #remove min from set and add the new one
- server.srem(redis_progression_name_set, member_set[0][0])
- server.sadd(redis_progression_name_set, keyword)
- server.hdel(redis_progression_name, member_set[0][0])
- server.hset(redis_progression_name, keyword, keyword_increase)
+ server.zadd("z_top_progression_"+field_name, float(keyword_increase), keyword)
if __name__ == '__main__':
diff --git a/bin/launch_scripts.sh b/bin/launch_scripts.sh
index 1cdde370..694f0138 100755
--- a/bin/launch_scripts.sh
+++ b/bin/launch_scripts.sh
@@ -8,6 +8,8 @@ sleep 0.1
echo -e $GREEN"\t* Launching ZMQ scripts"$DEFAULT
+ screen -S "Script" -X screen -t "ModuleInformation" bash -c './ModuleInformation.py -k 0 -c 1; read x'
+ sleep 0.1
screen -S "Script" -X screen -t "Global" bash -c './Global.py; read x'
sleep 0.1
screen -S "Script" -X screen -t "Duplicates" bash -c './Duplicates.py; read x'
@@ -55,5 +57,3 @@ echo -e $GREEN"\t* Launching ZMQ scripts"$DEFAULT
screen -S "Script" -X screen -t "Browse_warning_paste" bash -c './Browse_warning_paste.py; read x'
sleep 0.1
screen -S "Script" -X screen -t "SentimentAnalysis" bash -c './SentimentAnalysis.py; read x'
- sleep 0.1
- screen -S "Script" -X screen -t "ModuleInformation" bash -c './ModuleInformation.py; read x'
diff --git a/doc/generate_graph_data.py b/doc/generate_graph_data.py
index cff1f538..c1b4b98e 100755
--- a/doc/generate_graph_data.py
+++ b/doc/generate_graph_data.py
@@ -32,6 +32,9 @@ with open('../bin/packages/modules.cfg', 'r') as f:
continue
output_set_graph = set()
+ with open('all_modules.txt', 'w') as f2:
+ for e in all_modules:
+ f2.write(e+"\n")
for module in modules.keys():
for stream_in in modules[module]['sub']:
diff --git a/var/www/Flask_server.py b/var/www/Flask_server.py
index 8b6e05e3..3c4346f1 100755
--- a/var/www/Flask_server.py
+++ b/var/www/Flask_server.py
@@ -83,18 +83,27 @@ def get_queues(r):
# We may want to put the llen in a pipeline to do only one query.
data = [(queue, int(card)) for queue, card in r.hgetall("queues").iteritems()]
newData = []
+
+ curr_range = 50
for queue, card in data:
- key = "MODULE_" + queue
- value = r.get(key)
- if value is not None:
- timestamp, path = value.split(", ")
- if timestamp is not None:
- startTime_readable = datetime.datetime.fromtimestamp(int(timestamp))
- processed_time_readable = str((datetime.datetime.now() - startTime_readable)).split('.')[0]
- seconds = int((datetime.datetime.now() - startTime_readable).total_seconds())
- newData.append( (queue, card, seconds) )
- else:
- newData.append( (queue, cards, 0) )
+ key = "MODULE_" + queue + "_"
+ for i in range(1, 50):
+ curr_num = r.get("MODULE_"+ queue + "_" + str(i))
+ if curr_num is None:
+ curr_range = i
+ break
+
+ for moduleNum in range(1, curr_range):
+ value = r.get(key + str(moduleNum))
+ if value is not None:
+ timestamp, path = value.split(", ")
+ if timestamp is not None:
+ startTime_readable = datetime.datetime.fromtimestamp(int(timestamp))
+ processed_time_readable = str((datetime.datetime.now() - startTime_readable)).split('.')[0]
+ seconds = int((datetime.datetime.now() - startTime_readable).total_seconds())
+ newData.append( (queue, card, seconds, moduleNum) )
+ else:
+ newData.append( (queue, cards, 0, moduleNum) )
return newData
@@ -103,40 +112,6 @@ def list_len(s):
return len(s)
app.jinja_env.filters['list_len'] = list_len
-def parseStringToList(the_string):
- strList = ""
- elemList = []
- for c in the_string:
- if c != ']':
- if c != '[' and c !=' ' and c != '"':
- strList += c
- else:
- the_list = strList.split(',')
- if len(the_list) == 3:
- elemList = elemList + the_list
- elif len(the_list) == 2:
- elemList.append(the_list)
- elif len(the_list) > 1:
- elemList.append(the_list[1:])
- strList = ""
- return elemList
-
-def parseStringToList2(the_string):
- if the_string == []:
- return []
- else:
- res = []
- tab_str = the_string.split('], [')
- tab_str[0] = tab_str[0][1:]+']'
- tab_str[len(tab_str)-1] = '['+tab_str[len(tab_str)-1][:-1]
- res.append(parseStringToList(tab_str[0]))
- for i in range(1, len(tab_str)-2):
- tab_str[i] = '['+tab_str[i]+']'
- res.append(parseStringToList(tab_str[i]))
- if len(tab_str) > 1:
- res.append(parseStringToList(tab_str[len(tab_str)-1]))
- return res
-
def showpaste(content_range):
requested_path = request.args.get('paste', '')
@@ -150,7 +125,7 @@ def showpaste(content_range):
p_mime = paste.p_mime
p_lineinfo = paste.get_lines_info()
p_content = paste.get_p_content().decode('utf-8', 'ignore')
- p_duplicate_full_list = parseStringToList2(paste._get_p_duplicate())
+ p_duplicate_full_list = json.loads(paste._get_p_duplicate())
p_duplicate_list = []
p_simil_list = []
p_hashtype_list = []
@@ -174,7 +149,7 @@ def showpaste(content_range):
hash_types = []
comp_vals = []
for i in indices:
- hash_types.append(p_duplicate_full_list[i][0])
+ hash_types.append(p_duplicate_full_list[i][0].encode('utf8'))
comp_vals.append(p_duplicate_full_list[i][2])
dup_list_removed.append(i)
@@ -281,19 +256,9 @@ def progressionCharts():
return jsonify(bar_values)
else:
- redis_progression_name = 'top_progression_'+trending_name
- redis_progression_name_set = 'top_progression_'+trending_name+'_set'
-
- # Iterate over element in top_x_set and retreive their value
- member_set = []
- for keyw in r_serv_charts.smembers(redis_progression_name_set):
- keyw_value = r_serv_charts.hget(redis_progression_name, keyw)
- keyw_value = keyw_value if keyw_value is not None else 0
- member_set.append((keyw, int(keyw_value)))
- member_set.sort(key=lambda tup: tup[1], reverse=True)
- if len(member_set) == 0:
- member_set.append(("No relevant data", int(100)))
- return jsonify(member_set)
+ redis_progression_name = "z_top_progression_" + trending_name
+ keyw_value = r_serv_charts.zrevrangebyscore(redis_progression_name, '+inf', '-inf', withscores=True, start=0, num=10)
+ return jsonify(keyw_value)
@app.route("/_moduleCharts", methods=['GET'])
def modulesCharts():
@@ -472,7 +437,7 @@ def sentiment_analysis_trending():
return render_template("sentiment_analysis_trending.html")
-@app.route("/sentiment_analysis_getplotdata/")
+@app.route("/sentiment_analysis_getplotdata/", methods=['GET'])
def sentiment_analysis_getplotdata():
# Get the top providers based on number of pastes
oneHour = 60*60
@@ -481,19 +446,27 @@ def sentiment_analysis_getplotdata():
dateStart = dateStart.replace(minute=0, second=0, microsecond=0)
dateStart_timestamp = calendar.timegm(dateStart.timetuple())
- to_return = {}
- range_providers = r_serv_charts.zrevrangebyscore('providers_set_'+ get_date_range(0)[0], '+inf', '-inf', start=0, num=8)
- # if empty, get yesterday top providers
- print 'providers_set_'+ get_date_range(1)[1]
- range_providers = r_serv_charts.zrevrangebyscore('providers_set_'+ get_date_range(1)[1], '+inf', '-inf', start=0, num=8) if range_providers == [] else range_providers
- # if still empty, takes from all providers
- if range_providers == []:
- print 'today provider empty'
- range_providers = r_serv_charts.smembers('all_provider_set')
+ getAllProviders = request.args.get('getProviders')
+ provider = request.args.get('provider')
+ allProvider = request.args.get('all')
+ if getAllProviders == 'True':
+ if allProvider == "True":
+ range_providers = r_serv_charts.smembers('all_provider_set')
+ return jsonify(list(range_providers))
+ else:
+ range_providers = r_serv_charts.zrevrangebyscore('providers_set_'+ get_date_range(0)[0], '+inf', '-inf', start=0, num=8)
+ # if empty, get yesterday top providers
+ range_providers = r_serv_charts.zrevrangebyscore('providers_set_'+ get_date_range(1)[1], '+inf', '-inf', start=0, num=8) if range_providers == [] else range_providers
+ # if still empty, takes from all providers
+ if range_providers == []:
+ print 'today provider empty'
+ range_providers = r_serv_charts.smembers('all_provider_set')
+ return jsonify(range_providers)
- for cur_provider in range_providers:
- print cur_provider
- cur_provider_name = cur_provider + '_'
+ elif provider is not None:
+ to_return = {}
+
+ cur_provider_name = provider + '_'
list_date = {}
for cur_timestamp in range(int(dateStart_timestamp), int(dateStart_timestamp)-sevenDays-oneHour, -oneHour):
cur_set_name = cur_provider_name + str(cur_timestamp)
@@ -503,9 +476,10 @@ def sentiment_analysis_getplotdata():
cur_value = r_serv_sentiment.get(cur_id)
list_value.append(cur_value)
list_date[cur_timestamp] = list_value
- to_return[cur_provider] = list_date
+ to_return[provider] = list_date
- return jsonify(to_return)
+ return jsonify(to_return)
+ return "Bad request"
diff --git a/var/www/static/js/indexjavascript.js b/var/www/static/js/indexjavascript.js
index 359a1e56..1bdb29eb 100644
--- a/var/www/static/js/indexjavascript.js
+++ b/var/www/static/js/indexjavascript.js
@@ -223,9 +223,16 @@ function create_queue_table() {
var tr = document.createElement('TR')
for(j = 0; j < 2; j++){
var td = document.createElement('TD')
- td.appendChild(document.createTextNode(glob_tabvar.row1[i][j]));
+ var moduleNum = j == 0 ? "." + glob_tabvar.row1[i][3] : "";
+ td.appendChild(document.createTextNode(glob_tabvar.row1[i][j] + moduleNum));
tr.appendChild(td)
}
+ // Used to decide the color of the row
+ // We have glob_tabvar.row1[][j] with:
+ // - j=0: ModuleName
+ // - j=1: queueLength
+ // - j=2: LastProcessedPasteTime
+ // - j=3: Index of this module among modules of the same category
if (parseInt(glob_tabvar.row1[i][2]) > 60*2 && parseInt(glob_tabvar.row1[i][1]) > 2)
tr.className += " danger";
else if (parseInt(glob_tabvar.row1[i][2]) > 60*1)
diff --git a/var/www/static/js/sentiment_trending.js b/var/www/static/js/sentiment_trending.js
index a56723b8..d96c6fbb 100644
--- a/var/www/static/js/sentiment_trending.js
+++ b/var/www/static/js/sentiment_trending.js
@@ -1,4 +1,42 @@
+/* Functions and config */
+function add_new_graph_today(id) {
+ return "
" +
+ "
" +
+ "Graph "+id+"" +
+ "Avg" +
+ "
" +
+ "
" +
+ "
" +
+ "" +
+ "" +
+ " | " +
+ " | " +
+ "
" +
+ "" +
+ "
" +
+ "
" +
+ "
";
+};
+function add_new_graph_week(id) {
+ return "" +
+ "
" +
+ "Graph "+id+"" +
+ "Avg" +
+ "
" +
+ "
" +
+ "
" +
+ "" +
+ "" +
+ " | " +
+ " | " +
+ "
" +
+ "" +
+ "
" +
+ "
" +
+ "
";
+}
+
function generate_offset_to_time(num){
var to_ret = {};
for(i=0; i<=num; i++) {
@@ -15,7 +53,7 @@
var to_ret = {};
for(i=day; i>=0; i--){
for(j=0; j<24; j++){
- var t1 =now.getDate()-i + ":";
+ var t1 =now.getDate()-i + ":";
var t2 =now.getHours()-(23-j);
t2 = t2 < 0 ? 24+t2 : t2;
t2 += "h";
@@ -40,34 +78,79 @@
barColor: '#00bf5f',
negBarColor: '#f22929',
zeroColor: '#ffff00',
-
+
tooltipFormat: '● {{offset:names}}, {{value}} ',
};
-$.getJSON("/sentiment_analysis_getplotdata/",
- function(data) {
- var all_data = [];
- var plot_data = [];
- var graph_avg = [];
- var array_provider = Object.keys(data);
- var dates_providers = Object.keys(data[array_provider[0]]);
- var dateStart = parseInt(dates_providers[0]);
- var oneHour = 60*60;
- var oneWeek = oneHour*24*7;
+/* Plot and queries */
- var all_graph_day_sum = 0.0;
- var all_graph_hour_sum = 0.0;
- var all_graph_hour_maxVal = 0.0;
- var all_day_avg = 0.0;
- var all_day_avg_maxVal = 0.0;
+var all_graph_day_sum = 0.0;
+var all_graph_hour_sum = 0.0;
+var all_graph_hour_sum_minus = 0.0;
+var all_graph_hour_maxVal = 0.0;
+var all_day_avg = 0.0;
+var all_day_avg_maxVal = 0.0;
+var graph_avg = [];
+var all_data = [];
+var provider_already_loaded = [];
+var totNumGraph = 0;
+
+// Query all provider names, then launch the query and plot process for each of them.
+// When everything is terminated, plot the widgets (Gauge, canvasJS, table)
+// input: all - set to 'True' to take all providers
+function draw_page(all) {
+ $.getJSON("/sentiment_analysis_getplotdata/?getProviders=True&all="+all,
+ function(data) {
+ var promises = [];
+
+ var the_length = provider_already_loaded.length == 0 ? 0 : provider_already_loaded.length;
+ for(i=0; i max_value ? Math.abs(pos-neg) : max_value;
if(curr_date >= dateStart+oneWeek-23*oneHour){
- max_value_day = Math.abs(pos-neg) > max_value_day ? Math.abs(pos-neg) : max_value_day;
- day_sum += (pos-neg);
+ max_value_day = Math.abs(pos-neg) > max_value_day ? Math.abs(pos-neg) : max_value_day; day_sum += (pos-neg);
day_sum_elem++;
}
if(curr_date > dateStart+oneWeek-2*oneHour && curr_date <=dateStart+oneWeek-oneHour){
@@ -121,17 +202,16 @@ $.getJSON("/sentiment_analysis_getplotdata/",
}
all_graph_day_sum += day_sum;
all_graph_hour_sum += hour_sum;
+ all_graph_hour_sum_minus += hour_sum > 0 ? 0 : 1;
all_graph_hour_maxVal = Math.abs(hour_sum) > all_graph_hour_maxVal ? Math.abs(hour_sum) : all_graph_hour_maxVal;
- var curr_avg = curr_sum / (curr_sum_elem);
+ var curr_avg = curr_sum / (curr_sum_elem);
if(isNaN(curr_avg))
curr_avg = 0.0
- //var curr_avg = curr_sum / (oneWeek/oneHour);
- //var curr_avg = curr_sum / (spark_data.length);
graph_avg.push([curr_provider, curr_avg]);
plot_data.push(spark_data);
all_data.push(graph_data);
-
+
sparklineOptions.chartRangeMax = max_value;
sparklineOptions.chartRangeMin = -max_value;
@@ -141,11 +221,11 @@ $.getJSON("/sentiment_analysis_getplotdata/",
var num = graphNum + 1;
var placeholder = '.sparkLineStatsWeek' + num;
sparklineOptions.barWidth = 2;
- $(placeholder).sparkline(plot_data[graphNum], sparklineOptions);
+ $(placeholder).sparkline(plot_data[0], sparklineOptions);
$(placeholder+'t').text(curr_provider);
- var curr_avg_text = isNaN(curr_avg) ? "No data" : curr_avg.toFixed(5);
+ var curr_avg_text = isNaN(curr_avg) ? "No data" : curr_avg.toFixed(5);
$(placeholder+'s').text(curr_avg_text);
-
+
sparklineOptions.barWidth = 18;
sparklineOptions.tooltipFormat = '● Avg: {{value}} '
$(placeholder+'b').sparkline([curr_avg], sparklineOptions);
@@ -169,8 +249,8 @@ $.getJSON("/sentiment_analysis_getplotdata/",
// print today
- var data_length = plot_data[graphNum].length;
- var data_today = plot_data[graphNum].slice(data_length-24, data_length);
+ var data_length = plot_data[0].length;
+ var data_today = plot_data[0].slice(data_length-24, data_length);
placeholder = '.sparkLineStatsToday' + num;
sparklineOptions.barWidth = 14;
@@ -199,155 +279,124 @@ $.getJSON("/sentiment_analysis_getplotdata/",
$(avgName).addClass("panel-warning")
}
- }//for loop
-
-
- /* ---------------- Gauge ---------------- */
- var gaugeOptions = {
- animateEasing: true,
-
- elementWidth: 200,
- elementHeight: 125,
-
- arcFillStart: 10,
- arcFillEnd: 12,
- arcFillTotal: 20,
- incTot: 1.0,
-
- arcBgColorLight: 200,
- arcBgColorSat: 0,
- arcStrokeFg: 20,
- arcStrokeBg: 30,
-
- colorArcFg: '#FF3300',
- animateSpeed: 1,
-
- };
- // Clone object
- var gaugeOptions2 = jQuery.extend(true, {}, gaugeOptions);
- var gaugeOptions3 = jQuery.extend(true, {}, gaugeOptions);
-
-
-
- gaugeOptions.appendTo = '#gauge_today_last_hour';
- gaugeOptions.dialLabel = 'Last hour';
- gaugeOptions.elementId = 'gauge1';
- var piePercent = (all_graph_hour_sum / 8) / all_graph_hour_maxVal;
- gaugeOptions.inc = piePercent;
- var gauge_today_last_hour = new FlexGauge(gaugeOptions);
-
- gaugeOptions2.appendTo = '#gauge_today_last_days';
- gaugeOptions2.dialLabel = 'Today';
- gaugeOptions2.elementId = 'gauge2';
- //piePercent = (all_graph_day_sum / (8*24)) / max_value;
- piePercent = (all_day_avg / 8) / all_day_avg_maxVal;
- gaugeOptions2.inc = piePercent;
- var gauge_today_last_days = new FlexGauge(gaugeOptions2);
-
- gaugeOptions3.appendTo = '#gauge_week';
- gaugeOptions3.dialLabel = 'Week';
- gaugeOptions3.elementId = 'gauge3';
-
- var graph_avg_sum = 0.0;
- var temp_max_val = 0.0;
- for (i=0; i temp_max_val ? Math.abs(graph_avg[i][1]) : temp_max_val;
}
-
- piePercent = (graph_avg_sum / graph_avg.length) / temp_max_val;
- gaugeOptions3.inc = piePercent;
- var gauge_today_last_days = new FlexGauge(gaugeOptions3);
+ );
+ return query_plot
+}
- /* --------- Sort providers -------- */
- graph_avg.sort(function(a, b){return b[1]-a[1]});
+function draw_widgets() {
- for (i=1; i<6; i++){
- $('.worst'+i).text(graph_avg[7-(i-1)][0]);
- $('.best'+i).text(graph_avg[i-1][0]);
- }
+ /* ---------------- Gauge ---------------- */
+ var gaugeOptions = {
+ animateEasing: true,
- /* ----------- CanvasJS ------------ */
+ elementWidth: 200,
+ elementHeight: 125,
- var comp_sum_day_pos = 0.0;
- var comp_sum_day_neg = 0.0;
- var comp_sum_hour_pos = 0.0;
- var comp_sum_hour_neg = 0.0;
- for(graphNum=0; graphNum<8; graphNum++){
- curr_graphData = all_data[graphNum];
- var gauge_data = curr_graphData.slice(curr_graphData.length-24, curr_graphData.length);
- for (i=1; i< gauge_data.length; i++){
- comp_sum_day_pos += gauge_data[i].compoundPos;
- comp_sum_day_neg += gauge_data[i].compoundNeg;
+ arcFillStart: 10,
+ arcFillEnd: 12,
+ arcFillTotal: 20,
+ incTot: 1.0,
- if(i == 23){
- comp_sum_hour_pos += gauge_data[i].compoundPos;
- comp_sum_hour_neg += gauge_data[i].compoundNeg;
- }
+ arcBgColorLight: 200,
+ arcBgColorSat: 0,
+ arcStrokeFg: 20,
+ arcStrokeBg: 30,
+
+ colorArcFg: '#FF3300',
+ animateSpeed: 1,
+
+ };
+ // Clone object
+ var gaugeOptions2 = jQuery.extend(true, {}, gaugeOptions);
+ var gaugeOptions3 = jQuery.extend(true, {}, gaugeOptions);
+
+
+
+ gaugeOptions.appendTo = '#gauge_today_last_hour';
+ gaugeOptions.dialLabel = 'Last hour';
+ gaugeOptions.elementId = 'gauge1';
+ var piePercent = (all_graph_hour_sum / (totNumGraph - all_graph_hour_sum_minus)) / all_graph_hour_maxVal;
+ gaugeOptions.inc = piePercent;
+ var gauge_today_last_hour = new FlexGauge(gaugeOptions);
+
+ gaugeOptions2.appendTo = '#gauge_today_last_days';
+ gaugeOptions2.dialLabel = 'Today';
+ gaugeOptions2.elementId = 'gauge2';
+ piePercent = (all_day_avg / totNumGraph) / all_day_avg_maxVal;
+ gaugeOptions2.inc = piePercent;
+ var gauge_today_last_days = new FlexGauge(gaugeOptions2);
+
+ gaugeOptions3.appendTo = '#gauge_week';
+ gaugeOptions3.dialLabel = 'Week';
+ gaugeOptions3.elementId = 'gauge3';
+
+ var graph_avg_sum = 0.0;
+ var temp_max_val = 0.0;
+ for (i=0; i temp_max_val ? Math.abs(graph_avg[i][1]) : temp_max_val;
+ }
+
+ piePercent = (graph_avg_sum / graph_avg.length) / temp_max_val;
+ gaugeOptions3.inc = piePercent;
+ var gauge_today_last_days = new FlexGauge(gaugeOptions3);
+
+
+ /* --------- Sort providers -------- */
+
+ graph_avg.sort(function(a, b){return b[1]-a[1]});
+
+ for (i=1; i<6; i++){
+ $('.worst'+i).text(graph_avg[7-(i-1)][0]);
+ $('.best'+i).text(graph_avg[i-1][0]);
+ }
+
+ /* ----------- CanvasJS ------------ */
+
+ var comp_sum_day_pos = 0.0;
+ var comp_sum_day_neg = 0.0;
+ var comp_sum_hour_pos = 0.0;
+ var comp_sum_hour_neg = 0.0;
+ for(graphNum=0; graphNumPositive: {y}",
- type: "bar",
- color: "green",
- dataPoints: [
- {y: comp_sum_hour_pos/8}
- ]
- },
- {
- toolTipContent: "Negative: {y}",
- type: "bar",
- color: "red",
- dataPoints: [
- {y: comp_sum_hour_neg/8}
- ]
- }
- ]
- };
-
- var chart_canvas1 = new CanvasJS.Chart("bar_today_last_hour", options_canvasJS_1);
+ }
- var options_canvasJS_2 = {
-
- animationEnabled: true,
- axisY: {
- tickThickness: 0,
- lineThickness: 0,
- valueFormatString: " ",
- gridThickness: 0
- },
- axisX: {
- tickThickness: 0,
- lineThickness: 0,
- labelFontSize: 0.1,
- },
- data: [
+ var options_canvasJS_1 = {
+
+ animationEnabled: true,
+ axisY: {
+ tickThickness: 0,
+ lineThickness: 0,
+ valueFormatString: " ",
+ gridThickness: 0
+ },
+ axisX: {
+ tickThickness: 0,
+ lineThickness: 0,
+ labelFontSize: 0.1,
+ },
+ data: [
{
toolTipContent: "Positive: {y}",
type: "bar",
color: "green",
dataPoints: [
- {y: comp_sum_day_pos/8}
+ {y: comp_sum_hour_pos/totNumGraph}
]
},
{
@@ -355,32 +404,51 @@ $.getJSON("/sentiment_analysis_getplotdata/",
type: "bar",
color: "red",
dataPoints: [
- {y: comp_sum_day_neg/8}
+ {y: comp_sum_hour_neg/totNumGraph}
]
}
+ ]
+ };
+
+ var chart_canvas1 = new CanvasJS.Chart("bar_today_last_hour", options_canvasJS_1);
+
+ var options_canvasJS_2 = {
+
+ animationEnabled: true,
+ axisY: {
+ tickThickness: 0,
+ lineThickness: 0,
+ valueFormatString: " ",
+ gridThickness: 0
+ },
+ axisX: {
+ tickThickness: 0,
+ lineThickness: 0,
+ labelFontSize: 0.1,
+ },
+ data: [
+ {
+ toolTipContent: "Positive: {y}",
+ type: "bar",
+ color: "green",
+ dataPoints: [
+ {y: comp_sum_day_pos/totNumGraph}
]
- };
-
- var chart_canvas2 = new CanvasJS.Chart("bar_today_last_days", options_canvasJS_2);
-
- chart_canvas1.render();
- chart_canvas2.render();
-
-
-
- }
-);
-
-
-
-
-
-
-
-
-
-
-
+ },
+ {
+ toolTipContent: "Negative: {y}",
+ type: "bar",
+ color: "red",
+ dataPoints: [
+ {y: comp_sum_day_neg/totNumGraph}
+ ]
+ }
+ ]
+ };
+ var chart_canvas2 = new CanvasJS.Chart("bar_today_last_days", options_canvasJS_2);
+ chart_canvas1.render();
+ chart_canvas2.render();
+}
diff --git a/var/www/templates/index.html b/var/www/templates/index.html
index dc360008..28c3aff9 100644
--- a/var/www/templates/index.html
+++ b/var/www/templates/index.html
@@ -154,7 +154,6 @@
-