Compare commits
6 Commits: e94fd665ad ... e254667256

| Author | SHA1 | Date |
|---|---|---|
| | e254667256 | |
| | b21d0bf8ba | |
| | f0a6a1c8aa | |
| | 75f41fb9f5 | |
| | 33d63b120d | |
| | b40efa4bbb | |
@@ -52,7 +52,7 @@ __MAPPING__ = {
         StoreRender
     ],
     SimulationOrderAnalyzer: [
-        JSONRender,
+        #JSONRender,
         # SimulationOrderRender,
         SimulationGroupRender
     ]
@@ -52,7 +52,7 @@ class ResultStore:
         :return:
         """
         result = []
-        for key in self.store:
+        for key in sorted(self.store):
             result += self.store[key]
         return result
@@ -204,7 +204,7 @@ class ActivityMapper(Analyzer):
             board_data = get_board_data(self.settings.source, self.instance_config_id, entry["sequence_id"],
                                         entry["board_id"])
             entry["extra_data"] = board_data
-            entry["extra_data"]["activity_type"] = self.classify_entry(entry)
+            entry["extra_data"]["activity_type"] = self.last_board_type
             entry['coordinate'] = self.new_coordinate()
             self.timeline.append(entry)
             return False
@@ -293,8 +293,8 @@ class InstanceConfig(Analyzer):
             print(entry)
             self.store["instance_id"] = json_path(entry, self.settings.custom["instance_config_id"])

-    def result(self, store: ResultStore):
-        store.add(Result(type(self), dict(self.store)))
+    def result(self, store: ResultStore, name=None):
+        store.add(Result(type(self), dict(self.store), name=name))


 class SimulationOrderAnalyzer(Analyzer):
@@ -305,8 +305,8 @@ class SimulationOrderAnalyzer(Analyzer):
         self.store = defaultdict(lambda: -1)  # TODO verify
         self.order = []

-    def result(self, store: ResultStore) -> None:
-        store.add(Result(type(self), [self.store[sim] for sim in self.order]))
+    def result(self, store: ResultStore, name=None) -> None:
+        store.add(Result(type(self), [self.store[sim] for sim in self.order], name=name))

     def process(self, entry: dict) -> bool:
         entry_type = entry[self.settings.type_field]
@@ -137,3 +137,72 @@ class ProgressAnalyzer(Analyzer):
         if entry[self.settings.type_field] in self.settings.boards:
             self.board[entry["timestamp"]] = entry
         return False
+
+
+class MetaDataAnalyzer(Analyzer):
+    """collect metadata"""
+    __name__ = "MetaDataAnalyzer"
+
+    def result(self, store: ResultStore, name=None) -> None:
+        store.add(Result(type(self), dict(self.store)))
+
+    def process(self, entry: dict) -> bool:
+        if not "metadata" in self.settings.custom:
+            return False
+        for mdata in self.settings.custom["metadata"]:
+            key = self.settings.custom["metadata"]
+            if key in entry:
+                self.store[mdata] = json_path(entry, key)
+
+    def __init__(self, settings: LogSettings) -> None:
+        super().__init__(settings)
+        self.store = {}
+
+
+def write_logentry_count_csv(LogEntryCountCSV, store, render, analyzers):
+    global cat, data, lines, csvfile
+    LogEntryCountCSV.summary = None
+    for cat in store.get_categories():
+        data = store.get_category(cat)
+        render(analyzers.LogEntryCountAnalyzer, data, name=cat)
+    if LogEntryCountCSV.summary:
+        headers = []
+        lines = []
+        for name in LogEntryCountCSV.summary:
+            data = LogEntryCountCSV.summary[name]
+            for head in data:
+                if not head in headers:
+                    headers.append(head)
+            line = [name]
+            for head in headers:
+                line.append(data[head]) if head in data else line.append(0)
+            lines.append(line)
+        import csv
+
+        with open('logentrycount.csv', 'w', newline='') as csvfile:
+            writer = csv.writer(csvfile, quoting=csv.QUOTE_NONE)
+            writer.writerow(["name"] + [h.split(".")[-1] for h in headers])
+            for line in lines:
+                writer.writerow(line)
+
+
+def write_simulation_flag_csv(store):
+    global csvfile, result, i
+    from datetime import datetime
+    import json
+    json.dump(store.serializable(), open("simus.json", "w"), indent=2)
+    with open("simus.csv", "w") as csvfile:
+        csvfile.write("instanceconfig,log,simu,answered,universe_state,selected_actions,timestamp,time\n")
+        for key in store.get_store():
+            csvfile.write("{}\n".format(key))
+            for result in store.store[key]:
+                csvfile.write(",{}\n".format(result.name))
+                for i in result.get():
+                    csvfile.write(",,{},{},{},{},{},{}\n".format(
+                        i['answers']['@id'],
+                        i['answers']['answered'],
+                        len(i['answers']['universe_state']) if i['answers']['universe_state'] else 0,
+                        len(i['selected_actions']) if i['selected_actions'] else 0,
+                        i['timestamp'],
+                        str(datetime.fromtimestamp(i['timestamp'] / 1000))
+                    ))
@@ -186,7 +186,13 @@ class SimulationOrderRender(Render):

 class SimulationGroupRender(Render):
     def render(self, results: List[Result], name=None):
-        data = [r.get() for r in self.filter(results)]
+        #data = [r.get() for r in self.filter(results)]
+        data = []
+        for r in self.filter(results):
+            raw = r.get()
+            if len(raw) < 6:
+                raw = [0] + raw
+            data.append(raw)
         print(name, len(data))
         # graph_fit(list(data), name=name)
         graph_plot(list(data), ylabel="simulation retries", title="sequential simulation retries", rotation=None,
@@ -0,0 +1,393 @@
+import json
+
+import numpy as np
+
+import analyzers
+from util.geo import calc_distance
+
+
+def time_distribution(store):
+    # json.dump(store.serializable(), open("new.json", "w"), indent=1)
+
+    keys = [
+        "simu",
+        "question",
+        "image",
+        "audio",
+        "video",
+        "other",
+        "map"
+    ]
+    import matplotlib.pyplot as plt
+
+    # results = []
+
+    places = defaultdict(list)
+
+    for log in store.get_all():
+        result = defaultdict(lambda: 0)
+        for i in log.get()['track']:
+            duration = i['properties']['end_timestamp'] - i['properties']['start_timestamp']
+            result[i['properties']['activity_type']] += duration
+        print(json.dumps(result, indent=4))
+        total = sum(result.values())
+        print(total)
+        percentage = defaultdict(lambda: 0)
+        minutes = defaultdict(lambda: 0)
+        for i in result:
+            percentage[i] = result[i] / total
+            minutes[i] = result[i] / 60_000
+        print(json.dumps(percentage, indent=4))
+        if not 'error' in result:
+            # places[log.get()['instance']].append(percentage)
+            places[log.get()['instance']].append(minutes)
+
+    for place in places:
+        places[place] = sorted(places[place], key=lambda item: item['map'])
+
+    dummy = [0] * len(keys)
+    results = []
+    sites = []
+    from util.meta_temp import CONFIG_NAMES
+
+    for i in places:
+        for j in places[i]:
+            ordered = []
+            for k in keys:
+                ordered.append(j[k])
+            results.append(ordered)
+        results.append(dummy)
+        sites.append(CONFIG_NAMES[i] if i in CONFIG_NAMES else "---")
+
+    size = len(results)
+    ind = np.arange(size)
+    width = 0.9
+    print(results)
+    data = list(zip(*results))
+    print(data)
+    lines = []
+    bottom = [0] * len(results)
+    for i in range(0, len(data)):
+        lines.append(plt.bar(ind, data[i], bottom=bottom, width=width)[0])
+        for k, x in enumerate(data[i]):
+            bottom[k] += x
+    plt.legend(lines, keys)
+    plt.title(", ".join(sites))
+    plt.show()
+
+
+    # size = len(results)
+    # ind = np.arange(size)
+    # width = 0.9
+    # print(results)
+    # data = list(zip(*results))
+    # print(data)
+    # lines = []
+    # bottom = [0] * len(results)
+    # for i in range(0, len(data)):
+    #     lines.append(plt.bar(ind, data[i], bottom=bottom, width=width)[0])
+    #     for k, x in enumerate(data[i]):
+    #         bottom[k] += x
+    # plt.legend(lines, keys)
+    # plt.title("Zwei Spiele in Filderstadt (t1=237min; t2=67min)")
+    # plt.show()
+
+    # json.dump(store.serializable(), open("new.json", "w"), indent=1)
+
+
+from collections import defaultdict
+import matplotlib.pyplot as plt
+from util.meta_temp import CONFIG_NAMES
+
+keys = [
+    "simu",
+    "question",
+    "image",
+    "audio",
+    "video",
+    "other",
+    "map",
+    # "error"
+]
+
+loc_keys = [
+    "question",
+    "image",
+    "audio",
+    "video"
+]
+
+
+def get_data(store, relative_values=True, sort=True, show_errors=False):
+    places = defaultdict(list)
+
+    for log in store.get_all():
+        if not log.analysis() == analyzers.ActivityMapper:
+            continue
+        result = defaultdict(lambda: 0)
+        for i in log.get()['track']:
+            duration = i['properties']['end_timestamp'] - i['properties']['start_timestamp']
+            result[i['properties']['activity_type']] += duration
+        print(json.dumps(result, indent=4))
+        total = sum(result.values())
+        print(total)
+        percentage = defaultdict(lambda: 0)
+        minutes = defaultdict(lambda: 0)
+        for i in result:
+            percentage[i] = result[i] / total
+            minutes[i] = result[i] / 60_000
+        print(json.dumps(percentage, indent=4))
+        if not 'error' in result or show_errors:
+            if relative_values:
+                places[log.get()['instance']].append(percentage)
+            else:
+                places[log.get()['instance']].append(minutes)
+    if sort:
+        for place in places:
+            places[place] = sorted(places[place], key=lambda item: item['map'])
+    return places
+
+
+whitelist = ['16fc3117-61db-4f50-b84f-81de6310206f', '5e64ce07-1c16-4d50-ac4e-b3117847ea43',
+             '90278021-4c57-464e-90b1-d603799d07eb', 'ff8f1e8f-6cf5-4a7b-835b-5e2226c1e771']
+
+
+def get_data_distance(store, relative_values=True, sort=True, show_errors=False):
+    places = defaultdict(list)
+
+    for log in store.get_all():
+        if not log.analysis() == analyzers.ActivityMapper:
+            continue
+        result = defaultdict(lambda: 0)
+        for i in log.get()['track']:
+            coords = i['coordinates']
+            if len(coords) > 1:
+                distance = calc_distance(coords)
+                result[i['properties']['activity_type']] += distance
+        total = sum(result.values())
+        percentage = defaultdict(lambda: 0)
+        for i in result:
+            if not total == 0:
+                percentage[i] = result[i] / total
+        if not 'error' in result or show_errors:
+            if relative_values:
+                places[log.get()['instance']].append(percentage)
+            else:
+                places[log.get()['instance']].append(result)
+    if sort:
+        for place in places:
+            places[place] = sorted(places[place], key=lambda item: item['map'])
+    return places
+
+
+def get_all_data(store, sort=False, relative=True):
+    places = defaultdict(list)
+    simu_distribution = defaultdict(lambda: 0)
+    # divisiors = {"time":60_000, "space":1000000}
+    for log in store.get_all():
+        if not log.analysis() == analyzers.ActivityMapper:
+            continue
+        result = defaultdict(lambda: defaultdict(lambda: 0))
+        for i in log.get()['track']:
+            coords = i['coordinates']
+            if len(coords) > 1:
+                distance = calc_distance(coords)
+            else:
+                distance = 0.0
+            result["space"][i['properties']['activity_type']] += distance
+            duration = i['properties']['end_timestamp'] - i['properties']['start_timestamp']
+            result["time"][i['properties']['activity_type']] += duration
+        total_space = sum(result["space"].values())
+        total_time = sum(result["time"].values())
+        percentage = defaultdict(lambda: defaultdict(lambda: 0))
+        total = defaultdict(lambda: defaultdict(lambda: 0))
+        for i in result["space"]:
+            if not total_space == 0:
+                percentage[i]["space"] = result["space"][i] / total_space
+            else:
+                percentage[i]["space"] = 0
+            if not total_time == 0:
+                percentage[i]["time"] = result["time"][i] / total_time
+            else:
+                percentage[i]["time"] = 0
+            for t in ("space", "time"):
+                # total[i][t] += (result[t][i] / divisiors[t])
+                total[i][t] += result[t][i]
+        print(percentage)
+        if not 'error' in result:
+            if relative:
+                value = percentage
+            else:
+                value = total
+            places[log.get()['instance']].append(value)
+        simus = defaultdict(lambda: 0)
+        for item in log.get()['boards']:
+            if item["extra_data"]["activity_type"] == "simu":
+                simus[item["board_id"]] += 1
+        simu_distribution[len(simus)] += 1
+
+    if sort:
+        for place in places:
+            places[place] = sorted(places[place], key=lambda item: item['map']['time'])
+    print(simu_distribution)
+    return places
+
+
+def stack_data(keys, places, type="space"):
+    divisiors = {"time": 60_000, "space": 1000}
+    # divisiors = {"time": 1, "space": 1}
+    dummy = [0] * len(keys)
+    results = []
+    sites = []
+    for i in sorted(places):
+        if not i in whitelist:
+            continue
+        place = sorted(places[i], key=lambda item: item['map'][type])
+        for j in place:
+            ordered = []
+            for k in keys:
+                if k in j:
+                    ordered.append(j[k][type] / divisiors[type])
+                else:
+                    ordered.append(0)
+            print(sum(ordered))
+            # if sum(ordered) > 0.9 and sum(ordered) < 4000 and sum(ordered)>10:
+            if sum(ordered) > 0.9 and sum(ordered) < 100:
+                # print(sum(ordered), 1-sum(ordered))
+                # if sum(ordered)<1:
+                #     ordered[-2] = 1-sum(ordered[:-2], ordered[-1])
+                results.append(ordered)
+        results.append(dummy)
+        sites.append(CONFIG_NAMES[i] if i in CONFIG_NAMES else "---")
+    return results, sites
+
+
+def plot_data(places, keys):
+    results, sites = stack_data(keys, places)
+    dpi = 86.1
+    plt.figure(figsize=(1280 / dpi, 720 / dpi))
+    size = len(results)
+    print("{} elements total".format(size))
+    ind = np.arange(size)
+    width = 1
+    # print(results)
+    data = list(zip(*results))
+    # print(data)
+    lines = []
+    bottom = [0] * size
+    plt.ticklabel_format(useMathText=False)
+    for i in range(0, len(data)):
+        lines.append(plt.bar(ind, data[i], bottom=bottom, width=width)[0])
+        for k, x in enumerate(data[i]):
+            bottom[k] += x
+    plt.legend(lines, keys)
+    plt.title(", ".join(sites))
+    # plt.show()
+    dpi = 86
+    plt.savefig("space_abs_{}.png".format(size), dpi=dpi, bbox_inches="tight")
+
+
+colors = {
+    "simu": "blue",
+    "question": "orange",
+    "image": "green",
+    "audio": "red",
+    "video": "purple",
+    "other": "brown",
+    "map": "violet",
+    # "error":"grey",
+    "tasks": "olive",
+}
+markers = [".", "o", "x", "s", "*", "D", "p", ",", "<", ">", "^", "v", "1", "2", "3", "4"]
+
+
+def plot_time_space(time_data, space_data, keys):
+    # assuming time_data and space_data are in same order!
+    marker = 0
+    for id in time_data:
+        for k in keys:
+            for i in range(len(time_data[id])):
+                print(time_data[id][i][k], space_data[id][i][k])
+                plt.plot(time_data[id][i][k], space_data[id][i][k], color=colors[k], marker=markers[marker])
+        marker += 1
+    plt.show()
+
+
+# plt.cla()
+# plt.clf()
+# plt.close()
+
+def group_locationbased_tasks(data):
+    for id in data:
+        for log in data[id]:
+            loc = {"space": 0, "time": 0}
+            for k in log:
+                if k in loc_keys:
+                    for i in ["space", "time"]:
+                        loc[i] += log[k][i]
+            log["tasks"] = loc
+
+
+def plot_time_space_rel(combined, keys):
+    groups = defaultdict(list)
+    keys = list(keys)
+    keys.remove("other")
+    for i in loc_keys:
+        keys.remove(i)
+    keys.append("tasks")
+    ids = []
+    group_locationbased_tasks(combined)
+    for k in keys:
+        for id in sorted(combined):
+            if id not in whitelist:
+                continue
+            if not id in ids:
+                ids.append(id)
+            group = 0.0
+            count = 0
+            for item in combined[id]:
+                if k in item:
+                    time = item[k]["time"] / 1000
+                    distance = item[k]["space"]
+                    if time > 0:
+                        group += (distance / time)
+                        count += 1
+                    else:
+                        print("div by zero", distance, time)
+            if count > 0:
+                groups[k].append(group / count)
+            else:
+                groups[k].append(0.0)
+    print(ids)
+    ind = np.arange(len(ids))
+    width = .7 / len(groups)
+    print(ind)
+    print(json.dumps(groups, indent=1))
+    bars = []
+    dpi = 200
+    plt.figure(figsize=(1280 / dpi, 720 / dpi))
+    fig, ax = plt.subplots()
+    for k in groups:
+        print(groups[k])
+        if not len(groups[k]):
+            groups[k].append(0)
+        ind = ind + (width)
+        bars.append(ax.bar((ind + width * len(groups) / 2), groups[k], width, color=colors[k]))
+    ax.set_xticks(ind + width / 2)
+    ax.set_xticklabels(list([CONFIG_NAMES[i] if i in CONFIG_NAMES else "---" for i in ids]))
+    kmh = plt.hlines((1 / 3.6), 0.3, 4.2, linestyles="dashed", label="1 km/h", linewidths=1)
+    plt.legend(bars + [kmh], keys + [kmh.get_label()])
+    print(combined.keys(), ids)
+    print([CONFIG_NAMES[i] if i in CONFIG_NAMES else "---" for i in ids])
+    # plt.show()
+    dpi = 200
+    plt.savefig("speed2.png", dpi=dpi)
+
+
+
+
+
+# plot_time_space_rel(temporal_data_rel, spatial_data_rel, keys)
+
+# plot_data(combined, keys)
+# plot_data(get_data_distance(store,relative_values=False), keys)
@@ -13,9 +13,9 @@
     ],
     "analyzers": {
         "analyzers": [
-            "BiogamesCategorizer",
-            "ActivityMapper",
-            "SimulationFlagsAnalyzer"
+            "SimulationCategorizer",
+            "SimulationOrderAnalyzer",
+            "ActivityMapper"
         ]
     },
     "dis":[
@@ -67,14 +67,19 @@
             "action":"PAUSE"
         }
     },
-    "coordinates": "location.coordinates"
+    "coordinates": "location.coordinates",
+    "metadata":{
+        "timestamp": "timestamp",
+        "gamefield": "instance_id",
+        "user": "player_group_name"
+    }
     },
     "source":{
         "type": "Biogames",
         "url": "http://0.0.0.0:5000/game2/instance/log/list/",
         "login_url": "http://localhost:5000/game2/auth/json-login",
-        "username": "dev",
-        "password": "dev",
+        "username": "ba",
+        "password": "853451",
         "host":"http://0.0.0.0:5000"
     }
 }
@@ -62,7 +62,7 @@ distribution = defaultdict(lambda: 0)
 finished_and_simu = defaultdict(list)
 files = {}
 actions_dist = defaultdict(list)
-with open('/home/agp8x/git/uni/ma/project/data/0000_ref') as src:
+with open('/home/clemens/git/ma/test/src') as src:
     for line in src:
         line = line.strip()
         instance_id, log = get_json(line)
log_analyzer.py (244 lines)
@@ -2,14 +2,16 @@ import json
 import logging
 from typing import List

-import numpy as np
-
 import analyzers
-from analyzers import get_renderer, Analyzer, render, Store
+from analyzers import get_renderer, render
 from analyzers.analyzer import ResultStore
+from analyzers.analyzer.default import write_logentry_count_csv, write_simulation_flag_csv
+from analyzers.render import wip
 from analyzers.render.default import LogEntryCountCSV
+from analyzers.render.wip import time_distribution, plot_data
 from analyzers.settings import LogSettings, load_settings
 from loaders import LOADERS
+from util.processing import grep, run_analysis, src_file

 logging.basicConfig(format='%(levelname)s %(name)s:%(message)s', level=logging.DEBUG)
 log: logging.Logger = logging.getLogger(__name__)
@@ -18,95 +20,9 @@ logging.getLogger('requests').setLevel(logging.WARN)
 logging.getLogger("urllib3").setLevel(logging.WARNING)


-def process_log(logfile: str, settings: LogSettings) -> List[Analyzer]:
-    loader = LOADERS[settings.log_format]()
-    try:
-        loader.load(logfile)
-    except BaseException as e:
-        raise RuntimeError(e)
-    analyzers: List[Analyzer] = []
-    log.debug("build analyzers")
-    for analyzer in settings.analyzers:
-        analyzers.append(analyzer(settings))
-    log.debug("process entries")
-    for entry in loader.get_entry():
-        for analyzer in analyzers:
-            try:
-                if analyzer.process(entry):
-                    break
-            except KeyError as e:
-                log.exception(e)
-    return analyzers
-
-
-def run_analysis(log_ids: list, settings):
-    store: ResultStore = ResultStore()
-    for log_id in log_ids:
-        for analysis in process_log(log_id, settings):
-            log.info("* Result for " + analysis.name())
-            analysis.result(store, name=log_id)
-    return store
-
-
-def load_ids(name: str):
-    log_ids = []
-    with open(name) as src:
-        for line in src:
-            line = line.strip()
-            log_ids.append(line)
-    return log_ids
-
-
 def urach_logs(log_ids, settings):
-    return ["data/inst_{id}.{format}".format(id=log_id, format=settings.log_format) for log_id in log_ids]
+    # return ["data/inst_{id}.{format}".format(id=log_id, format=settings.log_format) for log_id in log_ids]
+    return ["data/{id}.{format}".format(id=log_id, format=settings.log_format) for log_id in log_ids]


-def write_logentry_count_csv():
-    global cat, data, lines, csvfile
-    LogEntryCountCSV.summary = None
-    for cat in store.get_categories():
-        data = store.get_category(cat)
-        render(analyzers.LogEntryCountAnalyzer, data, name=cat)
-    if LogEntryCountCSV.summary:
-        headers = []
-        lines = []
-        for name in LogEntryCountCSV.summary:
-            data = LogEntryCountCSV.summary[name]
-            for head in data:
-                if not head in headers:
-                    headers.append(head)
-            line = [name]
-            for head in headers:
-                line.append(data[head]) if head in data else line.append(0)
-            lines.append(line)
-        import csv
-
-        with open('logentrycount.csv', 'w', newline='') as csvfile:
-            writer = csv.writer(csvfile, quoting=csv.QUOTE_NONE)
-            writer.writerow(["name"] + [h.split(".")[-1] for h in headers])
-            for line in lines:
-                writer.writerow(line)
-
-
-def write_simulation_flag_csv():
-    global csvfile, result, i
-    from datetime import datetime
-    json.dump(store.serializable(), open("simus.json", "w"), indent=2)
-    with open("simus.csv", "w") as csvfile:
-        csvfile.write("instanceconfig,log,simu,answered,universe_state,selected_actions,timestamp,time\n")
-        for key in store.get_store():
-            csvfile.write("{}\n".format(key))
-            for result in store.store[key]:
-                csvfile.write(",{}\n".format(result.name))
-                for i in result.get():
-                    csvfile.write(",,{},{},{},{},{},{}\n".format(
-                        i['answers']['@id'],
-                        i['answers']['answered'],
-                        len(i['answers']['universe_state']) if i['answers']['universe_state'] else 0,
-                        len(i['selected_actions']) if i['selected_actions'] else 0,
-                        i['timestamp'],
-                        str(datetime.fromtimestamp(i['timestamp'] / 1000))
-                    ))
-
-
 if __name__ == '__main__':
@@ -119,19 +35,35 @@ if __name__ == '__main__':
         # "91abfd4b31a5562b1c66be37d9",
         # "597b704fe9ace475316c345903",
         # "e01a684aa29dff9ddd9705edf8",
+        "597b704fe9ace475316c345903",
+        "e01a684aa29dff9ddd9705edf8",
+        "fbf9d64ae0bdad0de7efa3eec6",
         # "fbf9d64ae0bdad0de7efa3eec6",
-        "fe1331481f85560681f86827ec",
+        "fe1331481f85560681f86827ec", # urach
         # "fe1331481f85560681f86827ec"]
-        "fec57041458e6cef98652df625", ]
-        ,settings)
-    store: ResultStore = run_analysis(log_ids_urach, settings)
+        "fec57041458e6cef98652df625",
+        ]
+        , settings)
+    log_ids_gf = grep(["9d11b749c78a57e786bf5c8d28", # filderstadt
+                       "a192ff420b8bdd899fd28573e2", # eichstätt
+                       "3a3d994c04b1b1d87168422309", # stadtökologie
+                       "fe1331481f85560681f86827ec", # urach
+                       "96f6d9cc556b42f3b2fec0a2cb7ed36e" # oberelsbach
+                       ],
+                      "/home/clemens/git/ma/test/src",
+                      settings)
+    log_ids = src_file("/home/clemens/git/ma/test/filtered_5_actions")
+
+    #store: ResultStore = run_analysis(log_ids_gf, settings, LOADERS)
+    #store: ResultStore = run_analysis(log_ids, settings, LOADERS)
+
     if False:
         for r in get_renderer(analyzers.LocomotionActionAnalyzer):
             r().render(store.get_all())
     if False:
         render(analyzers.LocationAnalyzer, store.get_all())
     # print(json.dumps(store.serializable(), indent=1))
-    if True:
+    if False:
         for cat in store.get_categories():
             render(analyzers.ActivityMapper, store.get_category(cat), name=cat)
     # render(analyzers.ProgressAnalyzer, store.get_all())
@@ -147,110 +79,30 @@
         data = store.get_category(cat)
         render(analyzers.SimulationOrderAnalyzer, data, name=cat)
     if False:
-        write_logentry_count_csv()
+        write_logentry_count_csv(LogEntryCountCSV, store, render, analyzers)
     if False:
-        write_simulation_flag_csv()
+        write_simulation_flag_csv(store)


-    def calc_distance(geojson: str):
-        from shapely.geometry import LineString
-        from shapely.ops import transform
-        from functools import partial
-        import pyproj
-        track = LineString(json.loads(geojson)['coordinates'])
-        project = partial(
-            pyproj.transform,
-            pyproj.Proj(init='EPSG:4326'),
-            pyproj.Proj(init='EPSG:32633'))
-        return transform(project, track).length
-
-
     if False:
-        # json.dump(store.serializable(), open("new.json", "w"), indent=1)
-        from collections import defaultdict
-
-        keys = [
-            "simu",
-            "question",
-            "image",
-            "audio",
-            "video",
-            "other",
-            "map"
-        ]
-        import matplotlib.pyplot as plt
-
-        # results = []
-
-        places = defaultdict(list)
-
-        for log in store.get_all():
-            result = defaultdict(lambda: 0)
-            for i in log.get()['track']:
-                duration = i['properties']['end_timestamp'] - i['properties']['start_timestamp']
-                result[i['properties']['activity_type']] += duration
-            print(json.dumps(result, indent=4))
-            total = sum(result.values())
-            print(total)
-            percentage = defaultdict(lambda: 0)
-            minutes = defaultdict(lambda: 0)
-            for i in result:
-                percentage[i] = result[i] / total
-                minutes[i] = result[i] / 60_000
-            print(json.dumps(percentage, indent=4))
-            if not 'error' in result:
-                # places[log.get()['instance']].append(percentage)
-                places[log.get()['instance']].append(minutes)
-
-        for place in places:
-            places[place] = sorted(places[place], key=lambda item: item['map'])
-
-        dummy = [0] * len(keys)
-        results = []
-        sites = []
-        from util.meta_temp import CONFIG_NAMES
-
-        for i in places:
-            for j in places[i]:
-                ordered = []
-                for k in keys:
-                    ordered.append(j[k])
-                results.append(ordered)
-            results.append(dummy)
-            sites.append(CONFIG_NAMES[i] if i in CONFIG_NAMES else "---")
-
-        size = len(results)
-        ind = np.arange(size)
-        width = 0.9
-        print(results)
-        data = list(zip(*results))
-        print(data)
-        lines = []
-        bottom = [0] * len(results)
-        for i in range(0, len(data)):
-            lines.append(plt.bar(ind, data[i], bottom=bottom, width=width)[0])
-            for k, x in enumerate(data[i]):
-                bottom[k] += x
-        plt.legend(lines, keys)
-        plt.title(", ".join(sites))
-        plt.show()
-
-        # size = len(results)
-        # ind = np.arange(size)
-        # width = 0.9
-        # print(results)
-        # data = list(zip(*results))
-        # print(data)
-        # lines = []
-        # bottom = [0] * len(results)
-        # for i in range(0, len(data)):
-        #     lines.append(plt.bar(ind, data[i], bottom=bottom, width=width)[0])
-        #     for k, x in enumerate(data[i]):
-        #         bottom[k] += x
-        # plt.legend(lines, keys)
-        # plt.title("Zwei Spiele in Filderstadt (t1=237min; t2=67min)")
-        # plt.show()
+        time_distribution(store)

+    if True:
+        # spatial_data = get_data_distance(store,relative_values=False)
+        # temporal_data = get_data(store,relative_values=False)
+        # spatial_data_rel = get_data_distance(store,relative_values=True)
+        # temporal_data_rel = get_data(store,relative_values=True)
+        # temporal_data_rel = json.load(open("temporal_rel.json"))
+        # spatial_data_rel = json.load(open("spatial_rel.json"))
+        # import IPython
+        # IPython.embed()
+        # print(json.dumps(get_all_data(store)))
+        # json.dump(get_all_data(store), open("combined.json", "w"))
+        # combined = get_all_data(store, sort=True, relative=True)
+        # json.dump(combined, open("combined_rel.json", "w"))
+        # combined = json.load(open("combined_rel.json"))
+        combined = json.load(open("combined_total.json"))
+        # plot_time_space_rel(combined, keys)
+        plot_data(combined, wip.keys)

     # for analyzers in analyzers:
@@ -4,4 +4,5 @@ matplotlib==2.1.0
 osmnx==0.6
 networkx==2.0
 pydot==1.2.3
 scipy==1.0.0
+ipython==6.2.1
@@ -12,87 +12,14 @@
     <script src="https://rawgit.com/Leaflet/Leaflet.heat/gh-pages/dist/leaflet-heat.js"></script>

     <script src="my.js"></script>
-    <style>
-        .mapDiv {
-            width: 1024px;
-            height: 768px;
-        }
-
-        .board {
-            width: 32px;
-            height: 32px;
-            display: inline-block;
-            position: relative;
-        }
-
-        .board img {
-            max-width: 100%;
-            max-height: 100%;
-            position: absolute;
-            /*bottom: 0px;*/
-        }
-
-        .board img:hover {
-            max-width: inherit;
-            max-height: inherit;
-            z-index: 99;
-            top: 20px;
-        }
-
-        .highlight {
-            /*what a nice way to highlight*/
-            display: none;
-        }
-
-        .simu {
-            background-color: blue;
-        }
-
-        .question {
-            background-color: orange;
-        }
-
-        .image {
-            background-color: green;
-        }
-
-        .audio {
-            background-color: red;
-        }
-
-        .video {
-            background-color: purple;
-        }
-
-        .other {
-            background-color: brown;
-        }
-
-        .map {
-            background-color: violet;
-        }
-
-        .error {
-            background-color: grey;
-        }
-
-        ul {
-            list-style-type: none;
-            overflow: auto;
-            overflow-y: hidden;
-            display: inline-block;
-            /*max-width:100%;
-            margin: 0 0 1em;
-            white-space: nowrap;
-            height:200px;*/
-        }
-
-        li {
-            display: inline-block;
-            vertical-align: top;
-        }
-
-    </style>
-    <div style="font-size:0.1px;position:absolute;bottom:0;">OSM Logo: CC-BY-SA
+    <link href="style.css" rel="stylesheet"/>
+    <main>
+        <div class="mapDiv" id="mainMap"></div>
+        <div class="sequenceContainer">
+            <div class="sequence"></div>
+        </div>
+    </main>
+
+    <!--div style="font-size:0.1px;position:absolute;bottom:0;">OSM Logo: CC-BY-SA
         http://wiki.openstreetmap.org/wiki/File:Mag_map-120x120.png
-    </div>
+    </div-->
@@ -1,24 +1,27 @@
-//$.getJSON("data/ff8f1e8f-6cf5-4a7b-835b-5e2226c1e771_03b9b6b4-c8ab-4182-8902-1620eebe8889.json", function (data) {
-$.getJSON("data/ff8f1e8f-6cf5-4a7b-835b-5e2226c1e771_de7df5b5-edd5-4070-840f-68854ffab9aa.json", function (data) {
+$.getJSON("data/ff8f1e8f-6cf5-4a7b-835b-5e2226c1e771_03b9b6b4-c8ab-4182-8902-1620eebe8889.json", function (data) { //urach
+//$.getJSON("data/ff8f1e8f-6cf5-4a7b-835b-5e2226c1e771_de7df5b5-edd5-4070-840f-68854ffab9aa.json", function (data) { //urach
+//$.getJSON("data/90278021-4c57-464e-90b1-d603799d07eb_07da99c9-398a-424f-99fc-2701763a63e9.json", function (data) { //eichstätt
+//$.getJSON("data/13241906-cdae-441a-aed0-d57ebeb37cac_d33976a6-8a56-4a63-b492-fe5427dbf377.json", function (data) { //stadtökologie
+//$.getJSON("data/5e64ce07-1c16-4d50-ac4e-b3117847ea43_2f664d7b-f0d8-42f5-8731-c034ef86703e.json", function (data) { //filderstadt
     var images = {};
-    var mapContainer = $("<div />", {id: "mainMap", class: "mapDiv"});
-    mapContainer.appendTo("body");
     var tiles = {
-        "osm": L.tileLayer('http://{s}.tile.osm.org/{z}/{x}/{y}.png', {
-            attribution: '© <a href="http://osm.org/copyright">OpenStreetMap</a> contributors',
-        }),
         "openstreetmap": L.tileLayer('https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png', {
+            maxNativeZoom: 19,
+            maxZoom: 24,
             attribution: '© <a href="http://www.openstreetmap.org/copyright">OpenStreetMap</a>'
         }),
         "esri sat": L.tileLayer('https://server.arcgisonline.com/ArcGIS/rest/services/World_Imagery/MapServer/tile/{z}/{y}/{x}', {
+            maxNativeZoom: 19,
+            maxZoom: 24,
             attribution: 'Tiles © Esri — Source: Esri, i-cubed, USDA, USGS, AEX, GeoEye, Getmapping, Aerogrid, IGN, IGP, UPR-EGP, and the GIS User Community'
         }),
-        "google sat": L.tileLayer('http://{s}.google.com/vt/lyrs=s&x={x}&y={y}&z={z}', {
-            maxZoom: 20,
+        "google sat": L.tileLayer('https://{s}.google.com/vt/lyrs=s&x={x}&y={y}&z={z}', {
+            maxNativeZoom: 20,
+            maxZoom: 24,
             subdomains: ['mt0', 'mt1', 'mt2', 'mt3']
         })
     };
-    var map = L.map("mainMap", {layers: [tiles.osm], maxZoom: 22, maxNativeZoom: 19});
+    var map = L.map("mainMap", {layers: [tiles.openstreetmap]});

     function styleTrack(feature) {
         var styles = {};
@@ -68,23 +71,32 @@ $.getJSON("data/ff8f1e8f-6cf5-4a7b-835b-5e2226c1e771_de7df5b5-edd5-4070-840f-688
     var heat = L.heatLayer(coords);
     L.control.layers(tiles, {"heatmap": heat}).addTo(map);

-    var marker = null;
     var list = $("<ul />");
+    var current = {
+        "pos":data["boards"][1].coordinate.coordinates
+    };
+    console.log(current);
+    var marker = L.marker([current.pos[1], current.pos[0]]).addTo(map);
     $.each(data["boards"], function (index, entry) {
         //console.log(index, entry);
         var item = $("<li>", {class: entry.extra_data.activity_type});
         var container = $("<div>", {class: "board"});
-        var image = $("<img>", {src: entry.image.replace("static/progress/", ""), height: 200});
+        var image = $("<img>", {src: entry.image.replace("static/progress/", "")});
         image.attr("data-time", entry.timestamp);
         image.hover(function () {
-            marker = L.geoJSON(entry.coordinate).addTo(map);
+            marker.setLatLng([entry.coordinate.coordinates[1], entry.coordinate.coordinates[0]]);
         }, function () {
-            map.removeLayer(marker);
+            marker.setLatLng(current.pos.coordinates[1], current.pos.coordinates[0]);
+        });
+        image.click(function (e) {
+            current.board = image;
+            current.pos = entry.coordinate;
         });
         images[entry.timestamp] = {image: image, coordinate: entry.coordinate};
         image.appendTo(container);
         container.appendTo(item);
         item.appendTo(list);
     });
-    list.appendTo("body");
+    current.board=images[data["boards"][1].timestamp];
+    list.appendTo(".sequence");
 });
@@ -0,0 +1,105 @@
+/*.mapDiv {
+    width: 1024px;
+    height: 768px;
+}*/
+
+
+
+.highlight {
+    /*what a nice way to highlight*/
+    display: none;
+}
+
+.simu {
+    background-color: blue;
+}
+
+.question {
+    background-color: orange;
+}
+
+.image {
+    background-color: green;
+}
+
+.audio {
+    background-color: red;
+}
+
+.video {
+    background-color: purple;
+}
+
+.other {
+    background-color: brown;
+}
+
+.map {
+    background-color: violet;
+}
+
+.error {
+    background-color: grey;
+}
+
+
+.board {
+    width: 32px;
+    height: 32px;
+    display: inline-block;
+}
+
+.board img {
+    max-width: 32px;
+    max-height: 32px;
+    position: absolute;
+    /*bottom: 0px;*/
+}
+
+.board:hover img{
+    max-width: 205px;
+    max-height: 295px;
+    z-index: 99;
+    top: 5px;
+    right:0px;
+}
+ul {
+    list-style-type: none;
+    overflow: auto;
+    overflow-y: hidden;
+    display: inline-block;
+    /*max-width:100%;
+    margin: 0 0 1em;
+    white-space: nowrap;
+    height:200px;*/
+}
+
+li {
+    display: inline-block;
+    vertical-align: top;
+    padding: 2px;
+    margin-bottom: 2px;
+}
+
+body{
+    height: 100%;
+    padding:0;
+    margin:0;
+}
+
+main{
+    display: flex;
+    flex-direction: column;
+    height:100%;
+}
+
+.mapDiv {
+    flex-grow:1;
+}
+.sequenceContainer{
+    flex-grow: 0;
+    min-height:300px;
+    padding-right: 210px;
+    position: relative;
+
+}
@@ -0,0 +1,12 @@
+def calc_distance(geojson: str):
+    from shapely.geometry import LineString
+    from shapely.ops import transform
+    from functools import partial
+    import pyproj
+    import json
+    track = LineString(json.loads(geojson)['coordinates'])
+    project = partial(
+        pyproj.transform,
+        pyproj.Proj(init='EPSG:4326'),
+        pyproj.Proj(init='EPSG:32633'))
+    return transform(project, track).length
@@ -0,0 +1,66 @@
+import logging
+from typing import List
+
+from analyzers.analyzer import ResultStore, Analyzer
+from analyzers.settings import LogSettings
+
+log: logging.Logger = logging.getLogger(__name__)
+
+
+def process_log(logfile: str, settings: LogSettings, loaders) -> List[Analyzer]:
+    loader = loaders[settings.log_format]()
+    try:
+        loader.load(logfile)
+    except BaseException as e:
+        raise RuntimeError(e)
+    analyzers: List[Analyzer] = []
+    log.debug("build analyzers")
+    for analyzer in settings.analyzers:
+        analyzers.append(analyzer(settings))
+    log.debug("process entries")
+    for entry in loader.get_entry():
+        for analyzer in analyzers:
+            try:
+                if analyzer.process(entry):
+                    break
+            except KeyError as e:
+                log.exception(e)
+    return analyzers
+
+
+def run_analysis(log_ids: list, settings, loaders):
+    store: ResultStore = ResultStore()
+    for log_id in log_ids:
+        for analysis in process_log(log_id, settings, loaders):
+            log.info("* Result for " + analysis.name())
+            analysis.result(store, name=log_id)
+    return store
+
+
+def load_ids(name: str):
+    log_ids = []
+    with open(name) as src:
+        for line in src:
+            line = line.strip()
+            log_ids.append(line)
+    return log_ids
+
+
+def grep(log_ids, source, settings):
+    logs = []
+    with open(source) as src:
+        lines = src.readlines()
+    for id in log_ids:
+        for line in lines:
+            if id in line:
+                logs.append(line.strip())
+    return logs
+
+
+def src_file(filename):
+    log_ids = []
+    with open(filename) as src:
+        for line in src:
+            line = line.strip()
+            log_ids.append(line)
+    return log_ids