WIP snapshot

parent e1105244f4
commit b40efa4bbb
@@ -51,7 +51,7 @@ __MAPPING__ = {
         StoreRender
     ],
     SimulationOrderAnalyzer: [
-        JSONRender,
+        #JSONRender,
         # SimulationOrderRender,
         SimulationGroupRender
     ]
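Note on this hunk: __MAPPING__ associates each analyzer class with the render classes that can display its results, and the change disables SimulationOrderAnalyzer's JSON dump in favor of the grouped plot. A minimal sketch of how such a mapping is typically consumed; get_renderer exists elsewhere in this repo, but its body below is an assumption for illustration:

    # Hypothetical lookup: render classes registered for a given analyzer class.
    def get_renderer(analyzer):
        return __MAPPING__.get(analyzer, [])

    # consumed later in log_analyzer.py as:
    #   for r in get_renderer(analyzers.LocomotionActionAnalyzer):
    #       r().render(store.get_all())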
@@ -50,7 +50,7 @@ class ResultStore:
         :return:
         """
         result = []
-        for key in self.store:
+        for key in sorted(self.store):
             result += self.store[key]
         return result
 
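Note on this hunk: iterating a dict directly yields keys in insertion order (arbitrary order before Python 3.7), so sorted(self.store) makes get_all() return results in a stable, key-ordered sequence. A minimal self-contained sketch of the effect; MiniStore is a stand-in, not the project's ResultStore:

    # Stand-in store illustrating why sorted() makes get_all() deterministic.
    class MiniStore:
        def __init__(self):
            self.store = {}

        def add(self, key, results):
            self.store.setdefault(key, []).extend(results)

        def get_all(self):
            result = []
            for key in sorted(self.store):  # key-ordered, not insertion-ordered
                result += self.store[key]
            return result

    s = MiniStore()
    s.add("b", [2, 3])
    s.add("a", [1])
    print(s.get_all())  # [1, 2, 3] regardless of which key was added first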
@@ -185,7 +185,13 @@ class SimulationOrderRender(Render):
 
 class SimulationGroupRender(Render):
     def render(self, results: List[Result], name=None):
-        data = [r.get() for r in self.filter(results)]
+        #data = [r.get() for r in self.filter(results)]
+        data = []
+        for r in self.filter(results):
+            raw = r.get()
+            if len(raw) < 6:
+                raw = [0] + raw
+            data.append(raw)
         print(name, len(data))
         graph_plot(list(data), ylabel="simulation retries", title="sequential simulation retries", rotation=None, name=name)
         #graph_fit(list(data), name=name)
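Note on this hunk: the new loop left-pads any result row shorter than six entries with a zero so all series handed to graph_plot line up column-wise. A hedged generalization of that idea; the commit pads exactly one leading zero, whereas pad_left below pads up to a target length:

    # Generalized left-padding helper; the commit's inline version is the
    # special case of rows that are exactly one element short.
    def pad_left(row, length=6, fill=0):
        missing = max(0, length - len(row))
        return [fill] * missing + row

    print(pad_left([5, 4, 3, 2, 1]))     # [0, 5, 4, 3, 2, 1]
    print(pad_left([9, 8, 7, 6, 5, 4]))  # already long enough: unchanged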
@@ -14,6 +14,7 @@
     "analyzers": {
         "analyzers": [
             "SimulationCategorizer",
+            "SimulationOrderAnalyzer",
             "ActivityMapper"
         ]
     },
log_analyzer.py
@@ -63,17 +63,18 @@ if __name__ == '__main__':
     # "fec57041458e6cef98652df625", ]
     log_ids = []
     # with open("/home/clemens/git/ma/test/filtered") as src:
-    with open("/home/clemens/git/ma/test/filtered") as src:
-        for line in src:
-            line = line.strip()
-            log_ids.append(line)
+    if False:
+        with open("/home/clemens/git/ma/test/filtered_5_actions") as src:
+            for line in src:
+                line = line.strip()
+                log_ids.append(line)
     store: ResultStore = ResultStore()
     for log_id in log_ids:
         for analysis in process_log(log_id, settings):
             log.info("* Result for " + analysis.name())
             # print(analysis.result())
             # print(analysis.render())
             analysis.result(store)
     if False:
         for r in get_renderer(analyzers.LocomotionActionAnalyzer):
             r().render(store.get_all())
@@ -120,8 +121,10 @@ if __name__ == '__main__':
                 writer.writerow(line)
 
     if True:
-        #json.dump(store.serializable(), open("new.json", "w"), indent=1)
+        # json.dump(store.serializable(), open("new.json", "w"), indent=1)
         from collections import defaultdict
+        import matplotlib.pyplot as plt
+        from util.meta_temp import CONFIG_NAMES
 
         keys = [
             "simu",
@@ -130,81 +133,243 @@ if __name__ == '__main__':
             "audio",
             "video",
             "other",
-            "map"
+            "map",
+            # "error"
         ]
-        import matplotlib.pyplot as plt
-        #results = []
-
-        places = defaultdict(list)
-
-        for log in store.get_all():
-            result = defaultdict(lambda: 0)
-            for i in log.get()['track']:
-                duration = i['properties']['end_timestamp'] - i['properties']['start_timestamp']
-                result[i['properties']['activity_type']] += duration
-            print(json.dumps(result, indent=4))
-            total = sum(result.values())
-            print(total)
-            percentage = defaultdict(lambda: 0)
-            minutes = defaultdict(lambda: 0)
-            for i in result:
-                percentage[i] = result[i] / total
-                minutes[i] = result[i] / 60_000
-            print(json.dumps(percentage, indent=4))
-            if not 'error' in result:
-                #places[log.get()['instance']].append(percentage)
-                places[log.get()['instance']].append(minutes)
-
-        for place in places:
-            places[place] = sorted(places[place], key=lambda item: item['map'])
-
-        dummy = [0] * len(keys)
-        results = []
-        sites = []
-        from util.meta_temp import CONFIG_NAMES
-        for i in places:
-            for j in places[i]:
-                ordered = []
-                for k in keys:
-                    ordered.append(j[k])
-                results.append(ordered)
-            results.append(dummy)
-            sites.append(CONFIG_NAMES[i] if i in CONFIG_NAMES else "---")
-
-        size = len(results)
-        ind = np.arange(size)
-        width = 0.9
-        print(results)
-        data = list(zip(*results))
-        print(data)
-        lines = []
-        bottom = [0] * len(results)
-        for i in range(0, len(data)):
-            lines.append(plt.bar(ind, data[i], bottom=bottom, width=width)[0])
-            for k, x in enumerate(data[i]):
-                bottom[k] += x
-        plt.legend(lines, keys)
-        plt.title(", ".join(sites))
-        plt.show()
-
-        #size = len(results)
-        #ind = np.arange(size)
-        #width = 0.9
-        #print(results)
-        #data = list(zip(*results))
-        #print(data)
-        #lines = []
-        #bottom = [0] * len(results)
-        #for i in range(0, len(data)):
-        #    lines.append(plt.bar(ind, data[i], bottom=bottom, width=width)[0])
-        #    for k, x in enumerate(data[i]):
-        #        bottom[k] += x
-        #plt.legend(lines, keys)
-        #plt.title("Zwei Spiele in Filderstadt (t1=237min; t2=67min)")
-        #plt.show()
+        def get_data(store, relative_values=True, sort=True, show_errors=False):
+            places = defaultdict(list)
+
+            for log in store.get_all():
+                if not log.analysis() == analyzers.ActivityMapper:
+                    continue
+                result = defaultdict(lambda: 0)
+                for i in log.get()['track']:
+                    duration = i['properties']['end_timestamp'] - i['properties']['start_timestamp']
+                    result[i['properties']['activity_type']] += duration
+                print(json.dumps(result, indent=4))
+                total = sum(result.values())
+                print(total)
+                percentage = defaultdict(lambda: 0)
+                minutes = defaultdict(lambda: 0)
+                for i in result:
+                    percentage[i] = result[i] / total
+                    minutes[i] = result[i] / 60_000
+                print(json.dumps(percentage, indent=4))
+                if not 'error' in result or show_errors:
+                    if relative_values:
+                        places[log.get()['instance']].append(percentage)
+                    else:
+                        places[log.get()['instance']].append(minutes)
+            if sort:
+                for place in places:
+                    places[place] = sorted(places[place], key=lambda item: item['map'])
+            return places
+
+        from shapely.geometry import LineString
+        from shapely.ops import transform
+        from functools import partial
+        import pyproj
+
+        def calc_distance(coordinates):
+            track = LineString(coordinates)
+            project = partial(
+                pyproj.transform,
+                pyproj.Proj(init='EPSG:4326'),
+                pyproj.Proj(init='EPSG:32633'))
+            return transform(project, track).length
+
+        def get_data_distance(store, relative_values=True, sort=True, show_errors=False):
+            places = defaultdict(list)
+
+            for log in store.get_all():
+                if not log.analysis() == analyzers.ActivityMapper:
+                    continue
+                result = defaultdict(lambda: 0)
+                for i in log.get()['track']:
+                    coords = i['coordinates']
+                    if len(coords) > 1:
+                        distance = calc_distance(coords)
+                        result[i['properties']['activity_type']] += distance
+                total = sum(result.values())
+                percentage = defaultdict(lambda: 0)
+                for i in result:
+                    if not total == 0:
+                        percentage[i] = result[i] / total
+                if not 'error' in result or show_errors:
+                    if relative_values:
+                        places[log.get()['instance']].append(percentage)
+                    else:
+                        places[log.get()['instance']].append(result)
+            if sort:
+                for place in places:
+                    places[place] = sorted(places[place], key=lambda item: item['map'])
+            return places
+
+        def get_all_data(store):
+            places = defaultdict(list)
+
+            for log in store.get_all():
+                if not log.analysis() == analyzers.ActivityMapper:
+                    continue
+                result = defaultdict(lambda: defaultdict(lambda: 0))
+                for i in log.get()['track']:
+                    coords = i['coordinates']
+                    if len(coords) > 1:
+                        distance = calc_distance(coords)
+                    else:
+                        distance = 0.1
+                    result["space"][i['properties']['activity_type']] += distance
+                    duration = i['properties']['end_timestamp'] - i['properties']['start_timestamp']
+                    result["time"][i['properties']['activity_type']] += duration
+                total_space = sum(result["space"].values())
+                total_time = sum(result["time"].values())
+                percentage = defaultdict(lambda: defaultdict(lambda: 0))
+                for i in result["space"]:
+                    if not total_space == 0:
+                        percentage[i]["space"] = result["space"][i] / total_space
+                    else:
+                        percentage[i]["space"] = 0
+                    if not total_time == 0:
+                        percentage[i]["time"] = result["time"][i] / total_time
+                    else:
+                        percentage[i]["time"] = 0
+                print(percentage)
+                if not 'error' in result:
+                    places[log.get()['instance']].append(percentage)
+            return places
+
+        def stack_data(keys, places):
+            dummy = [0] * len(keys)
+            results = []
+            sites = []
+            for i in places:
+                for j in places[i]:
+                    ordered = []
+                    for k in keys:
+                        ordered.append(j[k])
+                    results.append(ordered)
+                results.append(dummy)
+                sites.append(CONFIG_NAMES[i] if i in CONFIG_NAMES else "---")
+            return results, sites
+
+        def plot_data(places, keys):
+            results, sites = stack_data(keys, places)
+
+            size = len(results)
+            print("{} elements total".format(size))
+            ind = np.arange(size)
+            width = 1
+            # print(results)
+            data = list(zip(*results))
+            # print(data)
+            lines = []
+            bottom = [0] * size
+            for i in range(0, len(data)):
+                lines.append(plt.bar(ind, data[i], bottom=bottom, width=width)[0])
+                for k, x in enumerate(data[i]):
+                    bottom[k] += x
+            plt.legend(lines, keys)
+            plt.title(", ".join(sites))
+            plt.show()
+
+        colors = {
+            "simu": "blue",
+            "question": "orange",
+            "image": "green",
+            "audio": "red",
+            "video": "purple",
+            "other": "brown",
+            "map": "violet",
+            # "error":"grey"
+        }
+        markers = [".", "o", "x", "s", "*", "D", "p", ",", "<", ">", "^", "v", "1", "2", "3", "4"]
+
+        def plot_time_space(time_data, space_data, keys):
+            # assuming time_data and space_data are in same order!
+            marker = 0
+            for id in time_data:
+                for k in keys:
+                    for i in range(len(time_data[id])):
+                        print(time_data[id][i][k], space_data[id][i][k])
+                        plt.plot(time_data[id][i][k], space_data[id][i][k], color=colors[k], marker=markers[marker])
+                marker += 1
+            plt.show()
+            # plt.cla()
+            # plt.clf()
+            # plt.close()
+
+        def plot_time_space_rel(combined, keys):
+            groups = defaultdict(list)
+            keys = list(keys)
+            keys.remove("other")
+            for k in keys:
+                for id in sorted(combined):
+                    group = 0.0
+                    count = 0
+                    for item in combined[id]:
+                        if k in item:
+                            time = item[k]["time"]
+                            distance = item[k]["space"]
+                            if time > 0:
+                                group += (distance / time)
+                                count += 1
+                            else:
+                                print("div by zero", distance, time)
+                    if count > 0:
+                        groups[k].append(group / count)
+                    else:
+                        groups[k].append(0.0)
+            ind = np.arange(len(combined.keys()))
+            width = .7 / len(groups)
+            print(ind)
+            print(json.dumps(groups, indent=1))
+            bars = []
+            fig, ax = plt.subplots()
+            for k in groups:
+                print(groups[k])
+                if not len(groups[k]):
+                    groups[k].append(0)
+                bars.append(ax.bar(ind, groups[k], width, color=colors[k]))
+                ind = ind + width
+            ax.set_xticks(ind + width / 2)
+            ax.set_xticklabels(list([CONFIG_NAMES[i] if i in CONFIG_NAMES else "---" for i in sorted(combined.keys())]))
+            plt.legend(bars, keys)
+            print(combined.keys())
+            print([CONFIG_NAMES[i] if i in CONFIG_NAMES else "---" for i in sorted(combined.keys())])
+            plt.show()
+
+        # spatial_data = get_data_distance(store,relative_values=False)
+        # temporal_data = get_data(store,relative_values=False)
+        # spatial_data_rel = get_data_distance(store,relative_values=True)
+        # temporal_data_rel = get_data(store,relative_values=True)
+        #temporal_data_rel = json.load(open("temporal_rel.json"))
+        #spatial_data_rel = json.load(open("spatial_rel.json"))
+        # import IPython
+        # IPython.embed()
+
+        #print(json.dumps(get_all_data(store)))
+        # json.dump(get_all_data(store), open("combined.json", "w"))
+        combined = json.load(open("combined.json"))
+        plot_time_space_rel(combined, keys)
+
+        #plot_time_space_rel(temporal_data_rel, spatial_data_rel, keys)
+
+        # plot_data(data, keys)
+        # plot_data(get_data_distance(store,relative_values=False), keys)
 
     # for analyzers in analyzers:
         # if analyzers.name() in ["LogEntryCount", "ActionSequenceAnalyzer"]:
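Note on calc_distance above: it measures a GPS track by projecting WGS84 lon/lat coordinates into a metric CRS (UTM zone 33N) and taking the projected line's length in meters. A self-contained sketch of the same recipe; pyproj.Proj(init=...) matches the pyproj 1.x-style API used in the commit, and the coordinates below are made up:

    from functools import partial

    import pyproj
    from shapely.geometry import LineString
    from shapely.ops import transform

    def track_length_m(coordinates):
        # coordinates as (lon, lat) pairs in WGS84
        track = LineString(coordinates)
        project = partial(
            pyproj.transform,
            pyproj.Proj(init='EPSG:4326'),   # source: WGS84 lon/lat
            pyproj.Proj(init='EPSG:32633'))  # target: UTM 33N, units in meters
        return transform(project, track).length

    # roughly 1.3 km between two points in Berlin
    print(track_length_m([(13.40, 52.52), (13.41, 52.53)]))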
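Note on plot_time_space_rel above: it draws grouped bars by plotting each series at ind and then shifting ind by one bar width; since ind has already been shifted len(groups) times by the time ax.set_xticks(ind + width / 2) runs, the tick labels can end up offset from the group centers. A minimal sketch of the same technique with the centering computed explicitly; the data values are invented:

    import matplotlib.pyplot as plt
    import numpy as np

    groups = {"simu": [1.2, 0.8], "map": [2.5, 1.9]}  # invented per-site ratios
    sites = ["site A", "site B"]

    base = np.arange(len(sites))
    width = .7 / len(groups)
    bars = []
    fig, ax = plt.subplots()
    for offset, k in enumerate(groups):
        bars.append(ax.bar(base + offset * width, groups[k], width))
    # center the ticks under each cluster of len(groups) bars
    ax.set_xticks(base + width * (len(groups) - 1) / 2)
    ax.set_xticklabels(sites)
    ax.legend(bars, list(groups))
    plt.show()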
@@ -4,4 +4,5 @@ matplotlib==2.1.0
 osmnx==0.6
 networkx==2.0
 pydot==1.2.3
 scipy==1.0.0
+ipython==6.2.1