WIP snapshot
parent 1748ce1f67
commit e1105244f4
@@ -4,4 +4,4 @@ _*
 *.pyc
 logs/
 data/
-
+plots/*
@@ -2,7 +2,7 @@ from typing import List

 from .analyzer import Analyzer, Result
 from .analyzer.biogames import BoardDurationAnalyzer, SimulationRoundsAnalyzer, ActivationSequenceAnalyzer, \
-    BiogamesCategorizer, ActivityMapper, BiogamesStore, InstanceConfig, SimulationOrderAnalyzer
+    BiogamesCategorizer, ActivityMapper, BiogamesStore, InstanceConfig, SimulationOrderAnalyzer, SimulationCategorizer
 from .analyzer.default import LogEntryCountAnalyzer, LocationAnalyzer, LogEntrySequenceAnalyzer, ActionSequenceAnalyzer, \
     CategorizerStub, Store, ProgressAnalyzer
 from .analyzer.locomotion import LocomotionActionAnalyzer, CacheSequenceAnalyzer
@@ -10,7 +10,8 @@ from .analyzer.mask import MaskSpatials
 from .render import Render
 from .render.biogames import SimulationRoundsRender, BoardDurationHistRender, BoardDurationBoxRender, \
     ActivityMapperRender, StoreRender, SimulationOrderRender, SimulationGroupRender
-from .render.default import PrintRender, JSONRender, TrackRender, HeatMapRender
+from .render.default import PrintRender, JSONRender, TrackRender, HeatMapRender, LogEntryCountAnalyzerPlot, \
+    LogEntryCountCSV
 from .render.locomotion import LocomotionActionRelativeRender, LocomotionActionAbsoluteRender, \
     LocomotionActionRatioRender

@@ -21,7 +22,9 @@ __MAPPING__ = {
         LocomotionActionRelativeRender,
         LocomotionActionRatioRender, ],
     LogEntryCountAnalyzer: [
-        JSONRender,
+        # JSONRender,
+        LogEntryCountAnalyzerPlot,
+        LogEntryCountCSV,
     ],
     SimulationRoundsAnalyzer: [
         JSONRender,
@@ -49,8 +52,8 @@ __MAPPING__ = {
     ],
     SimulationOrderAnalyzer: [
         JSONRender,
-        #SimulationOrderRender,
+        # SimulationOrderRender,
         SimulationGroupRender
     ]
 }

@@ -61,10 +64,10 @@ def get_renderer(cls: type) -> [type]:
     return __MAPPING__[cls]


-def render(cls: type, results: List[Result]):
+def render(cls: type, results: List[Result], name=None):
     for r in get_renderer(cls):
         p = r()
         p.result_types.append(cls)
-        rendered = p.render(results)
+        rendered = p.render(results, name=name)
         if rendered:
             print(str(r))
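Editor's note: the new `name` argument is passed straight through `render()` to every renderer registered for the given analyzer class. A minimal sketch of the intended call pattern, assuming a populated `ResultStore` named `store` (the per-category loop mirrors what log_analyzer.py does further down, where `name` is the category key):

    import analyzers
    from analyzers import render

    # one rendering pass per category; `name` ends up in Render.render(results, name=...)
    for cat in store.get_categories():
        render(analyzers.SimulationOrderAnalyzer, store.get_category(cat), name=cat)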
@@ -59,7 +59,8 @@ class ResultStore:

     def get_category(self, key):
         if key not in self.store:
-            return self.entry
+            return self.entry()
+        log.error("get_category %s %s", key, len(self.store[key]))
         return self.store[key]

     def serializable(self):
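Editor's note: returning `self.entry` handed callers the factory object itself; `self.entry()` now returns a fresh, empty container for unknown keys, much like `collections.defaultdict`'s `default_factory`. A standalone illustration of the difference, with `list` standing in for whatever factory the store holds:

    entry = list          # the stored factory
    assert entry is list  # old behaviour: the factory itself leaks out
    assert entry() == []  # new behaviour: a new, empty container per call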
@@ -4,7 +4,7 @@ from types import SimpleNamespace
 from typing import List, NamedTuple

 from util import json_path, combinate
-from util.download import download_board
+from util.download import download_board, get_board_data
 from . import Result, LogSettings, Analyzer, ResultStore
 from .default import CategorizerStub, Store

@@ -52,6 +52,36 @@ class BoardDurationAnalyzer(Analyzer):
         self.last = {}


+class TypedBoardDuration(Analyzer):
+    __name__ = "BoardDuration"
+
+    def result(self, store: ResultStore) -> None:
+        pass
+
+    def process(self, entry: dict) -> bool:
+        entry_type = entry[self.settings.type_field]
+        if entry_type in self.settings.boards:
+            pass
+
+    def add_board(self, entry):
+        board_data = get_board_data(self.settings.source, )
+
+    def add_location(self, entry):
+        self.track['coordinates'].append(json_path(entry, self.settings.custom['coordinates']))
+
+    def add_track(self, **props):
+        self.track['properties'] = props
+        self.tracks.append(self.track)
+        self.track = dict(self.template)
+
+    def __init__(self, settings: LogSettings):
+        super().__init__(settings)
+        self.last_board = {}
+        self.tracks = []
+        self.template = {"type": "LineString", "coordinates": [], "properties": {}}
+        self.track = dict(self.template)
+
+
 class SimulationRoundsAnalyzer(Analyzer):
     __name__ = "SimuRounds"

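Editor's note: `dict(self.template)` is a shallow copy, so every track created this way shares the template's nested `coordinates` list and `properties` dict. If independent tracks are intended, building a fresh dict per track (as `ActivityMapper.new_track()` does further down) or a deep copy avoids the aliasing; a small sketch:

    import copy

    template = {"type": "LineString", "coordinates": [], "properties": {}}
    shallow = dict(template)
    shallow["coordinates"].append([9.18, 48.78])   # example point; also lands in template["coordinates"]
    independent = copy.deepcopy(template)          # gets its own coordinates list and properties dict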
@@ -90,7 +120,7 @@ class ActivationSequenceAnalyzer(Analyzer):
         return False


-class BiogamesCategorizer(CategorizerStub):
+class BiogamesCategorizer(CategorizerStub):  # TODO: refactor
     __name__ = "BiogamesCategorizer"

     def __init__(self, settings: LogSettings):
@@ -105,10 +135,21 @@ class BiogamesCategorizer(CategorizerStub):

 class ActivityMapper(Analyzer):
     __name__ = "ActivityMapper"
+    classes = {
+        "sequence.simulation.": "simu",
+        "sequence.question.": "question",
+        "error": "error"
+    }

     def __init__(self, settings: LogSettings) -> None:
         super().__init__(settings)
         self.store: List[self.State] = []
+        self.timeline = []
+        self.last_board = {}
+        self.last_board_type = "other"
+        self.last_coordinate = None
+        self.tracks = []
+        self.track = None
         self.instance_config_id: str = None
         self.filters = SimpleNamespace()
         self.filters.start = lambda entry: combinate(self.settings.custom["sequences2"]["start"], entry)
@@ -116,7 +157,7 @@ class ActivityMapper(Analyzer):

         self.State: NamedTuple = namedtuple("State", ["sequence", "events", "track", "timestamp"])

-    def result(self, store: ResultStore) -> None:
+    def result_old(self, store: ResultStore) -> None:
         instance_config_id = self.instance_config_id
         for active_segment in self.store:  # active_segment → sequence or None (None → map active)
             seq_data_url = "/game2/editor/config/{config_id}/sequence/{sequence_id}/".format(
@@ -136,32 +177,69 @@ class ActivityMapper(Analyzer):
                     event["image"] = local_file[16:]
         store.add(Result(type(self), {"instance": instance_config_id, "store": [x._asdict() for x in self.store]}))

+    def result(self, store: ResultStore) -> None:
+        store.add(Result(type(self), {"instance": self.instance_config_id, "track": self.tracks, "boards": self.timeline}))
+
     def process(self, entry: dict) -> bool:
+        if self.track is None:
+            self.track = self.new_track(entry['timestamp'])
         if self.instance_config_id is None:
             if entry[self.settings.type_field] in self.settings.custom['instance_start']:
                 self.instance_config_id = json_path(entry, self.settings.custom['instance_config_id'])
-        if self.filters.start(entry):
-            self.store.append(
-                self.State(
-                    sequence=json_path(entry, json_path(self.settings.custom, "sequences2.id_field")),
-                    events=[],
-                    track=[],
-                    timestamp=entry['timestamp']))
-        elif self.filters.end(entry) or not self.store:
-            self.store.append(self.State(sequence=None, events=[], track=[], timestamp=entry['timestamp']))
+
+        self.update_board_type(entry)
         if entry[self.settings.type_field] in self.settings.spatials:
-            self.store[-1].track.append(
-                {
-                    'timestamp': entry['timestamp'],
-                    'coordinates': json_path(entry, "location.coordinates"),
-                    'accuracy': entry['accuracy']
-                }
-            )
-        else:
-            self.store[-1].events.append(entry)
+            self.add_location(entry)
+        elif entry[self.settings.type_field] in self.settings.boards:
+            board_data = get_board_data(self.settings.source, self.instance_config_id, entry["sequence_id"],
+                                        entry["board_id"])
+            entry["extra_data"] = board_data
+            entry['coordinate'] = self.new_coordinate()
+            self.timeline.append(entry)
         return False

+    def update_board_type(self, entry):
+        type = self.classify_entry(entry)
+        if not type == self.last_board_type:
+            self.add_track(activity_type=self.last_board_type,end_timestamp=entry['timestamp'])
+        self.last_board_type = type
+
+    def classify_entry(self, entry):
+        entry_type = entry[self.settings.type_field]
+        if self.filters.end(entry):
+            return "map"
+        if not entry_type in self.settings.boards:
+            return self.last_board_type
+        board_data = get_board_data(self.settings.source, self.instance_config_id, entry["sequence_id"],
+                                    entry["board_id"])
+        for pattern in self.classes:
+            if pattern in board_data['class']:
+                return self.classes[pattern]
+        if board_data['has_video']:
+            return "video"
+        elif board_data['has_audio']:
+            return "audio"
+        elif board_data['has_image']:
+            return "image"
+        return "other"
+
+    def new_coordinate(self):
+        return {"type": "Point", "coordinates": self.last_coordinate}
+
+    def add_location(self, entry):
+        coordinates = json_path(entry, self.settings.custom['coordinates'])
+        self.track['coordinates'].append(coordinates)
+        self.last_coordinate = coordinates
+
+    def add_track(self, **props):
+        self.track['properties'].update(props)
+        self.tracks.append(self.track)
+        self.track = self.new_track(props['end_timestamp'])
+
+    def new_track(self, timestamp):
+        return {"type": "LineString", "coordinates": [], "properties": {'start_timestamp': timestamp}}
+

 class BiogamesStore(Store):
     __name__ = "BiogamesStore"
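Editor's note: with these changes `ActivityMapper.result()` emits one Result whose payload combines the GeoJSON-style tracks (one LineString per contiguous activity type, carrying `start_timestamp`, `end_timestamp` and `activity_type` in its properties) and the board timeline (each board entry annotated with `extra_data` from `get_board_data` and a Point `coordinate` at the last known location). An illustrative payload only; the timestamps are made up and the elided ids are placeholders:

    {
        "instance": "06c627ac-09fb-4f03-9a70-49261adefed9",
        "track": [
            {"type": "LineString",
             "coordinates": [[9.18, 48.78], [9.19, 48.79]],
             "properties": {"start_timestamp": 1500000000000,
                            "end_timestamp": 1500000600000,
                            "activity_type": "map"}}
        ],
        "boards": [
            {"sequence_id": "…", "board_id": "…",
             "extra_data": {"class": "…", "has_image": True, "has_audio": False, "has_video": False},
             "coordinate": {"type": "Point", "coordinates": [9.19, 48.79]}}
        ]
    }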
@@ -223,4 +301,21 @@ class SimulationOrderAnalyzer(Analyzer):
             self.store[simu_id] += 1
         if not simu_id in self.order:
             self.order.append(simu_id)
         return False
+
+
+class SimulationCategorizer(CategorizerStub):  # TODO: refactor categorizer
+    __name__ = "SimulationCategorizer"
+
+    def __init__(self, settings: LogSettings):
+        super().__init__(settings)
+
+    def process(self, entry: dict) -> bool:
+        if self.key is "default":
+            if entry[self.settings.type_field] in self.settings.custom['instance_start']:
+                try:
+                    self.key = json_path(entry, self.settings.custom['instance_config_id'])
+                except KeyError as e:
+                    print(entry)
+                    raise e
+        return False
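Editor's note: `self.key is "default"` relies on string identity, which CPython does not guarantee for arbitrary strings; equality is what is meant here. A sketch of the guard with the comparison fixed, assuming `self.key` starts out as "default" in CategorizerStub:

    if self.key == "default":
        if entry[self.settings.type_field] in self.settings.custom['instance_start']:
            self.key = json_path(entry, self.settings.custom['instance_config_id'])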
@@ -28,6 +28,7 @@ class LocationAnalyzer(Analyzer):


 class LogEntryCountAnalyzer(Analyzer):
+    # TODO: more flexible, e.g. min/max lat/long
     """
     count occurrences of log entry types
     """
@@ -5,7 +5,7 @@ from .. import Result
 class Render:
     result_types = []

-    def render(self, results: List[Result]):
+    def render(self, results: List[Result], name=None):
         raise NotImplementedError()

     def filter(self, results: List[Result]):
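Editor's note: every Render subclass now has to accept the keyword argument, even if it ignores it. A minimal conforming subclass, assuming the usual imports from this package; the class itself is a hypothetical example, not part of the repository:

    from typing import List

    from analyzers import Result
    from analyzers.render import Render


    class NameEchoRender(Render):
        def render(self, results: List[Result], name=None):
            # `name` is the category / instance label forwarded by analyzers.render()
            print(name, len(self.filter(results)))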
@@ -10,35 +10,60 @@ import networkx as nx
 import itertools

 from analyzers import Store, BiogamesStore, SimulationOrderAnalyzer
+from util.meta_temp import CONFIG_NAMES
 from . import Render
 from .. import Result, SimulationRoundsAnalyzer, BoardDurationAnalyzer, ActivityMapper


 def add_edge(graph, src, dest):
     if graph.has_edge(src, dest):
         weight = graph.get_edge_data(src, dest)['weight'] + 1
     else:
         weight = 1
-    graph.add_edge(tuple(src),tuple(dest),weight=weight)
+    graph.add_edge(tuple(src), tuple(dest), weight=weight)


 def pairs(iterable):
-    a,b = itertools.tee(iterable)
-    next(b,None)
-    return zip(a,b)
+    a, b = itertools.tee(iterable)
+    next(b, None)
+    return zip(a, b)


+def __plot_or_show(name=None):
+    if name:
+        plt.savefig(name)
+    else:
+        plt.show()
+    plt.cla()
+    plt.clf()
+    plt.close()
+
+
 def plot(src_data: List[Tuple[str, List[int]]], ylabel="simulation rounds", title="simulation retries",
-         rotation='vertical'):
+         rotation='vertical', name=None):
     names, datas = list(zip(*src_data))
-    #plt.boxplot(datas, labels=names, showfliers=False, showmeans=True, meanline=True)
-    rand = np.random.rand(len(datas),len(datas[0]))
-    plt.plot(datas+rand, linewidth=.2)
+    # plt.boxplot(datas, labels=names, showfliers=False, showmeans=True, meanline=True)
+    rand = np.random.rand(len(datas), len(datas[0]))
+    plt.plot(datas + rand, linewidth=.2)
     plt.xticks(rotation=rotation)
     # plt.margins()
     plt.ylabel(ylabel)
     plt.title(title)
-    plt.show()
+    __plot_or_show(name)


 def graph_plot(src_data: List[Tuple[str, List[int]]], ylabel="simulation rounds", title="sequential simulation retries",
-               rotation='vertical'):
+               rotation='vertical', name=None):
+    config_name = CONFIG_NAMES[name] if name in CONFIG_NAMES else "---"
+    counts_per_group = [sum(i) for i in src_data]
+    label = "{}: n={n}; # of sim runs: ⌀={avg:.2f}, median={median}".format(
+        config_name,
+        n=len(src_data),
+        avg=np.mean(counts_per_group),
+        median=np.median(counts_per_group)
+    )
+    print(config_name)
+    name = "plots/{}.png".format(name)
     g = nx.Graph()
     for group in src_data:
         for i in pairs(enumerate(group)):
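Editor's note: `__plot_or_show` switches between writing a file and opening an interactive window, and always resets pyplot state afterwards. A usage sketch with hypothetical data and a hypothetical output path (the plots/ directory matches the new .gitignore entry):

    data = [("config-a", [1, 2, 2, 3]), ("config-b", [2, 2, 4, 1])]
    plot(data)                                 # no name: plt.show(), then the figure is cleared
    plot(data, name="plots/simu-rounds.png")   # name given: plt.savefig(name) instead of showing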
@ -46,34 +71,37 @@ def graph_plot(src_data: List[Tuple[str, List[int]]], ylabel="simulation rounds"
|
||||||
positions = {}
|
positions = {}
|
||||||
for node in g.nodes():
|
for node in g.nodes():
|
||||||
positions[node] = node
|
positions[node] = node
|
||||||
widths = [x[2]/10.0 for x in g.edges.data('weight')]
|
widths = [x[2] / 10.0 for x in g.edges.data('weight')]
|
||||||
print(max(widths))
|
print(max(widths))
|
||||||
nx.draw_networkx_edges(g, positions, width=widths)
|
nx.draw_networkx_edges(g, positions, width=widths)
|
||||||
#rand = np.random.rand(len(datas),len(datas[0]))
|
# rand = np.random.rand(len(datas),len(datas[0]))
|
||||||
#plt.plot(datas+rand, linewidth=.2)
|
# plt.plot(datas+rand, linewidth=.2)
|
||||||
plt.xticks(rotation=rotation)
|
plt.xticks(rotation=rotation)
|
||||||
# plt.margins()
|
# plt.margins()
|
||||||
plt.ylabel(ylabel)
|
plt.ylabel(ylabel)
|
||||||
plt.title(title)
|
plt.title(title)
|
||||||
plt.show()
|
plt.figtext(0.5, 0.13, label, ha="center")
|
||||||
|
__plot_or_show(name)
|
||||||
|
|
||||||
def graph_fit(src_data, deg=5):
|
|
||||||
plt.title("polyfit(x,y,deg="+str(deg)+")")
|
def graph_fit(src_data, deg=5, name=None):
|
||||||
|
plt.title("polyfit(x,y,deg=" + str(deg) + ")")
|
||||||
for i in src_data:
|
for i in src_data:
|
||||||
#plt.plot(i)
|
# plt.plot(i)
|
||||||
count = len(i)
|
count = len(i)
|
||||||
xp = np.linspace(0, count-1, num=count, endpoint=True)
|
xp = np.linspace(0, count - 1, num=count, endpoint=True)
|
||||||
#fit = np.poly1d(np.polyfit(range(len(i)), i, deg=deg))
|
# fit = np.poly1d(np.polyfit(range(len(i)), i, deg=deg))
|
||||||
#plt.plot(xp, fit(xp), linewidth=0.1)
|
# plt.plot(xp, fit(xp), linewidth=0.1)
|
||||||
xnew = np.linspace(0, count-1, num=count*20, endpoint=True)
|
xnew = np.linspace(0, count - 1, num=count * 20, endpoint=True)
|
||||||
f = interp1d(xp, i, kind='quadratic')
|
f = interp1d(xp, i, kind='quadratic')
|
||||||
|
|
||||||
plt.plot(range(count), i, '.', markersize=1)
|
plt.plot(range(count), i, '.', markersize=1)
|
||||||
plt.plot(xnew, f(xnew), linewidth=0.2)
|
plt.plot(xnew, f(xnew), linewidth=0.2)
|
||||||
plt.show()
|
__plot_or_show(name)
|
||||||
|
|
||||||
|
|
||||||
class SimulationRoundsRender(Render):
|
class SimulationRoundsRender(Render):
|
||||||
def render(self, results: List[Result]):
|
def render(self, results: List[Result], name=None):
|
||||||
data = defaultdict(list)
|
data = defaultdict(list)
|
||||||
for result in self.filter(results):
|
for result in self.filter(results):
|
||||||
get = result.get()
|
get = result.get()
|
||||||
|
|
@@ -89,7 +117,7 @@ class SimulationRoundsRender(Render):
 class BoardDurationHistRender(Render):
     result_types = [BoardDurationAnalyzer]

-    def render(self, results: List[Result]):
+    def render(self, results: List[Result], name=None):
         data = []
         for result in self.filter(results):
             session = result.get()
@@ -107,7 +135,7 @@ class BoardDurationHistRender(Render):
 class BoardDurationBoxRender(Render):
     result_types = [BoardDurationAnalyzer]

-    def render(self, results: List[Result]):
+    def render(self, results: List[Result], name=None):
         data = defaultdict(list)
         for result in self.filter(results):
             get = result.get()
@@ -122,11 +150,11 @@ class BoardDurationBoxRender(Render):
 class ActivityMapperRender(Render):
     result_types = [ActivityMapper]

-    def render(self, results: List[Result]):
+    def render(self, results: List[Result], name=None):
         print(os.getcwd())
         for result in self.filter(results):
             data = result.get()
-            with open(os.path.join("static", "progress", "data", data['instance']),"w") as out:
+            with open(os.path.join("static", "progress", "data", data['instance']), "w") as out:
                 json.dump(data["store"], out, indent=1)
         return "ok"

@@ -134,31 +162,32 @@ class ActivityMapperRender(Render):
 class StoreRender(Render):
     result_types = [Store, BiogamesStore]

-    def render(self, results: List[Result]):
+    def render(self, results: List[Result], name=None):
         for result in self.filter(results):
-            with open(os.path.join("static","progress","data","fooo"), "w") as out:
+            with open(os.path.join("static", "progress", "data", "fooo"), "w") as out:
                 json.dump(result.get(), out, indent=1)


 class SimulationOrderRender(Render):
-    def render(self, results: List[Result]):
+    def render(self, results: List[Result], name=None):
         data = defaultdict(list)
         for result in self.filter(results):
             get = result.get()
-            for i,value in enumerate(get):
+            for i, value in enumerate(get):
                 data[i].append(value)
-        #data_tuples = [(key, data[key]) for key in sorted(data)]
-        #data_tuples = sorted(data_tuples, key=lambda x: sum(x[1]))
-        #plot(enumerate([r.get() for r in self.filter(results)]))
+        # data_tuples = [(key, data[key]) for key in sorted(data)]
+        # data_tuples = sorted(data_tuples, key=lambda x: sum(x[1]))
+        # plot(enumerate([r.get() for r in self.filter(results)]))
         plot(list(data.items()), ylabel="simulation retries", title="sequential simulation retries", rotation=None)

     result_types = [SimulationOrderAnalyzer]


 class SimulationGroupRender(Render):
-    def render(self, results: List[Result]):
+    def render(self, results: List[Result], name=None):
         data = [r.get() for r in self.filter(results)]
-        #graph_plot(list(data), ylabel="simulation retries", title="sequential simulation retries", rotation=None)
-        graph_fit(list(data))
+        print(name, len(data))
+        graph_plot(list(data), ylabel="simulation retries", title="sequential simulation retries", rotation=None, name=name)
+        #graph_fit(list(data), name=name)

     result_types = [SimulationOrderAnalyzer]
@@ -2,6 +2,9 @@ import json
 import logging
 from typing import List

+import matplotlib.pyplot as plt
+
+from analyzers import LogEntryCountAnalyzer
 from . import Render, Result
 from .. import LocationAnalyzer

@@ -9,25 +12,26 @@ log = logging.getLogger(__name__)


 class PrintRender(Render):
-    def render(self, results: List[Result]):
+    def render(self, results: List[Result], name=None):
         print("\t" + "\n\t".join([str(r) for r in results]))


 class JSONRender(Render):
-    def render(self, results: List[Result]):
+    def render(self, results: List[Result], name=None):
         print(json.dumps([r.get() for r in self.filter(results)], indent=1))


 class TrackRender(Render):
     result_types = [LocationAnalyzer]

-    def render(self, results: List[Result]):
+    def render(self, results: List[Result], name=None):
         data = []
         log.debug(results)
         for result in self.filter(results):
             if len(result.get()) > 0:
                 data.append(
-                    [[entry['location']['coordinates'][1], entry['location']['coordinates'][0]] for entry in  # TODO: configurable
+                    [[entry['location']['coordinates'][1], entry['location']['coordinates'][0]] for entry in
+                    # TODO: configurable
                     result.get()])
         dumps = json.dumps(data)
         with open("track_data.js", "w") as out:
@@ -38,7 +42,7 @@ class TrackRender(Render):
 class HeatMapRender(TrackRender):
     weight = 0.01

-    def render(self, results: List[Result]):
+    def render(self, results: List[Result], name=None):
         raw = super(HeatMapRender, self).render(results)
         data = []
         for session in json.loads(raw):
@@ -47,3 +51,36 @@ class HeatMapRender(TrackRender):
         with open('heat_data.js', 'w') as out:
             out.write("coords = " + dumps + ";")
         return dumps
+
+
+class LogEntryCountAnalyzerPlot(Render):
+    result_types = [LogEntryCountAnalyzer]
+
+    def render(self, results: List[Result], name=None):
+        raw_data = list(self.filter(results))[0].get()
+        print(raw_data)
+        labels = []
+        data = []
+        for x in sorted(raw_data.items()):
+            labels.append(str(x[0]).split(".")[-1])
+            data.append(x[1])
+        plt.bar(range(len(data)), list(data))
+        plt.xticks(range(len(data)), labels, rotation="vertical")
+        plt.tight_layout()
+
+        name = "plots/{}.png".format(name)
+        plt.savefig(name)
+        plt.cla()
+        plt.clf()
+        plt.close()
+
+
+class LogEntryCountCSV(Render):
+    result_types = [LogEntryCountAnalyzer]
+    summary = None
+
+    def render(self, results: List[Result], name=None):
+        if self.summary is None:
+            return
+        for result in self.filter(results):
+            raw_data = result.get()
+            self.summary[name] = raw_data
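Editor's note: `LogEntryCountCSV` only records into the class-level `summary` mapping; writing the CSV happens in log_analyzer.py further down. Nothing is collected unless `summary` is a dict before rendering (the main script currently sets it to None, which makes render() return immediately). A sketch of the intended flow, assuming a populated `ResultStore` named `store`:

    LogEntryCountCSV.summary = {}                      # collect per-category counts here
    for cat in store.get_categories():
        render(analyzers.LogEntryCountAnalyzer, store.get_category(cat), name=cat)
    # LogEntryCountCSV.summary now maps category name -> {entry_type: count}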
@@ -51,18 +51,18 @@ class LocomotionActionRender(Render):


 class LocomotionActionAbsoluteRender(LocomotionActionRender):
-    def render(self, results: List[Result]):
+    def render(self, results: List[Result], name=None):
         results = filter_results(self.filter(results), ['locomotion_sum', 'action_sum'])
         plot(results, "time", "abs loc/action")


 class LocomotionActionRelativeRender(LocomotionActionRender):
-    def render(self, results: List[Result]):
+    def render(self, results: List[Result], name=None):
         results = filter_results(self.filter(results), ['locomotion_relative', 'action_relative'])
         plot(results, "fraction of time", "rel loc/action")


 class LocomotionActionRatioRender(LocomotionActionRender):
-    def render(self, results: List[Result]):
+    def render(self, results: List[Result], name=None):
         results = filter_results(self.filter(results), ['locomotion_action_ratio'])
         plot_line(results, ylabel="Ratio", title="Locomotion/Action Ratio")
@@ -13,12 +13,14 @@
     ],
     "analyzers": {
        "analyzers": [
-            "BiogamesCategorizer",
-            "SimulationOrderAnalyzer"
+            "SimulationCategorizer",
+            "ActivityMapper"
        ]
     },
     "dis":[
-        "ActivityMapper",
+        "BiogamesCategorizer",
+        "LogEntryCountAnalyzer",
+        "SimulationOrderAnalyzer",
         "ProgressAnalyzer",
         "InstanceConfig"],
     "disabled_analyzers": [
log_analyzer.py (142 changed lines)
@@ -2,9 +2,12 @@ import json
 import logging
 from typing import List

+import numpy as np
+
 import analyzers
 from analyzers import get_renderer, Analyzer, render, Store
 from analyzers.analyzer import ResultStore
+from analyzers.render.default import LogEntryCountCSV
 from analyzers.settings import LogSettings, load_settings
 from loaders import LOADERS

@@ -30,8 +33,11 @@ def process_log(log_id: str, settings: LogSettings) -> List[Analyzer]:
     log.debug("process entries")
     for entry in loader.get_entry():
         for analyzer in analyzers:
-            if analyzer.process(entry):
-                break
+            try:
+                if analyzer.process(entry):
+                    break
+            except KeyError as e:
+                log.exception(e)
     return analyzers


@@ -44,18 +50,19 @@ if __name__ == '__main__':
         "e32b16998440475b994ab46d481d3e0c",
     ]
     log_ids: List[str] = [
-        #"34fecf49dbaca3401d745fb467",
+        # "34fecf49dbaca3401d745fb467",
         # "44ea194de594cd8d63ac0314be",
         # "57c444470dbf88605433ca935c",
         # "78e0c545b594e82edfad55bd7f",
         # "91abfd4b31a5562b1c66be37d9",
         "597b704fe9ace475316c345903",
         "e01a684aa29dff9ddd9705edf8",
         "fbf9d64ae0bdad0de7efa3eec6",
         # "fe1331481f85560681f86827ec",
         "fe1331481f85560681f86827ec"]
-    #"fec57041458e6cef98652df625", ]
+    # "fec57041458e6cef98652df625", ]
     log_ids = []
+    # with open("/home/clemens/git/ma/test/filtered") as src:
     with open("/home/clemens/git/ma/test/filtered") as src:
         for line in src:
             line = line.strip()
@@ -72,17 +79,132 @@ if __name__ == '__main__':
             r().render(store.get_all())
     if False:
         render(analyzers.LocationAnalyzer, store.get_all())
-        #print(json.dumps(store.serializable(), indent=1))
+        # print(json.dumps(store.serializable(), indent=1))
     if False:
         render(analyzers.ActivityMapper, store.get_all())
         render(analyzers.ProgressAnalyzer, store.get_all())

     if False:
         from analyzers.postprocessing import graph

         g = graph.Cache(settings)
         g.run(store)
+    if False:
+        # render(analyzers.SimulationOrderAnalyzer, store.get_all())
+        for cat in store.get_categories():
+            data = store.get_category(cat)
+            render(analyzers.SimulationOrderAnalyzer, data, name=cat)
+    if False:
+        LogEntryCountCSV.summary = None
+        for cat in store.get_categories():
+            data = store.get_category(cat)
+            render(analyzers.LogEntryCountAnalyzer, data, name=cat)
+        if LogEntryCountCSV.summary:
+            headers = []
+            lines = []
+            for name in LogEntryCountCSV.summary:
+                data = LogEntryCountCSV.summary[name]
+                for head in data:
+                    if not head in headers:
+                        headers.append(head)
+                line = [name]
+                for head in headers:
+                    line.append(data[head]) if head in data else line.append(0)
+                lines.append(line)
+            import csv
+
+            with open('logentrycount.csv', 'w', newline='') as csvfile:
+                writer = csv.writer(csvfile, quoting=csv.QUOTE_NONE)
+                writer.writerow(["name"] + [h.split(".")[-1] for h in headers])
+                for line in lines:
+                    writer.writerow(line)
+
     if True:
-        render(analyzers.SimulationOrderAnalyzer, store.get_all())
+        #json.dump(store.serializable(), open("new.json", "w"), indent=1)
+        from collections import defaultdict
+
+        keys = [
+            "simu",
+            "question",
+            "image",
+            "audio",
+            "video",
+            "other",
+            "map"
+        ]
+        import matplotlib.pyplot as plt
+        #results = []
+
+        places = defaultdict(list)
+
+        for log in store.get_all():
+            result = defaultdict(lambda: 0)
+            for i in log.get()['track']:
+                duration = i['properties']['end_timestamp'] - i['properties']['start_timestamp']
+                result[i['properties']['activity_type']] += duration
+            print(json.dumps(result, indent=4))
+            total = sum(result.values())
+            print(total)
+            percentage = defaultdict(lambda :0)
+            minutes = defaultdict(lambda:0)
+            for i in result:
+                percentage[i]= result[i]/total
+                minutes[i] = result[i]/60_000
+            print(json.dumps(percentage,indent=4))
+            if not 'error' in result:
+                #places[log.get()['instance']].append(percentage)
+                places[log.get()['instance']].append(minutes)
+
+        for place in places:
+            places[place] = sorted(places[place], key=lambda item:item['map'])
+
+        dummy = [0]*len(keys)
+        results = []
+        sites = []
+        from util.meta_temp import CONFIG_NAMES
+        for i in places:
+            for j in places[i]:
+                ordered = []
+                for k in keys:
+                    ordered.append(j[k])
+                results.append(ordered)
+            results.append(dummy)
+            sites.append(CONFIG_NAMES[i] if i in CONFIG_NAMES else "---")
+
+
+        size = len(results)
+        ind = np.arange(size)
+        width=0.9
+        print(results)
+        data = list(zip(*results))
+        print(data)
+        lines = []
+        bottom = [0]*len(results)
+        for i in range(0, len(data)):
+            lines.append(plt.bar(ind,data[i], bottom=bottom, width=width)[0])
+            for k,x in enumerate(data[i]):
+                bottom[k] += x
+        plt.legend(lines, keys)
+        plt.title(", ".join(sites))
+        plt.show()
+
+        #size = len(results)
+        #ind = np.arange(size)
+        #width = 0.9
+        #print(results)
+        #data = list(zip(*results))
+        #print(data)
+        #lines = []
+        #bottom = [0] * len(results)
+        #for i in range(0, len(data)):
+        #    lines.append(plt.bar(ind, data[i], bottom=bottom, width=width)[0])
+        #    for k, x in enumerate(data[i]):
+        #        bottom[k] += x
+        #plt.legend(lines, keys)
+        #plt.title("Zwei Spiele in Filderstadt (t1=237min; t2=67min)")
+        #plt.show()



 # for analyzers in analyzers:
 #     if analyzers.name() in ["LogEntryCount", "ActionSequenceAnalyzer"]:
@@ -3,4 +3,5 @@ numpy==1.13.1
 matplotlib==2.1.0
 osmnx==0.6
 networkx==2.0
 pydot==1.2.3
+scipy==1.0.0
@@ -73,7 +73,10 @@ class Biogames(Source):
             os.remove(filename)

     def get_json(self, url):
-        return self._get(url, stream=False).json()
+        http = self._get(url, stream=False)
+        if not http.ok:
+            raise ConnectionError("HTTP status is not OK", http.url)
+        return http.json()

     def close(self):
         pass
@@ -31,12 +31,48 @@ def download_board(board_id, instance_config_id, sequence_id, source):

 def get_config(source, instance_id):
     url = "/game2/editor/config/{config_id}/".format(config_id=instance_id)
-    instance_data = source.get_json(url)
+    instance_data = get_json(source, url)
     caches = url + "cache/"
-    cache_data = source.get_json(caches)
+    cache_data = get_json(source, caches)

-    return {
+    result = {
         "name": instance_data["name"],
         "id": instance_data["@id"],
         "caches": cache_data
     }
+    return result
+
+
+def get_board_data(source, instance_id, sequence_id, board_id):
+    url = "/game2/editor/config/{config_id}/sequence/{sequence_id}/board/{board_id}/".format(
+        config_id=instance_id,
+        sequence_id=sequence_id,
+        board_id=board_id
+    )
+    instance_data = get_json(source, url)
+    if instance_data is None:
+        return {"class": "error"}
+    result = {
+        "class": instance_data["@class"],
+        "id": instance_data["@id"]
+    }
+    for i in ["image", "audio", "video"]:
+        key = i + "_file"
+        result["has_" + i] = bool(key in instance_data and instance_data[key])
+    return result
+
+
+cache = {}
+
+
+def get_json(source, url):
+    if url in cache:
+        return cache[url]
+    try:
+        data = source.get_json(url)
+
+    except Exception as e:
+        print("exception", e, e.args)  # TODO: logging
+        data = None
+    cache[url] = data
+    return data
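Editor's note: `get_json()` memoises per URL at module level and caches failures as well: any exception from `source.get_json()` (including the new ConnectionError raised on non-OK HTTP responses) is swallowed, stored as None and returned on every later call for that URL, which is what makes `get_board_data()` fall back to {"class": "error"}. A usage sketch, assuming a `Biogames` source instance and real ids in place of the placeholder variables:

    from util import download

    board = download.get_board_data(source, instance_id, sequence_id, board_id)
    if board["class"] == "error":
        download.cache.clear()   # failures are cached too; clearing forces a re-fetch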
@@ -0,0 +1,99 @@
+CONFIG_NAMES = {
+    '06c627ac-09fb-4f03-9a70-49261adefed9': u'Kopie 2 - Filderstadt',
+    '07c3566b-371b-4627-9fa6-96fdcf421ad8': u'Stadt\xf6kologieLindlarAWG-V.1',
+    '08369b6c-f699-41ba-9313-9d6ea2d22f78': u'Schierke',
+    '0bd1bba5-cde8-40e5-ad1c-326e55bf1247': u'Simulation: Wildkatzen',
+    '0d2e711b-be77-46e1-92f5-73199626b68c': u'Kopie 2 - Simulation: Rinder',
+    '11b9793e-7b4f-41ec-98fc-e46de557ae07': u'Kopie 11 - Bad Urach',
+    '13241906-cdae-441a-aed0-d57ebeb37cac': u'Stadt\xf6kologie',
+    '14dee52a-d040-4c70-9e1f-359c7faadfab': u'Kopie 5 - Bad Urach',
+    '14e8f4be-d27e-43a4-95e1-e033950a99bd': u'Kopie 13 - Bad Urach',
+    '16fc3117-61db-4f50-b84f-81de6310206f': u'Oberelsbach',
+    '17926099-4ed3-4ca0-996d-cc577c6fdaed': u'Kopie 6 - Bad Urach',
+    '17d401a9-de21-49a2-95bc-7dafa53dda64': u'Oberelsbach 2016',
+    '1cae4e4c-6d8b-43f0-b17d-1034b213cbaf': u'Test-Born',
+    '1f56b428-7c2c-4333-8fe1-c740ccbff40f': u'Bad Gateway',
+    '1f8b9d55-3b95-4739-914a-e2eff2dc52c3': u'Kopie 2 - Bad Gateway',
+    '2b3975be-242a-4c9d-80c7-8d9a370c9fe0': u'Simulation: Rinder',
+    '2bdc24f5-c51d-41a3-9cbd-adfc3a77a5ce': u'Simulation: Landnutzung',
+    '2c7cdb5d-7012-4a06-b4c8-980ad2470f10': u'Kopie 3 - Bad Gateway',
+    '30b743e6-144c-4fd7-a055-e87680f74c27': u'Nakundu2go',
+    '31da50e3-d59f-4166-91f2-7c84454c5769': u'Kopie 4 - Taltitz',
+    '3269d0d4-dc13-46f7-b666-1db94350fcd4': u'simu rindfleisch',
+    '32ed9be7-6fc2-4c50-afdb-8f32453d8409': u'Kopie 1 - Eichstaett - Stadt\xf6kologie2',
+    '33024899-7c10-4d44-b950-38fd0c2b0e16': u'Kopie 3 - Stadt\xf6kologie',
+    '3fe38d6e-04d8-49e7-a6d9-7a5f12635369': u'Kopie 8 - Bad Urach',
+    '50716b57-e494-46e0-9449-919cecb02a3d': u'Kopie 2 - Lindlar',
+    '5436357d-9644-4e3d-a181-bb4c6c0b3055': u'Kopie 4 - Bad Urach',
+    '543ac9b8-e990-4540-8277-994c3c756f47': u'Kopie 4 - Lindlar',
+    '5544ec80-5928-41e1-ba89-c13e570bda88': u'Test - Nakundu2Go',
+    '5637a078-c931-458d-865d-adc5d0653147': u'Kopie 2 - Bad Gateway',
+    '57e079b1-0a58-4150-a358-627fc9e896cc': u'Kopie 1 - Schierke',
+    '5cb3318a-cb5f-412f-bfd6-6467101ed505': u'Eichst\xe4tt-Schafe-1',
+    '5e64ce07-1c16-4d50-ac4e-b3117847ea43': u'Filderstadt',
+    '60e77829-2686-4022-84e6-b9e8875f7ca0': u'Kopie 10 - Bad Urach',
+    '6140a24e-32c6-4872-92b8-c463468f79a2': u'Taltitz neu',
+    '63610965-7a82-471b-a11a-0f696b4d6996': u'Kopie 3 - Lindlar',
+    '6479e339-f70a-4ed7-9b9e-9884a8037d81': u'Kopie 5 - Lindlar',
+    '658e1856-d04a-4284-9fb3-95c8e89843d9': u'Simulation: Streuobst',
+    '66fd2366-5985-4cac-8777-51a83e169d93': u'Kopie 1 - Test - Garnison Stadt\xf6kologie',
+    '681b9c2a-2547-4ffd-b510-ef28f5a2d355': u'Kopie 6 - Bad Gateway',
+    '74f0bd8c-c53c-4293-b583-1d7aec98fafa': u'Simulation: Luchse',
+    '78a00aac-422c-4772-9327-3241b32cea03': u'Kopie 2 - Stadt\xf6kologie',
+    '7a056d76-5636-45cc-a0bf-0555eff6101c': u'Test - Osnabr\xfcck Stadt\xf6kologie',
+    '7bf1de94-2627-489b-a310-cbad568d2230': u'Kopie 2 - Taltitz',
+    '7ea9ff83-c015-4ede-a561-8a16a1fb0833': u'Kopie 1 - Stadt\xf6kologieLindlarAWG-V.1',
+    '81ebf491-a556-43a8-b5d7-48ee193e2636': u'Test - Oberelsbach Wildkatze',
+    '877a8c70-fe0c-464b-98c1-73f8669cabd6': u'Mittenwald',
+    '890e99b0-eeed-4a20-ac21-ea027daf16f3': u'Kopie 3 - Bad Urach',
+    '8aa01b71-2609-4a47-a14c-8c3b51905fd2': u'? (lb)',
+    '8c002b38-606b-45cd-b046-bc4641188e18': u'Kopie 7 - Bad Urach',
+    '8cf124c1-3041-4e9b-a35a-1d4e09694917': u'Kopie 1 - AAAAA Bad Gateway',
+    '8e13e952-180c-4498-8730-9691dc837515': u'Test_Eichst\xe4tt_Schafe',
+    '90278021-4c57-464e-90b1-d603799d07eb': u'Eichst\xe4tt',
+    '92fc4eef-1489-4b31-b9e1-9d9436f7f43e': u'Kopie 5 - Taltitz',
+    '98c6aed7-3632-467e-9f20-5bdc3276f616': u'Kopie 8 - Bad Gateway',
+    '995e06bf-abc4-4103-a572-9f096d71d192': u'Eichstaett - Stadt\xf6kologie',
+    '9cb95913-8245-49e6-8416-ee6635e67aab': u'Kopie 2 - Simulation: Landnutzung',
+    '9e819058-f7f9-459e-9947-84349c7d849c': u'Kopie 9 - Bad Urach',
+    '9f99c761-4fb6-4636-92da-a9627977d8b3': u'Simulation: Schafe',
+    'a5fa36f5-7531-4821-ba0e-cf8f2a502ad4': u'Garmisch',
+    'a79bb488-5fea-4bf9-9b25-395691c8e7cd': u'Kopie 1 - A kopietest docker',
+    'abdf9bd0-9b7e-4286-a25e-2cb14742db30': u'Test - Bad Urach Streuobst',
+    'ac0eb831-0f47-4cf1-a1a5-2e6a535e70e9': u'Kopie 1 - Vorlagen',
+    'ae726f64-cfa5-4f86-9538-a1e63dd914cf': u'AAAAA Bad Gateway',
+    'b307e954-5f0e-43bb-855f-d39c1a8858bd': u'Kopie 7 - Bad Gateway',
+    'b58ea5b3-b864-42e3-aaf4-1a76633c037e': u'Kopie 4 - Bad Gateway',
+    'b5c27cc6-e2bc-4288-be97-1e5bc1f6f94f': u'Kopie 1 - Simulation: Landnutzung',
+    'b623a3c8-7ff8-47b5-853e-7b08e200dd27': u'Taltitz',
+    'be1b2167-293c-4a4b-beda-28d6aa8b47a7': u'Kopie 1 - Stadt\xf6kologie berichtigt',
+    'bf7146bd-c83b-4497-b948-dd6dfc8607aa': u'Kopie 1 - Rinder Lindlar gps',
+    'c2c87501-f180-40de-af7b-1a3288c82292': u'Eichstaett - Stadt\xf6kologie2',
+    'c3016b66-3c26-4ec4-bcf1-d36be07be037': u'?? (lb)',
+    'c3598b20-e8a5-45eb-953a-2b474fd2695a': u'Test - Eichst\xe4tt Schafe',
+    'c39fe95e-2cfd-461b-8103-cfd3e2b45e67': u'??? (lb)',
+    'c46994cc-5ca7-4f9f-b548-7bd6a6fff026': u'Kopie 1 - Bad Gateway',
+    'c528e93f-3469-4de9-b05d-e809575d2999': u'????',
+    'c6606915-3b7e-48f4-adfe-cbcf3130e76a': u'Kopie 12 - Bad Urach',
+    'c8ed936c-25d6-40ea-a731-2741c1d53b48': u'Born',
+    'c9df06e1-33a7-40fc-bd3d-0eba6abba245': u'Test - Schierke Luchse',
+    'ce093332-dc98-4cfa-9ff4-47903897e84f': u'Kopie 5 - Bad Gateway',
+    'ceb78b48-495d-4761-bb61-563fa4dd41fb': u'Kopie 2 - Eichstaett - Stadt\xf6kologie2',
+    'd5712976-59fa-452c-a5e3-8b4232b5cb44': u'Kopie 1 - Garmisch Test',
+    'd7d0be7e-a0ac-4f4f-910d-c55e13a01e88': u'Test - Garmisch Rinder',
+    'db1cd7aa-878b-4c30-9234-0739498996d6': u'Bad Urach - \xfcberarbeitet',
+    'df68db4d-3d51-45f3-96ed-08429a7de3c9': u'A kopietest docker',
+    'e3b0ffce-6135-400e-9796-d1aef173aaf5': u'Kopie 3 - Taltitz',
+    'e3e86e25-4d92-11e6-b176-00199963ac6e': u'Garmisch (geschrumpft)',
+    'e7b3094d-d3c6-41c7-92e3-28638365e018': u'Born II',
+    'e7f36db3-b919-4208-8d98-b5d400b5d972': u'Kopie 15 - Bad Urach',
+    'e9f35c27-6f4f-487c-b07e-9d0ab27a7b85': u'Kopie 1 - Filderstadt',
+    'ea94b249-439b-46dd-b621-e0fbe99aa4ee': u'Stadt\xf6kologie berichtigt',
+    'ec782ab1-eb9d-43b9-a2d1-4699f8432adb': u'Kopie 2 - Bad Urach',
+    'ecfdfd0b-28be-4df2-8994-8092a7fe87b5': u'Kopie 3 - Simulation: Landnutzung',
+    'f7bb56a3-fb15-413a-9a3e-f61e22d0a7d1': u'Kopie 2 - Schierke',
+    'f8f65e9d-de9e-4f8d-8bb6-d1e4e01593a0': u'Arbeitstitel',
+    'fca28f01-ea17-4c41-8e60-4726f96dfca8': u'Kopie 1 - Test-Born',
+    'fe43a0f0-3dea-11e6-a065-00199963ac6e': u'Vorlagen',
+    'ff8f1e8f-6cf5-4a7b-835b-5e2226c1e771': u'Bad Urach'
+}