Compare commits


No commits in common. "master" and "p1-rc0" have entirely different histories.

72 changed files with 572 additions and 1581 deletions

View File

@@ -2,10 +2,9 @@
%Necessary Information
\author{Clemens Klug}
\title{A Framework for the Analysis of Spatial Game Data}
\subtitle{Ein Analyseframework f\"ur raumbezogene Spieldaten\\3. Vortrag}
\subtitle{Ein Analyseframework f\"ur raumbezogene Spieldaten}
%The day of the presentation
%\date{\today}
\date{7. August 2018}
\date{\today}
%Optional Information
\subject{A Framework for the Analysis of Spatial Game Data}
@@ -22,5 +21,5 @@
\titlegraphic{\includegraphics[width=13mm,height=13mm]{image/logo}}
%\gittrue
\gittrue
\presentationtrue

21 binary image files changed but not shown (sizes between 7.6 KiB and 252 KiB).

View File

@@ -16,13 +16,7 @@ compress
\newcommand\meta{../meta}
\input{\meta/config/commands}
\newcommand{\backupbegin}{
\newcounter{finalframe}
\setcounter{finalframe}{\value{framenumber}}
}
\newcommand{\backupend}{
\setcounter{framenumber}{\value{finalframe}}
}
\def\signed #1{{\leavevmode\unskip\nobreak\hfil\penalty50\hskip2em
\hbox{}\nobreak\hfil(#1)%
@@ -100,38 +94,39 @@ compress
%%%%%%%%%% Content starts here %%%%%%%%%%
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
\section{A Framework for the Analysis of Spatial Game Data}
\begin{frame}{A Framework for the Analysis of Spatial Game Data}
\framesubtitle{Goal definition}
\section{Location based Games}
\begin{frame}{Location based Games}
\framesubtitle{Put the `fun' in education}
\begin{columns}
\column{0.49\linewidth}
Geogames: Intersection of GIS and gaming technology\footnotemark
\column{0.55\linewidth}
Intersection of GIS and gaming technology\footnotemark
\begin{itemize}
\item Game actions tied to real-world spatial places (`Break the magic circle')
\item Game actions tied to real-world spatial places (`Break the magic circle')%TODO citation
\item Locomotion as essential game part
\item Trade-offs between board and race style games\footnotemark
\item Foster recognition of environment
\item Focus through game related tasks
\item Trade-offs between board and race style games\footnotemark
\end{itemize}
\column{.49\linewidth}
Scope of the analysis framework
\begin{itemize}
\item Framework for analysis
\item Two target groups:
\begin{itemize}
\item Expert users/researchers
\item Staging/designing staff
\end{itemize}
\item Integration of external data (questionnaire results)
\item Adaptable for multiple games
\end{itemize}
\column{.45\linewidth}
\image{.5\textwidth}{gg2}{Geogame map view}{img:gg2}
\end{columns}
\addtocounter{footnote}{-1}
\footcitetext{Ahlqvist2018}\stepcounter{footnote}
\footcitetext{1705427}
\end{frame}
\begin{frame}{Framework components}
\section{Goal definition}
\begin{frame}{Goal definition}
\framesubtitle{A Framework for the Analysis of Spatial Game Data}
\begin{itemize}
\item Framework for analysis
\item Two target groups: expert users/researchers vs. staging/designing staff
\item Integration of external data (questionnaires => CSV or similar)
\item Adaptable for multiple games
\end{itemize}
\end{frame}
\begin{frame}{Components}
Prerequisites: Game log
\begin{itemize}
\item Creation
@@ -139,6 +134,7 @@ Prerequisites: Game log
\end{itemize}
New components
\begin{itemize}
% \item[{\small $\Box$}] {\small(create + store game log)}
\item Per-game importer (Web client, File loader, …)
\item Analyzer modules (number crunching)
\item Output \& Visualization (CSV, [Geo]JSON, KML, Graphs, …)
@@ -147,154 +143,146 @@ New components
\end{itemize}
\end{frame}
\section{Modular map-reduce architecture}
\section{State of research}
\frame<handout:0>
{
\tableofcontents[sectionstyle=show/hide,hideothersubsections]
}
\begin{frame}{Architecture}
Log processing turn-key solutions
\subsection{Log processing}
\begin{frame}{Modern log processing stacks}
Technologies: Collection, Time-series databases, Frontend
\begin{itemize}
\item Fast update cycles
\item Low spatial resolution
\item Query languages as entry barrier
\item ELK (Elastic search, Logstash, Kibana)\autocite{andreassen2015monitoring} \autocite{yang2016aggregated} \autocite{steinegger2016analyse} \autocite{sanjappa2017analysis}
\item Collectd, Influx DB, Grafana \autocite{komarek2017metric}
\end{itemize}
Custom stack solution
\begin{itemize}
\item Based on map-reduce
\item Map: Analysis
\begin{itemize}
\item Iterate Log entries
\item Feed log entry through analyzer queue
\item[+] widely deployed
\item[+] powerful query languages %TODO example
\item mainly web/container/hardware monitoring
\item[-] spatial analysis: heavily anonymised (reduced resolution)
\item[-] fast-paced environment
\end{itemize}
TODO: image with imported GG2 log file %TODO
\end{frame}
\subsection{Pedestrian traces}
\begin{frame}{Analyzing pedestrian movement}
\framesubtitle{… based on GPS logs}
\begin{itemize}
\item GPS overestimates systematically \autocite{Ranacher_2015}
\item GPS is a suitable instrument for spatio-temporal data\autocite{van_der_Spek_2009}
\item Activity mining
\begin{itemize}
\item Augment entries
\item Filter entries
\item Sequential order
\item Overview: \autocite{Gong_2014}
\item Speed-based Clustering \autocite{ren2015mining}
%\item \autocite{Ferrante_2016} % closed access
\item Machine Learning \autocite{pattern_recog} %TODO
\end{itemize}
\end{itemize}
\item Reduce: Collect summaries from analyzers
\begin{itemize}
\item Rendering
\item Post-processing, Comparison, …
\end{itemize}
\item E.g.: Improve tourist management \autocite{tourist_analysis2012}
\end{itemize}
\end{frame}
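The map/reduce split sketched above boils down to a small loop; a minimal Python sketch, assuming the hypothetical Analyzer/Loader interfaces listed further down in this diff and a ResultStore instance \texttt{store}:
\begin{verbatim}
def run_analysis(loader, analyzers, store):
    # Map: feed each log entry through the analyzer queue
    for entry in loader.get_entry():
        for analyzer in analyzers:
            if analyzer.process(entry):
                break  # entry consumed, no further analysis
    # Reduce: collect the summaries of all analyzers
    for analyzer in analyzers:
        analyzer.result(store)
\end{verbatim}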
\begin{frame}{Log processing scheme}
\image{\textwidth}{../../ThesTeX/images/map-reduce.pdf}{Data flows}{img:flow}
\begin{frame}{Heatmap}
\image{.81\textwidth}{strava}{Heatmap: fitness tracker\autocite{strava}}{img:strava}
\end{frame}
%%%%%%%%%%%%%%%%%55
% PRESENTATION 2
%%%%%%%%%%%%%%%%%%%%
\section{Microservice oriented implementation}
\subsection{Service Composition}
\begin{frame}{Microservice Composition}
\framesubtitle{Dockerize everything!}
\begin{columns}
\column{0.45\linewidth}
\begin{itemize}
\item Analysis framework: Celery
\item User interface: Flask
\item Result server: Nginx
\item Connection Flask - Celery: Redis
\item Public frontend: Traefik (external)
\item Orchestration: Docker-Compose
\end{itemize}
\column{0.525\linewidth}
\image{\textwidth}{../../ThesTeX/images/architecture}{Service structure}{img:microservices}
\end{columns}
\begin{frame}{Space-time cube}
\image{.72\textwidth}{space-time}{Space-time cube examples\autocite{bach2014review}}{img:spacetime}
\end{frame}
\subsection{Results}
\begin{frame}{Generalization}
\image{\textwidth}{generalization}{Trajectories and generalizations with varying radius parameter \autocite{adrienko2011spatial}}{img:generalization}
\end{frame}
\begin{frame}{Trajectory patterns}
\image{\textwidth}{traj-pattern}{Flock and meet trajectory pattern\autocite{jeung2011trajectory}}{img:traj-pattern}
\end{frame}
\subsection{Analyzing games}
\begin{frame}{Analyzing games}
\begin{itemize}
\item more than just heatmaps
\item combine position with game actions
\item identify patterns, balancing issues
\item manual processes %\citetitle{Drachen2013}\citetitle{AHLQVIST20181}
\end{itemize}
\vspace{-42pt}
%\image{.5\textwidth}{game-an}{chat logs with players location \autocite{Drachen2013}}{img:chatlogs}
%\image{.5\textwidth}{ac3-death}{identify critical sections \autocite{Drachen2013}}{img:ac3death}
\twofigures{0.5}{game-an}{chat logs with players' location}{img:chatlogs}{ac3-death}{identify critical sections}{img:ac3death}{game analytics \cite{Drachen2013}}{fig:gameanal}
\end{frame}
\subsection{Location based games}
\begin{frame}{Analyzing location based games}
\begin{itemize}
\item questionnaires (pre + post; p-hacking)
\end{itemize}
TODO: cite \footcite{Schaal2017} %TODO
\end{frame}
\subsection{Summary}
\begin{frame}{Summary}
\begin{itemize}
\item Log processing: not a good fit
\item Evaluating pedestrian tracks: done to death (GPS measurement errors, patterns, behaviour recognition, …)
\item Track rendering: line (with attributes), space-time cube, heatmap, …
\item Spatial analysis of computer games: well established
\item Analysis of location-based games: nobody does it?
\end{itemize}
\end{frame}
\section{Solution approach}
\frame<handout:0>
{
\tableofcontents[sectionstyle=show/hide,hideothersubsections]
}
\subsection{Requirements}
\begin{frame}{Requirements}
\end{frame}
\subsection{Architecture}
\begin{frame}{Solution approach}
\begin{itemize}
\item map-reduce like
\item map: analysis runs
\begin{itemize}
\item based on log entries
\item augment the log
\item filter the log
\item metrics over the complete log => stored externally
\item => possible ordering dependencies
\item base functionality || game-specific
\end{itemize}
\item reduce: operate on results (across all processed logs)
\begin{itemize}
\item render
\item distribution, …
\end{itemize}
\end{itemize}
\end{itemize}
\image{.5\textwidth}{../../ThesTeX/images/flowchart.pdf}{flowchart}{img:flow}
\end{frame}
\section{Outlook: Implementation}
\begin{frame}{Implementation}
\begin{itemize}
\item Python (3.6)
\item standalone library/CLI tool
\item web-based configuration/runner/API (Flask)
\end{itemize}
\end{frame}
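As a rough sketch of the Flask-based runner mentioned above (route and payload are illustrative assumptions, not the actual implementation):
\begin{verbatim}
from flask import Flask, jsonify, request

app = Flask(__name__)

@app.route("/run", methods=["POST"])
def run():
    # Hypothetical endpoint: accept an analysis configuration
    # and queue it for the framework (e.g. as a Celery task).
    config = request.get_json()
    return jsonify({"status": "queued", "config": config})

if __name__ == "__main__":
    app.run(host="0.0.0.0", port=5000)
\end{verbatim}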
\begin{frame}{Configuration \& results}
\twofigures{0.5}{oeb-kml}{Analyzer configuration}{img:oebkml}{oeb-ge}{Result visualized}{img:oebge}{Example: Generate KML tracks (BioDiv2Go; Oberelsbach2016)}{fig:oeb2016}
\end{frame}
\begin{frame}{ActivityMapper}
\image{.7\textwidth}{track-fi}{Combined screen activity and spatial progress}{img:trackfi}
\end{frame}
\begin{frame}{Track length evaluation}
\begin{columns}
\column{0.49\linewidth}
\image{\textwidth}{oeb-raw}{Raw track lengths}{img:oeb-raw}
\column{0.49\linewidth}
\image{\textwidth}{oeb-simplified}{Simplified track lengths}{img:oeb-simpe}
\end{columns}
\end{frame}
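The raw/simplified comparison above can be reproduced with a Douglas-Peucker style simplification; a sketch using shapely (the tolerance value is an illustrative assumption):
\begin{verbatim}
from shapely.geometry import LineString

def track_lengths(points, tolerance=0.0001):
    # points: [(lon, lat), ...]; tolerance in coordinate units
    raw = LineString(points)
    simplified = raw.simplify(tolerance)  # Douglas-Peucker
    return raw.length, simplified.length
\end{verbatim}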
\section{Evaluation}
\subsection{Setup}
\begin{frame}{Evaluation}
%\begin{itemize}
% \item Analyse other geogames
% \item Describe effort
% \item ?
% \item Profit
%\end{itemize}
\image{\textwidth}{eval}{Evaluation setup}{img:evalplan}
\end{frame}
\begin{frame}{Evaluation}
Clients
\begin{longtable}[H]{ccp{0.6\textwidth}}
Geogame & Log files & Notes \\
\hline
BioDiv2Go & $\approx430$ & SQLite database with JSON log entries, references to game config; import base case\\
GeoTicTacToe & $\approx13$ & CSV with pipes; no temporal data; events + tracks\\
\caption{Geogame client log data}
\label{tab:logscli}
\end{longtable}
Servers
\begin{longtable}[H]{ccp{0.6\textwidth}}
Geogame & Log files & Notes \\
\hline
GeoTicTacToe & $\approx2$ & intermediate log format\\
GeoTTT & $\approx130$ & fragmented structure: incomplete or split?\\
Neocartographer & $\approx400$ & Partly broken GPX: missing description information; one GPX file per player\\
MissingLink & $\approx6$ & Partly broken GPX: missing spatial information; one GPX file per player\\
Equilibrium & $\approx40$ & GPX with missing end tag\\
\caption{Geogame servers log data}
\label{tab:logssrv}
\end{longtable}
\end{frame}
\subsection{Results}
\begin{frame}{Integration of Neocartographer}
Challenges
\begin{itemize}
\item Corrupted XML files
\item No game server with API for log retrieval
\end{itemize}
\begin{longtable}[H]{rl}
Error type & Example \\
\hline
missing attribute space & <desc><event message="leaveObject"geoid="9"/></desc>\\
unclosed tag & <desc><event </desc>\\
missing attribute name & <trkpt lat="48.3689110.897709">\\
invalid attribute values & <trkpt lat="UNKNOWN" lon="UNKNOWN">\\
\caption{Neocartographer GPX log error types}
\label{tab:xml}
\end{longtable}
Solutions
\begin{itemize}
\item Recovery parser \& custom cleanup (new dependency: lxml)
\item Additional log server (Nginx with JSON autoindex)
\end{itemize}
\end{frame}
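The recovery parser mentioned above relies on lxml's recover mode, which skips or repairs malformed markup instead of aborting; a minimal sketch (the full NeoCartLoader appears further down in this diff):
\begin{verbatim}
from lxml import etree

parser = etree.XMLParser(recover=True)  # tolerate broken GPX
tree = etree.parse("track.gpx", parser=parser)
# Remaining cleanup (e.g. fused lat/lon attributes) happens afterwards.
\end{verbatim}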
\begin{frame}{Evaluation results}
\image{\textwidth}{eval-changes}{Code changes necessary for the integration of another game}{img:eval}
\end{frame}
\section{Demotime}
\begin{frame}{It's time for...}
\huge{...a demo!}
\vspace{2cm}
\pic{.9\textwidth}{demo}
\end{frame}
%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%
@@ -304,14 +292,10 @@ Solutions
\begin{frame}[allowframebreaks]{References}
\def\newblock{\hskip .11em plus .33em minus .07em}
\scriptsize
\setbeamertemplate{bibliography item}[text]
\printbibliography
\normalsize
\end{frame}
\appendix
\backupbegin
\end{document}

ThesTeX/.gitignore
View File

@@ -25,4 +25,3 @@
.prepared
# nano swap files
*.swp
*.lol

View File

@@ -30,7 +30,7 @@ initialize:
DOTTYPE := pdf
LANG := en
LANG := de
F :=
DOT_WILDCARD := images/*.dot
.PHONY: fast latex bibtex dot spell spell1 todo

View File

@@ -1,19 +0,0 @@
# LogSettings and ResultStore are framework-internal types imported
# elsewhere in the package (imports not shown in this file).
class Analyzer:
    """Operate on log entries, one at a time"""

    def __init__(self, settings: LogSettings) -> None:
        self.settings: LogSettings = settings

    def process(self, entry: dict) -> bool:
        """
        Process an entry
        :param entry: Entry to process
        :return: True if consumed, False for further analysis
        """
        raise NotImplementedError()

    def result(self, store: ResultStore, name=None) -> None:
        raise NotImplementedError()

    def name(self) -> str:
        return type(self).__name__  # __name__ is defined on the class, not the instance
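
For illustration, a hypothetical concrete subclass of this interface (not part of the diff); it counts entries per event type:

class EventCounter(Analyzer):
    """Count log entries per event type (illustration only)."""
    def __init__(self, settings):
        super().__init__(settings)
        self.counts = {}

    def process(self, entry: dict) -> bool:
        kind = entry.get("type", "unknown")
        self.counts[kind] = self.counts.get(kind, 0) + 1
        return False  # never consume; later analyzers still see the entry

    def result(self, store, name=None) -> None:
        store.add(name or self.name(), self.counts)  # store API assumed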

View File

@@ -1,30 +0,0 @@
import os
import sqlite3
import tempfile
import zipfile
from json import loads as json_loads

from .loader import Loader

DB_FILE = "instance_log.sqlite"


class SQLiteLoader(Loader):
    conn = None

    def load(self, file: str):
        self.conn = sqlite3.connect(file)

    def get_entry(self) -> dict:
        cursor = self.conn.cursor()
        cursor.execute("SELECT * FROM log_entry")
        for seq, timestamp, json in cursor.fetchall():
            yield json_loads(json)


class ZipSQLiteLoader(SQLiteLoader):
    def load(self, file: str):
        with zipfile.ZipFile(file, "r") as zipped_log, tempfile.TemporaryDirectory() as tmp:
            zipped_log.extract(DB_FILE, path=tmp)
            super(ZipSQLiteLoader, self).load(os.path.join(tmp, DB_FILE))
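
Illustrative usage (file name assumed):

loader = ZipSQLiteLoader()
loader.load("session.zip")        # extracts instance_log.sqlite to a temp dir
for entry in loader.get_entry():  # one JSON log entry per database row
    print(entry)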

View File

@@ -1,30 +0,0 @@
FROM alpine:edge
RUN set -ex &&\
    echo "http://dl-cdn.alpinelinux.org/alpine/edge/testing/" >> /etc/apk/repositories && \
    apk upgrade --update &&\
    apk add --update nodejs proj4-dev \
        geos-dev \
        py-geos \
        gdal \
        gdal-dev \
        py-gdal \
        py-psycopg2 \
        jpeg-dev \
        postgresql-dev \
        bash \
        py-virtualenv \
        gcc \
        zlib-dev \
        python2-dev \
        libc-dev \
        sqlite-dev \
        postgresql-client
ADD ["start.sh", "/"]
VOLUME ["/biogames", "/venv"]
EXPOSE 8000
ENTRYPOINT ["/start.sh"]
CMD ["runserver", "0.0.0.0:8000"]

View File

@@ -1,48 +0,0 @@
version: "3"
services:
  web:
    build: ./docker2
    image: biogames:python2
    volumes:
      - ./biogames:/biogames
      - ../gamefield/:/gamefield
      - ./venv_web/:/venv/
      - ./gg-data/files/:/biogames/files/
      - ./gg-data/media/:/biogames/media/
    restart: on-failure:5
    depends_on:
      - db
    networks:
      - traefik_net
      - default
    labels:
      - "traefik.enable=true"
      - "traefik.port=8000"
      - "traefik.docker.network=traefik_net"
      - "traefik.http.frontend.rule=Host:biogames.potato.kinf.wiai.uni-bamberg.de"
  celery:
    image: biogames:python2
    volumes:
      - ./biogames:/biogames
      - ./venv_celery/:/venv/
      - ./gg-data/files/:/biogames/files/
      - ./gg-data/media/:/biogames/media/
    restart: on-failure:5
    command: ["celery", "worker"]
    depends_on:
      - db
  db:
    image: docker.clkl.de/postgis:9.6-alpine
    volumes:
      - ./postgres/:/var/lib/postgresql/data
    environment:
      - POSTGRES_PASSWORD=secret
    restart: on-failure:5
networks:
  traefik_net:
    external:
      name: traefik_net

View File

@@ -1,15 +0,0 @@
#!/bin/bash
if [ ! -f /venv/bin/activate ]; then
    virtualenv /venv;
fi;
source /venv/bin/activate;
LIBRARY_PATH=/lib:/usr/lib pip install -qr /biogames/requirements.txt;
until pg_isready -qh db; do
    echo "Waiting for Postgres...";
    sleep 1;
done
cd /biogames;
/biogames/manage.py "$@"

View File

@@ -1,22 +0,0 @@
version: '2'
services:
  kibana:
    image: docker.elastic.co/kibana/kibana:6.2.1
    volumes:
      - ./kibana.yml:/usr/share/kibana/config/kibana.yml
    environment:
      - SERVER_HOST=0.0.0.0
      - ELASTICSEARCH_URL=http://elastic:9200
    networks:
      - default
      - traefik_net
    labels:
      - "traefik.enable=true"
      - "traefik.docker.network=traefik_net"
  elastic:
    image: docker.elastic.co/elasticsearch/elasticsearch-basic:6.2.1
networks:
  traefik_net:
    external:
      name: traefik_net

View File

@@ -1,70 +0,0 @@
import logging
from datetime import datetime

from lxml import etree

from .loader import Loader

log = logging.getLogger(__name__)

NS = {'gpx': "http://www.topografix.com/GPX/1/1"}


class NeoCartLoader(Loader):
    def load(self, file: str):
        src = open(file, "r")
        parser = etree.XMLParser(recover=True)
        tree = etree.parse(src, parser=parser)
        self.entries = []
        for point in tree.xpath("//gpx:trkpt", namespaces=NS):
            try:
                self.entries.append(self.parse_point(point))
            except ValueError as e:
                print(e, etree.tostring(point, pretty_print=True).decode())
                log.exception(e)

    def parse_point(self, point):
        raw_lat = point.xpath("@lat")[0]
        if raw_lat.count(".") > 1:
            # Broken GPX: lat and lon were fused into one attribute,
            # e.g. lat="48.3689110.897709"; split them apart again.
            log.warning(f"recreate lat/lon from: {raw_lat}")
            log.warning(etree.tostring(point, pretty_print=True).decode())
            start_offset = 4
            x = raw_lat[start_offset:].index(".")
            offset = start_offset + x
            raw_lon = raw_lat[offset:]
            raw_lat = raw_lat[:offset]
        else:
            raw_lon = point.xpath("@lon")[0]
        lat = float(raw_lat)
        lon = float(raw_lon)
        times = point.xpath("gpx:time", namespaces=NS)
        assert len(times) == 1
        time = times[0].text
        dt = datetime.strptime(time, "%Y-%m-%dT%H:%M:%SZ")
        timestamp = int(dt.timestamp() * 1000)  # python3.6 has no timestamp_ns (yet)
        events = point.xpath(".//gpx:event", namespaces=NS)
        assert 0 <= len(events) <= 1
        event = {}
        if events:
            event = dict(events[0].attrib)
            if events[0].tail and events[0].tail.strip():
                try:
                    # base case: trailing 'geoid="0"/>'
                    key, v = events[0].tail.strip().split("=")
                    value = v.split('"')[1]
                    event[key] = value
                except Exception:
                    event['__tail__'] = events[0].tail.strip()
        return {
            "location": {
                "type": "Point",
                "coordinates": [lon, lat]
            },
            "timestamp": timestamp,
            "event": event,
            "type": event['message'] if event else "location"
        }

    def get_entry(self) -> object:
        for i in self.entries:
            yield i
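
Illustrative usage (file name assumed); corrupt trackpoints are reported and skipped:

loader = NeoCartLoader()
loader.load("player.gpx")  # recovery-parses the broken GPX
for entry in loader.get_entry():
    print(entry["timestamp"], entry["type"], entry["location"]["coordinates"])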

View File

@@ -1,63 +0,0 @@
version: "3"
services:
  app:
    image: docker.clkl.de/ma/celery:0.4.1
    build: .
    volumes:
      - ./:/app
    working_dir: /app/selector
    command: python3 webserver.py
    environment:
      - PYTHONPATH=/app
      - PYTHONUNBUFFERED=1
    networks:
      - default
      - traefik_net
    labels:
      - "traefik.enable=true"
      - "traefik.port=5000"
      - "traefik.docker.network=traefik_net"
      - "traefik.url.frontend.rule=Host:select.ma.potato.kinf.wiai.uni-bamberg.de"
  celery:
    image: docker.clkl.de/ma/celery:0.4.1
    environment:
      - PYTHONPATH=/app
      - PYTHONUNBUFFERED=1
    volumes:
      - ./:/app
      - ./data/results:/data/results
    working_dir: /app
    command: celery -A tasks.tasks worker --loglevel=info
  redis:
    image: redis:4-alpine
    volumes:
      - ./data/redis:/data
    command: redis-server --appendonly yes
  nginx:
    image: nginx:1.13-alpine
    volumes:
      - ./data/results:/usr/share/nginx/html:ro
    networks:
      - traefik_net
    labels:
      - "traefik.enable=true"
      - "traefik.port=80"
      - "traefik.docker.network=traefik_net"
      - "traefik.url.frontend.rule=Host:results.ma.potato.kinf.wiai.uni-bamberg.de"
  log_data:
    image: nginx:1.13-alpine
    volumes:
      - ./log_data/:/srv/:ro
      - ./log_data.conf:/etc/nginx/conf.d/log_data.conf
networks:
  traefik_net:
    external:
      name: traefik_net

View File

@@ -1,13 +0,0 @@
from typing import List
# Result is a framework-internal type imported elsewhere in the package.


class Render:
    result_types = []

    def render(self, results: List[Result], name=None) -> [str]:
        raise NotImplementedError()

    def filter(self, results: List[Result]):
        if len(self.result_types) == 0:
            return results
        return filter(self.__filter__, results)

    def __filter__(self, obj: Result):
        return obj.analysis() in self.result_types
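
A hypothetical renderer against this interface, for illustration; it serializes all matching results to JSON files:

import json

class JsonRender(Render):
    result_types = []  # empty list: accept every result type

    def render(self, results, name=None):
        files = []
        for i, result in enumerate(self.filter(results)):
            path = f"{name or 'result'}_{i}.json"
            with open(path, "w") as out:
                json.dump(result.data(), out)  # data() accessor assumed
            files.append(path)
        return files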

View File

@@ -1,24 +0,0 @@
logLevel = "ERROR"
defaultEntryPoints = ["https", "http"]

[entryPoints]
  [entryPoints.http]
    address = ":80"
    [entryPoints.http.redirect]
      entryPoint = "https"
  [entryPoints.https]
    address = ":443"
    [entryPoints.https.tls]

[docker]
exposedbydefault = false
watch = true

[acme]
email = "tls-admin@examplo.org"
storage = "acme.json"
entryPoint = "https"
OnHostRule = true

[acme.httpChallenge]
entryPoint = "http"

View File

@@ -1,24 +0,0 @@
version: "3"
services:
  traefik:
    image: traefik
    command: --logLevel="ERROR" --docker.exposedbydefault=false
    volumes:
      - ./traefik.toml:/traefik.toml
      - /var/run/docker.sock:/var/run/docker.sock
      - /srv/traefik/acme.json:/acme.json
    ports:
      - 80:80
      - 443:443
    networks:
      - net
    labels:
      - "traefik.enable=true"
      - "traefik.port=8080"
      - "traefik.frontend.rule=Host:traefik.potato.kinf.wiai.uni-bamberg.de"
networks:
  net:
    driver: bridge

View File

@@ -4,10 +4,10 @@
\subtitle{A Framework for the Analysis of Spatial Game Data}
\newcommand\degree{Master}
\newcommand\studycourse{Angewandte Informatik}
\newcommand\advisor{Prof. Dr. Christoph Schlieder}
\newcommand\advisor{Christoph Schlieder}
\newcommand\location{Bamberg}
\subject{\degree arbeit im Studiengang \studycourse\ der Fakultät Wirtschaftsinformatik und Angewandte Informatik der Otto-Friedrich-Universität Bamberg}
\date{14.06.2018}
\date{19.06.2018?} %TODO
\gitfalse
\gittrue
\thesistrue

View File

@@ -1,22 +1,12 @@
In this thesis, a framework for the analysis of spatial game data is developed.
This game data is collected during the game sessions and stored in log files.
The following chapters describe the basics of the development process.
\section{Analysis of location-based games}
\begin{itemize}
\item geogames
\item data traces, logs
\item insights gained from spatial logs
\item geogame analysis (e.g. papers evaluating a geogame)
\item feedback loop: designers/stagers
\end{itemize}
\section{Location based Games: Put the `fun' in education}
Example of an evaluation with pre/post questionnaires: \cite{Schaal2017}
Spatial games, also known as location based games, are at the intersection of GIS and gaming technology \cite{Ahlqvist2018}.
With game actions tied to real-world spatial places, this genre breaks the magic circle of games: they are embedded into the environment and the boundary between game and non-game vanishes \cite{montola2009games}.
As they feature locomotion as an essential game part, a focus on certain aspects of the environment can be achieved by game-related tasks.
These tasks can include educational aspects or reward special behaviour through ingame benefits as a means of gamification.
A playable game with good balance and a lasting impact needs to consider the trade-off between board and race style games \cite{1705427,kremer2013spatial}.
Board style games are dominated by strategic planning with less temporal restrictions, while race styled games favour the physical capabilities of the fastest players.
Popular examples of mobile geogames are Ingress\furl{https://www.ingress.com/} and the more recent Pokemon Go\furl{https://www.pokemongo.com/}\!.
These worldwide playable games barely embed the surroundings into the game except for the base map and some landmark-derived attributes\footnote{Pokemon Go aligns the land types with the possible types of Pokemon available}\!.
With a fine-tuned setup of educational content, game elements and integration of locomotion on the other hand, location based games (also known as geogames) foster recognition of the environment.
\autoref{img:gg2} shows the map overview of such a game: FindeVielfalt Simulation\furl{https://biodivlb.jimdo.com/english-1/project-finde-vielfalt/finde-vielfalt-simulation/}\!.
Located in an orchard, the blue dots are caches tied to game actions.
To proceed in the game's narrative, the caches have to be completed.
The players have to complete a task within the context of each cache's location.
\image{.5\textwidth}{../../PresTeX/images/gg2}{Geogame map view}{img:gg2}
TODO \cite{Ahlqvist2018} %TODO

View File

@@ -1,39 +1,37 @@
\label{sec:scope}
\section{Goal definition}\label{sec:require}
To provide a ground truth to findings derived from questionnaires about geogames (as described in \autoref{sec:gg-res}), a \emph{Framework for the Analysis of Spatial Game Data} is the goal of this work.
This framework shall rely on geogame log files providing both real world data (location and locomotion) and ingame data (game process, game actions).
From these logs, the framework shall support the creation of different analysis reports.
To be a true framework, it must avoid tight coupling to any specific game.
For a holistic solution, the integration of external data (e.g. questionnaire results) allows the framework to compute an overall result.
\subsection{Target groups}
There are two target groups for this project: Researchers and staging staff.
Researchers extract the influence of games on players' behaviour, which makes them the first target group of the analysis framework.
They need a sufficient data basis for evaluations of broader scope.
A second user group is the game staging and game designing staff.
They require feedback about difficult sections in the game, hard-to-reach locations, or overcrowded situations to improve the game enjoyment of the players.
Additionally, they need statistics to conclude the game session with a review of the game round.
With a result for each player, these statistics allow individual feedback.
With spatial trajectories of possibly individual players, basic privacy protection needs to restrict access to the log files to a minimal number of people.
Additionally, a user interface for the creation of predefined analysis settings with custom logs provides the stagers and designers with a reasonable information basis.
\section{Components}
\subsection{Prerequisites}
Out of scope for this work is the creation of the geogame logs; they are a prerequisite for applying the framework to any (geo-)game.
Obviously, game sessions without logs leave no data basis for an evaluation.
The same applies to the storage of the game logs.
As long as an interface to access the logs is provided, an access client belongs to the scope of the framework.
\subsection{New components}
The framework can be defined by the following components:
\section{Analysis framework}
\begin{itemize}
\item Per-game importer (Web client, File loader, …) to load log files for analysis
\item Analyzer modules to perform the number crunching and extraction of information
\item Output \& Visualization export the derived information for storage, interpretation or further analysis, e.g. into CSV, [Geo]JSON, KML, Graphs, …
\item User Interface for the configuration of the analysis run and the selection of processed game logs
\item built-in analysis => fixed implementation, no cross-game evaluations (independent results/methods etc.)
\item framework
\begin{itemize}
\item create + store game log
\item per-game importer
\item analyzer modules crunch numbers
\item output/visualization
\item interface
\item cross-game comparisons
\end{itemize}
\end{itemize}
\section{Scope}
to do
\begin{itemize}
\item framework for analysis
\item expert users/researchers vs. staging/designing staff
\item integration of external data (questionnaires => CSV or similar)
\item adaptable for multiple games
\end{itemize}
not to do
\begin{itemize}
\item interpretation
\item questionnaires
\end{itemize}
steps
\begin{enumerate}
\item import data
\item analyze data
\item store/render/… results
\end{enumerate}

View File

@@ -1,2 +1,291 @@
%\input{content/2.0-collection}
\input{content/2.1-text}
\begin{itemize}
\item Sensing Human Activity: GPS Tracking \cite{van_der_Spek_2009} %/home/agp8x/Downloads/sensors-09-03033.pdf
\\\begin{itemize}
\item 2009
\item evaluate the sensory qualities that GPS technology offers for researching measures of urban quality
\item value of GPS as a sensor technology in urban studies
\end{itemize}
\item Exploring Patterns of Movement Suspension in Pedestrian Mobility \cite{GEAN:GEAN818} %/home/agp8x/Downloads/Exploring_patterns_of_movement_suspension_in_pedestrian_mobility.pdf
\\\begin{itemize}
\item 2011
\item explore patterns of movement suspension
\item classify the movement vectors and identify the location of movement suspension patterns
\item The results have shown that most of the detected patterns formed spatial clusters located around places that represent some attraction in the context in which the pedestrians were moving. For the players in the urban mobile game, suspension patterns were located at the checkpoints and a-priori known locations of gaming events.
\end{itemize}
\item Using location-based tracking data to analyze the movements of city tourists \cite{Modsching:2008:1098-3058:31} %/home/agp8x/Downloads/fulltext.pdf
\\\begin{itemize}
\item 2008
\item spatial distribution of visitors in various areas is visualized and analyzed
\item distribution of walking speeds, areas of slowdown are identified and subsequently clustered into activity areas
\end{itemize}
\item GPS in Pedestrian and Spatial Behaviour Surveys \cite{nielsen2004gps} %/home/agp8x/Downloads/TSN_HHH_Cities_for_people_2004.pdf
\\\begin{itemize}
\item 2004
\item possibilities in spatial behaviour and pedestrian-surveys with GPS and electronic questionnaires
\end{itemize}
\item Monitoring pedestrian spatio-temporal behaviour \cite{millonig2007monitoring} %/home/agp8x/Downloads/paper03millonig.pdf
\\\begin{itemize}
\item 2007
\item observation and interpretation of pedestrian walking patterns and route decision behaviour
\end{itemize}
\item Analysis of Tourist Behavior Based on Tracking Data Collected by GPS \cite{tourist_analysis2012} %12.pdf
\\\begin{itemize}
\item 2012
\item a user-friendly system that integrates the tracking of visitor spatial activity, the elaboration of such data according to structured formats, and their visualization through a web-based viewer
\item design and validation of this comprehensive methodology, and the early results from a test application
\end{itemize}
\item Spatial Game Analytics \cite{Drachen2013} %/home/agp8x/ownCloud/uni/ma/Neuer Ordner/978-1-4471-4769-5_17.pdf
\\\begin{itemize}
\item 2013
\item Introduction to spatial game analytics and the current state-of-the-art in games development and games research.
\item Advice and ideas on how to get started with spatial analysis of behavioral game telemetry data
\item recommendations:
\begin{verbatim}
Dankoff, J. (2011, September 12). Game telemetry with playtest DNA on assassins creed. The engine room. URL: http://engineroom.ubi.com/game-telemetry-with-playtest-dna-on-assassins-creed/
Demers, N. (2008). Fundamentals of geographical information systems. URL: http://www.amazon.com/Fundamentals-Geographical-Information-Systems-Michael/dp/0470129069
Drachen, A., & Canossa, A. (2011). Evaluating motion: Spatial user behaviour in virtual environments. URL: http://andersdrachen.files.wordpress.com/2011/01/05_drachen_ijart.pdf
Hoobler, N., Humphreys, G., & Agrawala, M. (2004). Visualizing competitive behaviors in multiuser virtual environments . URL: http://www.cs.virginia.edu/~gfx/pubs/lithium/
Houghton, S. (2011). Balance and flow maps. URL: http://altdevblogaday.com/2011/06/01/balance-and-flow-maps-2/
Kennerly, K. (2003). Better game design through data mining . URL: http://www.gamasutra.com/view/feature/2816/better_game_design_through_data_.php
Pruett, C. (2010). Hot failure: Tuning gameplay with simple player metrics. URL: http://www.gamasutra.com/view/feature/6155/hot_failure_tuning_gameplay_with_.php?print=1
Thompson, C. (2007). Halo 3: How Microsoft labs invented a new science of play. URL: http://www.wired.com/gaming/virtualworlds/magazine/15-09/ff_halo
Zoeller, G. (2011). MMO rapid content iteration. URL: http://gdc.gulbsoft.org/
\end{verbatim}
same book:
\begin{verbatim}
18 Visual Game Analytics (Ben Medler), p. 403
19 Visual Analytics Tools: A Lens into Players' Temporal Progression
   and Behavior (Magy Seif El-Nasr, André Gagné, Dinara Moura,
   and Bardia Aghabeigi), p. 435
\end{verbatim}
\item -----
\item beloved, but basic: heatmap
\item spatial contexts in computer games, adapting GIS technology
\end{itemize}
\item GeoGame analytics A cyber-enabled petri dish for geographic modeling and simulation \cite{AHLQVIST20181} %/home/agp8x/ownCloud/uni/ma/Neuer Ordner/1-s2.0-S0198971517304234-main.pdf
\\\begin{itemize}
\item 2018
\item we present the emerging area of Spatial Game Analytics that provides an uncharted area for data-intensive geospatial scenario analysis
\item Exploratory GeoGame analytics to mine spatial behavior of players, identify how variations in the rules and varying locations affect the simulation outcomes.
\item -----
\item manual process: \textit{In our subsequent analysis we first converted our game database to
ArcMap data in order to link the parcel map with game play attributes.
We then used ArcMap primarily as a visual exploration tool to display
parcels and related data, like yield, parcel use, etc., on those parcels.
We also performed some exploratory data analysis in SPSS where our
focus was to look for correlations between variables in the game.}
\end{itemize}
\end{itemize}
\section{Collecting metrics}
\begin{itemize}
\item Geogames: Designing Location-Based Games from Classic Board Games\cite{1705427}
\\\begin{itemize}
\item
\end{itemize}
\item\cite{heinz2015agent}
\\\begin{itemize}
\item
\end{itemize}
\item\cite{schlieder2005geogames}
\\\begin{itemize}
\item
\end{itemize}
\item locomotion-action %TODO citation Reducing Location Overhead in Educational Geogames
\\\begin{itemize}
\item
\end{itemize}
\end{itemize}
\section{Handling trajectories}
\begin{itemize}
\item Computing with Spatial Trajectories \cite{zheng2011computing} %10.1007%2F978-1-4614-1629-6.pdf
\\\begin{itemize}
\item 2011
\item … (collection)
\end{itemize}
\item Trajectory Data Mining: An Overview \cite{Zheng_2015} %TrajectoryDataMining-tist-yuzheng.pdf
\\\begin{itemize}
\item 2015
\item systematic survey on the major research into trajectory data mining
\item methods that transform trajectories into other data formats, such as graphs, matrices, and tensors
\end{itemize}
\item Efficient Motif Discovery in Spatial Trajectories Using Discrete Fréchet Distance \cite{tang_motif} %/home/clemens/Downloads/Efficient motif discovery in spatial trajectories using discrete.pdf
\\\begin{itemize}
\item 2017
\item The discrete Fréchet distance (DFD) captures perceptual and geographical similarity between discrete trajectories
\end{itemize}
\item Mining individual behavior pattern based on significant locations and spatial trajectories \cite{Chen_2012} %TODO
\\\begin{itemize}
\item 2012
\item BP-Mine framework, which consists of three phases, that is, location extraction, trajectory modeling and behavior pattern mining
\item Raw WiFi RSS readings and accelerometer sensor data
\end{itemize}
\item Machine Learning and Data Mining in Pattern Recognition \cite{pattern_recog} %/home/clemens/Downloads/978-3-642-23199-5.pdf
\\\begin{itemize}
\item 2011
\item … (conference collection)
\end{itemize}
\end{itemize}
\section{Log processing}
\begin{itemize}
\item Analyse von Logs mit Open-Source-Werkzeugen \cite{steinegger2016analyse}
\begin{itemize}
\item 2016
\end{itemize}
\item Monitoring mixed-language applications with elastic search, logstash and kibana (elk) \cite{andreassen2015monitoring} %/home/clemens/Downloads/wepgf041.pdf
\begin{itemize}
\item 2015
\item show how we can process almost any type of structured or unstructured data source
\end{itemize}
\item Aggregated containerized logging solution with fluentd, elasticsearch and kibana \cite{yang2016aggregated}
\begin{itemize}
\item 2016
\end{itemize}
\item Analysis of logs by using logstash \cite{sanjappa2017analysis}
\begin{itemize}
\item 2017
\end{itemize}
\item Performance monitoring and optimization \cite{matotek2017performance}
\begin{itemize}
\item 2017
\end{itemize}
\item A practical guide to monitoring and alerting with time series at scale \cite{wilkinson2017practical}
\begin{itemize}
\item 2017
\end{itemize}
\end{itemize}
\section{TODO (i)}
\begin{itemize}
\item Using High-Resolution {GPS} Tracking Data of Bird Flight for Meteorological Observations \cite{Treep_2016}
\begin{itemize}
\item 2016
\item
\end{itemize}
\item Combining {GPS} {\&} survey data improves understanding of visitor behaviour \cite{East_2017}
\begin{itemize}
\item 2017
\item
\end{itemize}
\item A general framework for collecting and analysing the tracking data of cruise passengers at the destination \cite{Ferrante_2016}
\begin{itemize}
\item 2016
\item
\end{itemize}
\item Moves app: a digital diary to track physical activity and location \cite{Evenson_2016}
\begin{itemize}
\item 2016
\item
\end{itemize}
\item Automated Urban Travel Interpretation: A Bottom-up Approach for Trajectory Segmentation \cite{Das_2016}
\begin{itemize}
\item 2016
\item
\end{itemize}
\item Identification of activity stop locations in {GPS} trajectories by density-based clustering method combined with support vector machines \cite{Gong_2015} %/home/clemens/Downloads/s40534-015-0079-x.pdf
\begin{itemize}
\item 2015
\item
\end{itemize}
\item Deriving Personal Trip Data from {GPS} Data: A Literature Review on the Existing Methodologies \cite{Gong_2014} %/home/clemens/Downloads/1-s2.0-S1877042814041597-main.pdf
\begin{itemize}
\item 2014
\item
\item recommends: comparison of travel diaries; extracting activity travel diaries from gps data
\end{itemize}
\item Post-processing Procedures for Passive {GPS} based Travel Survey \cite{Liu_2013} % /home/clemens/Downloads/1-s2.0-S1877042813021642-main.pdf
\begin{itemize}
\item 2013
\item restore the sequences of data points, both in space and time: trips and activities that occurred in the survey time should be identifiable chronologically
\end{itemize}
\item Everyday Cycling in Urban Environments: Understanding Behaviors and Constraints in Space-Time \cite{Yeboah_2014} % /home/clemens/Downloads/Everyday_cycling_in_urban_environments_U.pdf
\begin{itemize}
\item 2014
\item enhance data availability to understand cycling behaviors
\item data was used together with the area cycling infrastructure data
\item Computational Approaches for Urban Environments %/home/clemens/Downloads/978-3-319-11469-9.pdf
\end{itemize}
\item A Hybrid Spatio-Temporal Data Indexing Method for Trajectory Database \cite{Ke_2014}
\begin{itemize}
\item 2014
\item
\end{itemize}
\item Why {GPS} makes distances bigger than they are \cite{Ranacher_2015} %/home/clemens/Downloads/Why GPS makes distances bigger than they are.pdf
\begin{itemize}
\item 2015
\item measurement error causes a systematic bias in distances recorded with a GPS
\item This error cancels out when average speed, distance or direction is calculated along the trajectory
\end{itemize}
\item Transportation mode-based segmentation and classification of movement trajectories \cite{Biljecki_2013} %/home/clemens/Downloads/Transportation mode based segmentation and classification of movement trajectories.pdf
\begin{itemize}
\item 2013
\item method for segmenting movement data into single-mode segments and for classifying them according to the transportation mode used
\end{itemize}
\item Children in schoolyards: Tracking movement patterns and physical activity in schoolyards using global positioning system and heart rate monitoring \cite{Fj_rtoft_2009} %/home/clemens/Downloads/Fjortoftetal.2009.pdf
\begin{itemize}
\item 2009
\item how the yard invited physical activity
\item The methods of GPS tracking and HR monitoring seemed to be applicable for this purpose. For further studies with young children the equipment needs adjustments.
\item => gps in small scale places
\end{itemize}
\item Places as intersecting flows: Mapping urban morphologies, functional constellations and pedestrian rhythms \cite{pafka2013places} %/home/clemens/Downloads/Places_as_Intersecting_Flows_Mapping_Urb.pdf
\begin{itemize}
\item 2013
\item no gps, social concepts
\end{itemize}
\item GPS, GIS and personal travel surveys: an exercise in visualisation \cite{stopher2002gps} %/home/clemens/Downloads/2002_Stopher_Bullock_Jiang.pdf
\begin{itemize}
\item 2002
\item produce maps and other visual representations of the travel
\end{itemize}
\item Analyzing pedestrian movement in mataf using gps and gis to support space redesign \cite{koshak2008analyzing}
\begin{itemize}
\item 2008
\item
\end{itemize}
\end{itemize}

View File

@@ -1,167 +0,0 @@
In \autoref{sec:gg-res}, the involvement of location based games in the research field is reviewed by example.
Covering the basic data aggregation, \autoref{sec:logproctheo} shows the current state of tools and processes for managing large volumes of log and time series data.
An overview of the field of pedestrian track analysis is located in \autoref{sec:pedest}.
Finally, in \autoref{sec:gametheo} the connection of spatial analysis and digital game optimizations is showcased.
\section{Research with location based games}\label{sec:gg-res}
\cite{Schaal2017} describes the evaluation of a location based game.
To measure the effectiveness of the game, the following pattern is applied:
After a mission statement has been defined and approved, a fitting statistical framework has to be developed.
Based on such a framework, questionnaires have to be derived.
As some metrics cannot be retrieved directly from the questionnaire answers, the statistical framework needs to account for these and include measurable information from which the original metric can be derived.
The finished questionnaires, approved for alignment with the mission statement, are then applied in field tests with users from the target groups.
Each field test consists of an upstream questionnaire, a pass of the location based game and a final round of questionnaires.
After a data entry step for paper-based questionnaires, the raw results are fed into the statistical framework implemented in a statistical processing software to retrieve the final results.
\autoref{img:biodiv-schaal} shows the resulting statistical framework for the valuing of biodiversity as target variable of the location based geogame developed in the BioDiv2Go project.
\image{\textwidth}{../../PresTeX/images/biodiv-schaal}{Statistical framework for BioDiv2Go\cite{Schaal2017}}{img:biodiv-schaal}
\section{Log processing}\label{sec:logproctheo}
System administrators and developers face a daily surge of log files from applications, systems, and servers.
For knowledge extraction, a wide range of tools is in constant development for such environments.
Currently, an architectural approach with three main components is most frequently applied.
These components are divided into aggregation \& creation, storage, and analysis \& frontend.
A popular example is the ELK stack consisting of Elastic Search, Logstash, and Kibana \cite{andreassen2015monitoring,yang2016aggregated,steinegger2016analyse,sanjappa2017analysis}. \nomenclature{\m{E}lasticSearch, \m{L}ogstash, and \m{K}ibana}{ELK}
In \autoref{tab:logs} some implementations of these components are listed according to their main focus.
For this list, cloud-based services were not taken into account.
A clear classification is not always possible, as some modules integrate virtually all features necessary, as is the case with the Graphite tool set.
\begin{longtable}[H]{cp{0.2\textwidth}p{0.2\textwidth}}
Collection & Database & Frontend\\
\hline
Logstash\furl{https://www.elastic.co/de/products/logstash} & Elastic Search\furl{https://www.elastic.co/de/products/elasticsearch} & Kibana\furl{https://www.elastic.co/de/products/kibana}\\
Collectd\furl{https://collectd.org/} & Influx DB\furl{https://www.influxdata.com/} & Grafana\furl{https://grafana.com}\\
Icinga\furl{https://www.icinga.com/products/icinga-2/} & Whisper\furl{https://github.com/graphite-project/whisper} & Graphite\furl{https://graphiteapp.org/}\\
StatsD\furl{https://github.com/etsy/statsd} & Prometheus\furl{https://prometheus.io/} & \\
%\furl{} & \furl{} & \furl{}\\
\caption{Log processing components}
\label{tab:logs}
\end{longtable}
\subsection{Collection}
Nearly all services designed for log collection offer multiple interfaces for submitting log data.
By way of illustration, Logstash features a long list of input plugins, ranging from streaming files over an HTTP API to proprietary vendor sources like Amazon Web Services (AWS)\furl{https://www.elastic.co/guide/en/logstash/current/input-plugins.html}. \nomenclature{\m{A}mazon \m{W}eb \m{S}ervices}{AWS} \nomenclature{\m{A}pplication \m{P}rogramming \m{I}nterface}{API}\nomenclature{\m{H}yper\m{t}ext \m{T}ransport \m{P}rotocol}{HTTP}
Aside from aggregation, the topic of log creation is covered by solutions ranging from host-based monitoring like Icinga to application-centric approaches, e.g. StatsD embedded in the application source code\furl{https://thenewstack.io/collecting-metrics-using-statsd-a-standard-for-real-time-monitoring/}.
\subsection{Databases}
The key component for a log processing system is the storage.
While relational database management systems (RDBMS) \nomenclature{\m{R}elational \m{D}ata\m{b}ase \m{M}anagement \m{S}ystem}{RDBMS} can be suitable for small-scale solutions, the temporal order of events imposes many pitfalls.
For instance, django-monit-collector\furl{https://github.com/nleng/django-monit-collector} as an open alternative to the proprietary MMonit cloud service\furl{https://mmonit.com/monit/\#mmonit} assures temporal coherence through lists of timestamps and measurement values stored as JSON strings in an RDBMS. \nomenclature{\m{J}ava\m{s}cript \m{O}bject \m{N}otation}{JSON}
This strategy forces the RDBMS and the application to deal with growing amounts of data, as no temporal selection can be performed by the RDBMS itself.
During the evaluation in \cite{grossmann2017monitoring}, this phenomenon rendered the browser-based visualization basically useless and impeded the access with statistical tools significantly.
Time Series Databases (TSDB) specialize in chronological events.
One typical use is in monitoring, e.g. server health/usage statistics, or weather stations, like the example \autoref{img:rdd} shows.
This example utilizes one of the early TSDB systems, RRDtool\furl{https://oss.oetiker.ch/rrdtool/index.en.html}.
More recently, alternatives written in modern languages are popular, like InfluxDB\furl{https://www.influxdata.com/}, written in Go\furl{https://golang.org/}, or Whisper, written in Python (from the Graphite software package).
\image{\textwidth}{mgroth}{Weather station plot with RRDtool \cite{RDD}}{img:rdd}
\nomenclature{\m{T}ime \m{S}eries \m{D}ata\m{b}ase}{TSDB}
\subsection{Frontend}
Frontends utilize the powerful query languages of the TSDB systems backing them.
Grafana e.g. provides customizable dashboards with graphing and mapping support \cite{komarek2017metric}.
Additional functionality can be added with plugins, e.g. for new data sources or dashboard panels with visualizations.
The query languages of the data sources are abstracted by a common user interface.
\section{Pedestrian traces}\label{sec:pedest}
Analyzing pedestrian movement based on GPS logs is an established technique.
In the following sections, \autoref{sssec:gps} provides an overview of GPS as data basis, \autoref{sssec:act} highlights some approaches to activity mining and \autoref{sssec:vis} showcases popular visualizations of spatio-temporal data.
\nomenclature{\m{G}lobal \m{P}ositioning \m{S}ystem}{GPS}
\subsection{Data basis: GPS}\label{sssec:gps}
Global navigation satellite systems (GNSS) like GPS, Galileo, GLONASS, or BeiDou are a source of positioning data for mobile users.
\nomenclature{\m{G}lobal \m{N}avigation \m{S}atellite \m{S}ystems}{GNSS}
\cite{van_der_Spek_2009} has shown that such signals provide a reliable service in many situations.
Additionally, tracks of these signals are an invaluable source of information for researching movements and movement patterns. \cite{Modsching:2008:1098-3058:31,nielsen2004gps,millonig2007monitoring}
Therefore, GNSS are suitable instruments for acquiring spatio-temporal data \cite{van_der_Spek_2009}.
However, \cite{Ranacher_2015} points out systematic overestimation by GPS due to interpolation errors.
To eliminate such biases of one system, \cite{Li2015} describes the combination of multiple GNSS for improved accuracy and reduced convergence time.
\subsection{Activity Mining}\label{sssec:act}
GPS (or GNSS) tracks generally only contain the raw spatio-temporal data (possibly accompanied by metadata like accuracy, visible satellites, etc.).
Any additional information needs to either be logged separately or be derived from the track data itself.
This activity mining allows e.g. the determination of the modes of transport used while creating the track \cite{Gong_2014}.
\cite{Gong_2015} shows the extraction of activity stop locations to identify locations where locomotion suspends for an activity in contrast to stops without activities.
Information of this kind is relevant, e.g. for improving tourist management in popular destinations \cite{tourist_analysis2012,koshak2008analyzing,Modsching:2008:1098-3058:31}.
Beside points of interest (POIs), individual behaviour patterns can be mined from tracks, as described in \cite{ren2015mining}.
Post-processing of these patterns with machine learning enables predictions of future trajectories \cite{10.1007/978-3-642-23199-5_37}.
%TODO more??
\subsection{Visualization}\label{sssec:vis}
Visualizations help to understand data sets, especially for spatial data.
\subsubsection{Heatmap}
One of the most basic visualizations of large amounts of spatial data is the heatmap.
As the example in \autoref{img:strava} shows, it allows areas with high densities of data points to be identified very quickly.
This comes, however, with the loss of nearly all context information.
For example, the temporal information - both the time slice and the relative order of the data points - is completely absent.
A workaround is an external control element for such information to control the underlying data set.
\image{\textwidth}{../../PresTeX/images/strava}{Heatmap: fitness tracker \cite{strava}}{img:strava}
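Such a density heatmap essentially reduces to a 2D histogram over the coordinates; a minimal sketch with matplotlib:
\begin{verbatim}
import matplotlib.pyplot as plt

def heatmap(lons, lats, bins=200):
    # 2D histogram over coordinates; all temporal context is discarded
    plt.hist2d(lons, lats, bins=bins, cmap="hot")
    plt.colorbar(label="point density")
    plt.savefig("heatmap.png")
\end{verbatim}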
\subsubsection{Track attributes}
As an example of a rendering methodology including more attributes, \cite{stopher2002gps} details the possibilities of cartographic signatures as seen in \autoref{img:track-attr}.
When track lines are used, there are some options to indicate attributes of the track, too.
Besides the color, e.g. the width and stroke-type of the line can indicate certain attributes.
A combination of these allows the visualization of multiple attributes at once.
However, such views are limited in the number of tracks and attributes they can display before becoming confusing and ambiguous.
\image{\textwidth}{track-attributes}{Track rendering with acceleration attributes \cite{stopher2002gps}}{img:track-attr}
\subsubsection{Space-time cube}
One way to address the lack of temporal context is the space-time cube concept reviewed in \cite{kraak2003space}.
By mapping an additional temporal axis as third dimension on a two-dimensional map, tracks can be rendered in a three-dimensional context.
The example in \autoref{img:spacetime} shows how such a rendering allows the identification of individual movement patterns and locations of activity in between.
However, it also demonstrates how difficult the 3D map is to interpret, especially with overlapping tracks.
Apart from overcrowded areas, many people have difficulties interpreting the 3D movements correctly.
The space-flattened alternative on the right tries to reduce this problem with a spatial abstraction.
\image{\textwidth}{../../PresTeX/images/space-time}{Space-time cube examples \cite{bach2014review}}{img:spacetime}
An approach for a time-aware heatmap utilizing space-time cubes is shown in \autoref{img:spacetime2}.
It highlights hotspots of activity along a temporal axis.
\image{\textwidth}{space-time-density}{Space-time cube density examples \cite{demvsar2015analysis}}{img:spacetime2}
\subsubsection{Trajectory patterns and generalizations}
To simplify the visualization of large amounts of individual tracks, the derivation of patterns applying to the tracks allows key areas to be highlighted.
\autoref{img:traj-pattern} shows two examples of such patterns: Flock, where a group of tracks are aligned for some time, and meet, which defines an area of shared presence.
Such patterns can be applied in a time-aware or time-agnostic manner, i.e. with or without taking simultaneous appearance into account \cite{jeung2011trajectory}.
\image{\textwidth}{../../PresTeX/images/traj-pattern}{Flock and meet trajectory pattern \cite{jeung2011trajectory}}{img:traj-pattern}
An approach for addressing the generalization aspects necessary to visualize massive movement data is described in \cite{adrienko2011spatial}.
They work on traffic data as shown in \autoref{img:generalization}.
With an increasing generalization parameter, the flows refine to more abstract representations of travel.
\image{\textwidth}{../../PresTeX/images/generalization}{Trajectories and generalizations with varying radius parameter \cite{adrienko2011spatial}}{img:generalization}
\section{Analyzing games}\label{sec:gametheo}
Modern video games with always-on copy-protection or online master servers allow game studios to collect metrics about players' performances.
In \cite{Drachen2013}, the authors describe the use of GIS technologies for such environments.
For example, \autoref{img:chatlogs} shows a correlation between the frequency of certain keywords in the chat messages and the players' current location.
This indicates a possible bug in the game to look out for.
Not only technical problems but also design errors or bad balancing can be visualized.
\autoref{img:ac3death} uses a heatmap to highlight areas with high failure rates during play-testing.
These failure hotspots can then be addressed for a convenient game flow.
\image{\textwidth}{../../PresTeX/images/game-an}{Chat logs with players' location \cite{Drachen2013}}{img:chatlogs}
\image{\textwidth}{../../PresTeX/images/ac3-death}{Identify critical sections \cite{Drachen2013}}{img:ac3death}
%\twofigures{0.5}{../../PresTeX/images/game-an}{Chat logs with players location}{img:chatlogs}{../../PresTeX/images/ac3-death}{Identify critical sections}{img:ac3death}{Game analytics \cite{Drachen2013}}{fig:gameanal}
In contrast to the completely virtual games above, \cite{AHLQVIST20181} describes the mining of players' spatial behaviour through a real-world based online game.
With a focus on replicating the real world, players have to align social and natural resources.
The results of these simulations can then be used to build agent-based simulations with realistic behaviour.

View File

@@ -1,145 +1,57 @@
The following \autoref{sec:logproc} takes a dive into the world of log processing frameworks and evaluates the feasibility of two such systems for the scope of this thesis.
Based on the findings, an alternative approach is then outlined in \autoref{sec:alternative-design}.
\section{Evaluating log processing solutions}\label{sec:logproc}
This chapter looks into the possibilities of existing log processing solutions.
By example, Kibana with an Elastic Search backend and Grafana with an InfluxDB backend will be evaluated.
\subsection{Evaluating Kibana}
To evaluate whether Kibana is a viable approach for the given requirements, a test environment was built.
This setup with Docker, defined with Docker-compose, is documented in \autoref{app:kibana}.
Two sample data sets were loaded into the Elasticsearch container through HTTP POST requests. %: \texttt{curl -H 'Content-Type: application/x-ndjson' -XPOST 'elastic:9200/\_bulk?pretty' --data-binary @gamelog.json}.
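The bulk load can be scripted as well; a sketch of the POST against Elasticsearch's \_bulk endpoint (host name from the compose file, file name illustrative):
\begin{verbatim}
import requests

with open("gamelog.json", "rb") as ndjson:  # newline-delimited JSON
    r = requests.post("http://elastic:9200/_bulk?pretty",
                      headers={"Content-Type": "application/x-ndjson"},
                      data=ndjson)
print(r.status_code)
\end{verbatim}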
Once Kibana was told which fields hold the spatial information, it was possible to have a first visualization on the workbench.
However, this view is optimized for the context of web log processing, so it has a rather low spatial resolution, as shown in \autoref{img:kibana} and \autoref{img:kibana2}.
Dealing mostly with imprecise locations from GeoIP lookups, and with respect to the web users' privacy, this choice avoids false conclusions\footnote{GeoIP database providers cannot always return qualified resolutions and instead rely on default locations, leading to bizarre events like \url{https://splinternews.com/how-an-internet-mapping-glitch-turned-a-random-kansas-f-1793856052}} and enforces privacy-by-default.
An additional restraint on application in the geogame context is the query language: only research questions expressible in it can be answered.
Additionally, this requires users to master the query language before any reasonable conclusions can be extracted.
By building a custom plugin, extension, or modified version, it is possible to circumvent this obstacle.
However, the fast-paced environment of the industry either requires a constant effort to keep pace, or results in an outdated system rather quickly. (E.g. the next major release, Kibana v6.0.0\footnote{\url{https://github.com/elastic/kibana/releases/tag/v6.0.0}}, was released about a year after Kibana v5.0.0\footnote{\url{https://github.com/elastic/kibana/releases/tag/v5.0.0}}; a previous major version seems to receive updates for about a year, too.)
\image{\textwidth}{../../PresTeX/images/kibana}{Game trace in Kibana}{img:kibana}
\image{\textwidth}{../../PresTeX/images/kibana2}{Game trace in Kibana}{img:kibana2}
\subsection{Evaluating Grafana}
Grafana is a solution to analyze, explore, and visualize various sources of time series data.
Plugins exist for nearly any storage and collection backend for metrics\furl{https://grafana.com/plugins?type=datasource}.
The different backends are available through a unified user interface, shown in \autoref{img:grafana}.
Spatial resolution suffers under conditions similar to Kibana's.
\autoref{img:grafana} also shows, by the example of weather station data, the restrictions imposed by the query language and query editing interface.
\image{\textwidth}{grafana-metrics}{Configuring a graph in Grafana}{img:grafana}
\subsection{Conclusion}
This chapter once again instantiates the phrase "spatial is special" \cite{spatialspecial}:
after all, the monitoring solutions are no perfect match for this special - spatial - use case.
The privacy concerns vital in web monitoring prohibit detailed spatial analyses, the query languages restrict some questions, and custom extensions require constant integration effort.
Regarding the specified use cases, especially the non-expert users benefit from a simple-to-use interface.
The default Kibana workbench does not qualify for this; a custom interface could improve the situation.
Grafana does support shared dashboards with a fixed set of data; however, precise spatial support is still lacking.
A third-party plugin has recently added such support\furl{https://github.com/CitiLogics/citilogics-geoloop-panel}; unfortunately, it missed the timeframe of the Grafana evaluation for this thesis.
Such a plugin would still be a possibly fragile component, given the fast pace of web development shown by these kinds of projects.
\section{Developing a modular architectural design}\label{sec:alternative-design}
While the development of a custom stack requires a lot of infrastructural work to get the project running, the findings above support building a custom solution as a feasible alternative:
\begin{itemize}
\item Developing from the bottom up takes less time than diving into complex turn-key monitoring solutions.
\item With rather limited amounts of data\footnote{From a sample of 436 game logs from BioDiv2go, an average log file is 800 kB in size, with a median of 702 kB}, scalable solutions are no hard requirement.
\item There are no core dependencies on fast-paced projects.
\item Interfaces are tailored to the requirements: a simple web interface for non-expert users, CLI and API with unrestricted possibilities for researchers.
\item A focus on key points allows simple, easily extendable interfaces and implementations.
\item By reducing the complexity to an overseeable level, the processes and results can be verified for accuracy and reliability.
\item A base set of analytic functions covers common needs and remains extendable for special cases.
\end{itemize}
With the requirements from \autoref{sec:require} and the learnings from the log processing evaluations in mind, a modular processing pipeline, depicted in \autoref{img:flowchart}, allows for a configurable solution.
It comprises the stages of input, analysis, and rendering.
With interfaces defined between the stages, this approach allows the exchange of single modules without affecting the remaining pipeline.
\image{.75\textwidth}{flowchart.pdf}{Modular processing pipeline}{img:flowchart}
\subsection{Overview}
An architectural approach surrounding the processing pipeline is visualized in \autoref{img:solution}.
It outlines three main components of the project: two user-facing services (Web \& CLI/API) and an analysis framework.
The interfaces (Web and CLI/API) for both target groups (see \autoref{sec:require}) depend completely on the analysis framework at the core.
\image{.75\textwidth}{solution.pdf}{Architecture approach}{img:solution}
The following sections describe each of those components.
\subsection{Analysis Framework}
The analysis framework takes game logs, processes their entries, collects results, and renders them to an output.
With a Map-Reduce pattern as the basic structure for the data flow, an ordered collection of analysis operations with matching postprocessing and render operations defines an analysis run.
\autoref{img:flow} shows the data flows through the framework.
Every processed log file has its own chain of analyzer instances.
The log entries are fed sequentially into the analysis chain.
\image{\textwidth}{map-reduce.pdf}{Data flows}{img:flow}
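Conceptually, the data flow of \autoref{img:flow} boils down to a few lines. The following sketch uses stand-in names for illustration only; the concrete interfaces follow in \autoref{sec:implementation}.
\begin{lstlisting}[language=python,caption={Sketch of the Map-Reduce style data flow},label=code:flow-sketch]
def run(loader, analyzer_factories, renderers):
    results = []  # stands in for the ResultStore
    for log_file in loader.list_logs():          # input stage
        chain = [make() for make in analyzer_factories]
        for entry in loader.entries(log_file):   # one entry at a time
            for analyzer in chain:               # analysis stage
                entry = analyzer.process(entry)
                if entry is None:                # entry was consumed
                    break
        results.extend(a.result() for a in chain)
    for renderer in renderers:                   # postprocess & render
        renderer(results)
\end{lstlisting}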
\subsubsection{Analyzer}
An Analyzer takes one log entry at a time and processes it.
With dynamic selectors stored in settings, Analyzers can be used on multiple game types.
For specific needs, Analyzers can be tailored to a specific game, too.
While processing, the Analyzer can choose to read, manipulate, or consume the log entry.
\paragraph{Reading a log entry}
Every Analyzer can read all of the log entry's contents.
This is obviously the core of the whole framework, as it is the only way to gain knowledge from the log.
Information can be stored in the Analyzer's instance until the log file has been processed completely.
\paragraph{Manipulating a log entry}
Every Analyzer can manipulate a log entry.
This can be adding new information, modifying existing information, or deleting information.
\paragraph{Consuming a log entry}
Every Analyzer can consume a log entry.
A consumed log entry is not passed down the analysis chain anymore.
This can be useful to filter verbose logs before computationally expensive operations.
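The three options can be illustrated with a hypothetical analyzer that drops entries without spatial information; all names here are illustrative, and the actual base class is shown later in \autoref{code:analyzer}.
\begin{lstlisting}[language=python,caption={Sketch of an analyzer that reads, manipulates, and consumes entries},label=code:analyzer-sketch]
class LocationFilterSketch:
    """Illustrative only; assumes process() returns the entry to pass
    it down the chain, or None to consume it."""

    def __init__(self, location_key="location"):
        self.location_key = location_key
        self.consumed = 0

    def process(self, entry):
        if self.location_key not in entry:  # read
            self.consumed += 1              # keep internal state
            return None                     # consume the entry
        entry["has_location"] = True        # manipulate
        return entry                        # pass it on

    def result(self):
        return {"entries_without_location": self.consumed}
\end{lstlisting}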
\subsubsection{Result}
When all entries of a game log have been processed, the results of each analyzer are collected.
Each result is linked to the analyzer which has produced this artifact to avoid ambiguous data sets.
The results are stored in a ResultStore.
To support arbitrary structures, a category factory can be specified.
In this case, special analyzers can introduce categories as needed before storing their result.
The newly created category will then be used to store consecutive Analyzer results until another category is introduced.
\subsubsection{Postprocessing \& Render}
When all game logs are processed, the whole result store is passed into the postprocessing step.
This is the first step in which multiple game logs, i.e. the results of the analyzed game logs, can be compared directly.
Postprocessing is a hard requirement for rendering the results, as at least a transformation into the desired output format is necessary.
Rendering is not restricted to visualizations; artifacts of all kinds can be produced.
A whole range from static plots and CSV exports to structured JSON data for interactive map visualizations or text generation is possible.
\subsubsection{Log parser}
Key to the framework described above is a component to import game log data, parse it, and bring it into a common format for processing.
This needs to be adapted for each supported game.
It has to know where game logs are stored and how they can be accessed.
Configurable items like URLs and user credentials allow e.g. for different game servers.
The important step is the parsing of game logs from the formats used by the games (e.g. JSON, XML, plain text, database, …) to a common format used internally.
\subsection{Web Interface}
The web interface is rather straightforward:
Expert users prepare a set of analysis methods and bundle them with suitable rendering targets into an analysis suite.
Non-expert users select game logs for processing, choose a prepared analysis suite, and receive a rendered result once the analysis process has finished.
\subsection{CLI/API Interface}
Providing direct access to analysis and render classes, the CLI/API interface offers the most powerful way to explore the log data.
By implementing custom algorithms, expert users can cope with difficult input formats and special requirements.
Since frequently used analysis functionality is split into small, universal Analyzers, composing Analyzers into a queue may already be sufficient to satisfy some information needs.
\subsection{Architecture}
The API is designed to be standalone, i.e. it is independent of both game servers and user interfaces.
Separation from game servers narrows the scope and allows usage with any kind of game.
Games without a central server can provide a mocked server to supply logged data, while games with a server can e.g. expose API endpoints with authentication and user management.
By acting like any normal client, the framework avoids obstacles like CORS/XSS prevention.
The independence from user interfaces, mainly the web interface, allows scalability through load balancing with multiple API workers.
Expert users with special requirements can embed the framework in projects without pulling in large amounts of dependencies for user interfaces or games/game servers.


@ -1,260 +0,0 @@
Based on the findings in \autoref{sec:solution}, an implementation with Python was realized.
The following sections describe the structure and service composition utilized to fulfill the requirements.
\section{Code structure}
There are four packages forming the Analysis Framework project:
\begin{itemize}
\item analysis: Core analysis functionality, including log parsing, analysis, postprocessing and rendering
\item clients: Connection classes to game servers to retrieve log files and game configurations
\item selector: Web interface for non-expert users
\item tasks: Definition of asynchronous tasks
\end{itemize}
The analysis and clients packages are described in \autoref{sec:analysisframework}, while \autoref{sec:web} features selector and tasks packages.
\image{.7\textwidth}{packages}{Project package overview}{img:packages}
\subsection{Analysis Framework}\label{sec:analysisframework}
The internal structure of the analysis package is shown in \autoref{img:pack-analysis}.
Besides the sub-packages for the analysis work (analyzers: \autoref{sec:analysiswork}) and log parsing (loaders: \autoref{sec:loaders}), it contains helper functionality and finally the Python module \texttt{log\_analyzer} as an entry point for researchers to experiment with, outlining the intended workflow.
\image{.7\textwidth}{packages-analysis}{analysis package overview}{img:pack-analysis}
\subsubsection{Log parsing}\label{sec:loaders}
As outlined in \autoref{img:pack-loader}, the parsing of log files into an internal structure happens here.
\image{.7\textwidth}{packages-loader}{loader package overview}{img:pack-loader}
\paragraph{The loader module} holds the definition of the abstract base class \texttt{Loader}.
It has two unimplemented methods: \texttt{load} and \texttt{get\_entry}.
While the first is issued with a filename as argument to load a log file, the second is then called repeatedly to retrieve single log entries for the analysis steps.
Processing stops when all log entries have been passed from this method.
The module also defines a showcase implementation loading a JSON file and \texttt{yield}ing its items.
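A minimal sketch of that showcase implementation could look as follows; it is simplified and not the shipped code.
\begin{lstlisting}[language=python,caption={Sketch of a JSON showcase loader},label=code:loader-sketch]
import json

class Loader:
    """Stand-in for the abstract base class described above."""
    def load(self, filename):
        raise NotImplementedError
    def get_entry(self):
        raise NotImplementedError

class JSONListLoader(Loader):
    """Showcase: load a JSON file holding a list of entries
    and yield them one by one."""
    def load(self, filename):
        with open(filename) as fh:
            self._entries = json.load(fh)
    def get_entry(self):
        yield from self._entries
\end{lstlisting}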
\paragraph{Biogames} handles the log files of Biodiv2go, for which a composite approach was used: the games' log files come as ZIP archives with an SQLite database and possibly media files.
The \texttt{SQLiteLoader} contains the logic to handle a plain SQLite file according to the \texttt{Loader} definition from above.
By extending this class, \texttt{ZipSQLiteLoader} focuses on unzipping the archive and creating a temporary storage location, leaving the interpretation of the data to its superclass.
This avoids code duplication and, with a little tweaking, would present a generic way to handle SQLite database files.
\paragraph{Neocart(ographer)}
is the evaluation step described in \autoref{sec:eval}.
This \texttt{Loader} deals with some seriously broken XML files.
\paragraph{Module settings} are stored in the \texttt{\_\_init\_\_} module.
This is mainly a mapping to allow references to \texttt{Loader}s in the JSON files for configuration (see \autoref{sec:settings}).
\subsubsection{Analysis Work package}\label{sec:analysiswork}
\autoref{img:pack-analyzers} shows the sub-packages of \texttt{analysis.analyzers}.
There are sub-packages for doing the actual analysis work, as well as for the postprocess and rendering step.
Additionally, the \texttt{settings} module defines the \texttt{LogSettings} class.
\image{.7\textwidth}{packages-analysis-analyzers}{analysis.analyzers package overview}{img:pack-analyzers}
\paragraph{LogSettings}\label{sec:settings}
This class holds the configuration for an analysis run:
\begin{itemize}
\item The type of the log parser to use
\item Information about the structure of the parsed log files, e.g.
\begin{itemize}
\item What is the key of the field from which to derive the type of a log entry?
\item Which value of this field marks spatial information?
\item Which value indicates game actions?
\item What is the path to obtain spatial information from a spatial entry?
\end{itemize}
\item The analysis setup:
\begin{itemize}
\item Which analyzers to use,
\item and the order to apply them
\end{itemize}
\item Variable data to configure the source (see \autoref{sec:source}).
\item Rendering methods to apply to the result set
\end{itemize}
The settings are stored as JSON files and parsed at runtime into a \texttt{LogSettings} object (see \autoref{img:oebkml} for a sample JSON settings file).
The helper functions in \texttt{analysis.util} provide a very basic implementation of a query language for Python dictionaries:
a dot-separated string defines the path to take through the dictionary, providing essentially syntactic sugar to avoid lines like \texttt{entry["instance"]["config"]["@id"]}.
As the latter proves quite difficult to configure using JSON, the path string \texttt{"instance.config.@id"} is much more deserialization friendly.
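A minimal sketch of such a path lookup, omitting any error handling the shipped helper may perform:
\begin{lstlisting}[language=python,caption={Sketch of the dot-separated path lookup},label=code:path-sketch]
def resolve(entry, path):
    """Walk a nested dictionary along a dot-separated path."""
    value = entry
    for key in path.split("."):
        value = value[key]
    return value

# resolve(entry, "instance.config.@id")
# is equivalent to entry["instance"]["config"]["@id"]
\end{lstlisting}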
\paragraph{The Analyzer package} defines the work classes to extract information from log entries.
The package's init module defines the Result and ResultStore classes, as well as the abstract base class for the Analyzers.
As shown in \autoref{code:analyzer}, this base class provides the basic mechanics to access the settings.
The core feature of this project is condensed in the method stub \texttt{process}.
It is fed with a parsed entry from \autoref{sec:loaders}, processes it, possibly updates the internal state of the class, and can then decide to end the processing of the particular log entry or to feed it down into the remainder of the analysis chain.
When all log entries of a log file are processed, the \texttt{result} method returns the findings of this analysis instance (see \autoref{par:result}).
\lstinputlisting[language=python,caption={Analyzer base class},label=code:analyzer]{code/analyzer.py}
There are 23 classes implementing analysis functionality, partitioned into modules for generic use, Biodiv2go analysis, and filtering purposes.
The settings provided by the base class include access to the client connecting to the game's server.
This allows the Analyzers to fetch additional data like game configurations or media files.
The ActivityMapper analyzer already makes use of that.
In a similar fashion it is possible to load other external data like questionnaire spreadsheets.
\paragraph{Results}\label{par:result} are stored in a \texttt{Result} object (\texttt{analysis.analyzers.analyzer.\_\_init\_\_}).
This class keeps track of the origin of the resulting data to allow filtering results by the producing analyzer class.
As \autoref{code:analyzer} shows, the \texttt{Result}s are stored in a \texttt{ResultStore}.
This store - defined next to the \texttt{Result} class - provides means to structure the results by arbitrary measures.
By passing the store's reference into the analyzers, any analyzer can introduce categorization measures.
This allows, for example, distinguishing several log files by name, or combining log files and merging the results by events happening during the games' progress.
With a dictionary of lists as the default, the API supports a callable factory for arbitrary use.
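The storage idea can be sketched as follows; the framework's \texttt{ResultStore} additionally records which analyzer produced each result, which this simplified version omits.
\begin{lstlisting}[language=python,caption={Sketch of a category-aware result store},label=code:store-sketch]
from collections import defaultdict

class ResultStoreSketch:
    def __init__(self, factory=list):
        self._store = defaultdict(factory)  # default: dict of lists
        self._category = "default"

    def set_category(self, name):
        # any analyzer holding a reference may introduce a category;
        # subsequent results are stored under it
        self._category = name

    def add(self, result):
        self._store[self._category].append(result)
\end{lstlisting}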
\paragraph{Rendering of the Results} is done in the \texttt{render} package.
Similar to the Analyzers' package, the render package defines its common base class in the initialization module, as shown in \autoref{code:render}.
It provides implementers with means to filter the result set to relevant analysis types through the \texttt{filter} methods.
The implementation of the rendering method itself is, of course, left open.
\lstinputlisting[language=python,caption={Render base class},label=code:render]{code/render.py}
There are 18 implementations, again split into generic and game-specific ones.
The most generic renderers just dump the results into JSON files or echo them to the console.
A more advanced implementation relies on the \texttt{LocationAnalyzer} and creates a KML file with a track animation (example: \autoref{img:oebge}).
Finally, e.g. \texttt{biogames.SimulationGroupRender} performs postprocessing steps on a collection of \texttt{biogames.SimulationOrderAnalyzer} results by creating a graph\furl{https://networkx.github.io/} rendered with matplotlib\furl{https://matplotlib.org/} to discover simulation retries (example: \autoref{img:retries}).
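In the spirit of the most generic renderers, a JSON dump fits in a few lines; the \texttt{origin} and \texttt{data} attributes are assumptions for illustration, and the real base class provides its \texttt{filter} helpers instead of the inline list comprehension used here.
\begin{lstlisting}[language=python,caption={Sketch of a generic JSON renderer},label=code:render-sketch]
import json

class JSONDumpSketch:
    """Dump all results of a given analyzer type into a JSON file."""

    def __init__(self, path, analyzer_type=None):
        self.path = path
        self.analyzer_type = analyzer_type

    def render(self, results):
        selected = [r for r in results
                    if self.analyzer_type is None
                    or isinstance(r.origin, self.analyzer_type)]
        with open(self.path, "w") as fh:
            json.dump([r.data for r in selected], fh, indent=2)
\end{lstlisting}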
\subsection{Sources}\label{sec:source} of log files are clients connecting either to game servers directly or to other log providers.
There is currently a bias towards HTTP clients, as REST APIs are today's go-to default.
To acknowledge this bias, the HTTP oriented base class is not defined at package level.
The \texttt{Client} originates from the \texttt{client.webclients} package instead.
It contains some convenience wrappers to add cookies, headers and URL-completion to HTTP calls as well as handling file downloads.
The two implementing classes are designed for Biodiv2go and a Geogames-Team log provider.
Using a REST API, the \texttt{Biogames} client integrates seamlessly into the authentication and authorization of the game server.
The client acts as a proxy for users to avoid issues with cross-site scripting (XSS) prevention or cross-origin resource sharing (CORS).
The Geogames-Team's geogames like Neocartographer write game logs to files and only have a server running during the active game.
Therefore, an additional log providing server was created to allow access to the log files (see also: \autoref{sec:ggt-server}).
Clients can have arbitrary amounts of options, as all fields in the JSON settings file are passed through (see \autoref{img:oebkml}, section "source").
\subsection{Web Interface for prepared results}\label{sec:web}
The selector package holds a Flask\furl{http://flask.pocoo.org/} app providing a web interface for non-expert users.
It utilizes the provided clients (see \autoref{sec:source}) for authentication, and gives users the following options:
\begin{itemize}
\item Exploring available game logs
\item Configuring a new analysis run
\item View analysis run status
\item View analysis run results
\end{itemize}
The web interface offers all available clients for the user to choose from.
With user-provided credentials, the server retrieves the available game logs and offers them, together with the predefined analysis options, to create a new analysis run.
When an analysis run is requested, the server issues a new task to be executed (see \autoref{sec:tasks}).
An overview page lists the status of the tasks from the given user, and provides access to the results once the task is finished.
When problems occur, the status page informs the user, too.
As Flask does not recommend serving static files through itself, an Nginx HTTP server\furl{https://www.nginx.com/} is configured to serve the result files.
\subsubsection{User workflow}
The index page of the web UI features a login form.
It offers a selection for the different configured game backends (see \autoref{img:webindex}).
While a failed login stays at the index, a successful attempt redirects the user to the result overview (see \autoref{img:webresults}).
Here, both the results of completed analysis runs and the status of scheduled and running jobs are visible.
For finished runs, there are links to the result artifacts.
The link \emph{create new analysis} leads to the configuration menu for new analysis runs (see \autoref{img:webcreate}).
It lists the game logs available for the logged in user, and offers a selection of the predefined analysis configurations.
Given a name for each run, it is easy to identify the results of each analysis run on the result overview page.
\subsection{Result interface}
Accompanying the Web interface above is the result interface.
Here, results of the analysis runs issued in the Web interface are displayed to the users.
\autoref{img:trackfi} shows a result by example: The combination of spatial positions of players and the screen activity.
\image{\textwidth}{../../PresTeX/images/track-fi}{ActivityMapper: Combined screen activity and spatial progress}{img:trackfi}
\subsection{Task definition}\label{sec:tasks}
The \texttt{tasks} package provides the tasks available for execution.
This package is the interface for celery\furl{http://www.celeryproject.org/} workers and issuers.
The key point is the task \texttt{analyze} to start new analysis runs.
When a new task is scheduled, the issuer puts a task in the Redis DB\furl{https://redis.io/}.
A free worker node claims the task and executes it.
During the runtime, status updates are stored in the Redis DB to inform the issuer about progress, failures, and result artifacts.
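A sketch of such a task definition follows; the broker URL, task arguments, and return value are illustrative rather than the project's exact API.
\begin{lstlisting}[language=python,caption={Sketch of the analyze task},label=code:task-sketch]
from celery import Celery

app = Celery("tasks", broker="redis://redis:6379/0",
             backend="redis://redis:6379/0")

@app.task(bind=True)
def analyze(self, settings_name, log_ids):
    # report progress back through the result backend
    self.update_state(state="PROGRESS", meta={"done": 0})
    # ... run the analysis pipeline and store the result artifacts
    return {"artifacts": []}
\end{lstlisting}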
\section{Services \& Service composition}
To realize the implementation above, the following services are necessary:
\begin{itemize}
\item Analysis framework: Celery
\item User interface: Flask
\item Result server: Nginx
\item Connection Flask - Celery: Redis
\item Public frontend: Traefik (external)
\end{itemize}
Two additional services were used, one for a local BioDiv2Go server, one as log provider for the Neocartographer logs.
The services are managed using Docker\furl{https://www.docker.com/}.
This provides a clear ground for development as well as an easily integrable solution.
Although Docker as a technology may be a current hype, the build scripts in human-readable format provide documentation about dependencies and installation steps if necessary.
\subsection{Background worker: Celery}\label{sec:srv-celery}
The Celery worker process provides the tasks defined in \autoref{sec:tasks}.
To this end, it requires all the analysis tools, access to the game log data, and access to a storage location for the results.
Additionally, a connection to the Redis DB for the job queue is required.
Access to Redis and to the game log providers is granted via a Docker network, and storage is mounted as a writable Docker volume.
\subsection{User interface: Flask}
The user interface needs to be available to the public, and needs to be attached to the Redis DB to append analysis jobs to the job queue.
In order to use the celery API, it too has to include the whole analysis project.
Therefore it is appropriate to use a single docker image for both the celery and the flask container.
Although it would be possible to use separate images without much overhead in disk space\footnote{
Docker saves each step defined in the Dockerfile as layer.
Using such a layer as the basis for another image allows shipping additions with only the difference layer.
Unfortunately, each additional layer consumes more space, and optimizations like the removal of build-time requirements may lead to increased runtime overhead when building the images.
},
this reuse with fewer dependencies helps to keep development on track.
The image itself is rather straightforward.
With an Alpine Linux\furl{https://alpinelinux.org/} image as basis, build-time and runtime dependencies are installed with Alpine's packet management system.
Then the Python libraries are installed using pip, and the build-time requirements are cleared.
To reduce the size of the image, once these steps are working they are combined into a single layer.
Using docker labels, the container is flagged to be exposed using Traefik (see \autoref{sec:srv-traefik}).
\subsection{Result server: Nginx}
To serve the static result files, a simple HTTP server is required.
With its low footprint on memory, storage and CPU, Nginx is a suitable solution.
Equipped with a data volume, this container is again marked for exposure via labels.
\subsection{Database: Redis}
Redis is one of the recommended backend storages for celery.
It was chosen due to the simple integration into this environment.
Running in the Docker network, the only configuration needed is the volume for persisting data across service and system restarts.
\subsection{Geogame Log file provider}\label{sec:ggt-server}
Providing an HTTP interface for geogames without a permanent game server, this service does not need to be public.
With an Nginx HTTP server already part of the stack, it is an obvious choice to reuse this image, too.
This service, however, does need a little configuration:
To avoid parsing HTML index sites or generating metadata indices, the autoindex feature of Nginx is used.
With the format option\furl{http://nginx.org/en/docs/http/ngx_http_autoindex_module.html\#autoindex_format}, this delivers JSON data instead of HTML, allowing for a much more pleasant client implementation.
\subsection{BioDiv2Go Server}
To integrate nicely into the project and the development machines used during this thesis, the BioDiv2Go server was packaged into docker containers, too (see \autoref{app:biogames}).
\subsection{Frontend \& Reverse Proxy: Traefik}\label{sec:srv-traefik}
Traefik\furl{https://traefik.io/} is a reverse proxy.
It offers integration with service orchestration systems like Docker, Swarm, and Kubernetes.
With a few lines of configuration, it detects new services automatically and can create appropriate SSL/TLS certificates on the fly via Let's Encrypt.
Here, it is configured to watch docker containers, and create forwarding rules for those marked with docker labels.
For fine-grained control, the creation of default forwards is disabled, so only explicitly marked containers are subject to this automatic proxy.
The label \texttt{traefik.enable=true} enables Traefik's reverse proxy pipeline for this container, while \texttt{traefik.port=8080} documents the port where the container exposes its service.
The proxy rule to forward traffic to this container is configured with \texttt{traefik.frontend.rule= Host:select.ma.potato.kinf.wiai.uni-bamberg.de}.
Here Traefik supports a wide range of options\furl{https://docs.traefik.io/basics/\#frontends}, including grouping by any or all semantics with multiple rules.
For the purposes of this project, a wildcard domain record was used for the development machine, so each service can be accessed via its own subdomain.
See \autoref{app:traefik} for an example configuration.
\subsection{Service composition and management}
\autoref{img:arch} shows the integration of the above described services into one solution.
This structure is fixed in a Docker-compose\furl{https://docs.docker.com/compose/} setup (see \autoref{code:gglap}).
The advantage of docker-compose is the definition of all images, volumes and networks in a single file.
When a scenario with high load occurs, this definition allows for simple scaling.
To create more celery worker nodes, issuing the command \textit{docker-compose scale worker=8} suffices to create 8 worker containers running in parallel.
\image{.75\textwidth}{architecture.pdf}{Service composition overview}{img:arch}


@ -1,100 +0,0 @@
\section{Methodology}
BioDiv2Go's Geogame2 (FindeVielfalt Simulation) was the base case during the development of the analysis stack.
It was chosen due to its well-defined REST API, including log retrieval and user authentication.
This section shows how the framework copes with the integration of another game with a completely different architecture and log style.
\subsection{Choosing an additional game}
\autoref{tab:logs2} and \ref{tab:logs3} show an overview of the log files of the different games available.
The game with the highest number of available log files is Neocartographer.
Neocartographer saves its log files as GPX tracks.
Additional game states are embedded into the event tags of some of the GPX track points.
A first inspection yields some GPX files of only a few bytes, just a GPX header with a few track points and no game actions at all.
However, compared to the other games it has a comprehensible log structure, and even with some empty logs there should be a reasonable number of usable game logs.
\begin{longtable}[H]{ccp{0.6\textwidth}}
Geogame & Log files & Notes \\
\hline
BioDiv2Go & $\approx430$ & SQLite database with JSON log entries, references to game config; import base case\\
GeoTicTacToe & $\approx13$ & CSV with pipes; no temporal data; events + tracks\\
\caption{Geogame clients log data}
\label{tab:logs2}
\end{longtable}
\begin{longtable}[H]{ccp{0.6\textwidth}}
Geogame & Log files & Notes \\
\hline
GeoTicTacToe & $\approx2$ & intermediate log format\\
GeoTTT & $\approx130$ & fragmented structure: incomplete or fragmented\\
Neocartographer\furl{http://www.geogames-team.org/?p=23} & $\approx400$ & Partly broken GPX: missing description information; one GPX file per player\\
MissingLink & $\approx6$ & Partly broken GPX: missing spatial information; one GPX file per player\\
Equilibrium\furl{http://www.geogames-team.org/?p=148} & $\approx40$ & GPX with missing end tag\\
\caption{Geogame servers log data}
\label{tab:logs3}
\end{longtable}
The following section \autoref{sec:neocart} describes the integration efforts for Neocartographer.
\section{Integration of Neocartographer}\label{sec:neocart}
\subsection{Neocartographer Game Log Files}
The log files are grouped by folders and contain the GPX tracks and media, mainly photos (see \autoref{img:nclog}).
Many Neocartographer GPX files have invalid XML markup, as \autoref{tab:xml} shows.
\begin{longtable}[H]{rl}
Error type & Example \\
\hline
missing attribute space & <desc><event message="leaveObject"geoid="9"/></desc>\\
unclosed tag & <desc><event </desc>\\
missing attribute name & <trkpt lat="48.3689110.897709">\\
invalid attribute values & <trkpt lat="UNKNOWN" lon="UNKNOWN">\\
\caption{Neocartographer GPX log error types}
\label{tab:xml}
\end{longtable}
The first two error types (missing separation between two attributes and unclosed tags) are syntactic XML errors.
With the lxml\furl{http://lxml.de/} recovery parser\footnote{\texttt{lxml.etree.XMLParser(recover=True)}} the unclosed tag error is suppressed without further data loss\footnote{With an empty event tag, the data is obviously still missing}.
In the missing attribute separation case, the recovery parser parses only the first attribute properly.
Any additional attributes are stored as a raw string in the \texttt{tail} field of the XML element's object.
With string manipulation, the \texttt{geoid} attribute can be restored\footnote{In the data probe, this error occurred only with the \texttt{geoid} attribute}.
The other two error types lead to data corruption, as both cases fail to qualify as valid latitude/longitude pairs.
With the assumption of a two-digit longitude\footnote{The names and other valid longitudes suggest the location of the game field in the eastern part of Bavaria (Augsburg, Bamberg)}, the correct value can be restored through string parsing from the offset of the second decimal separator.
Good practice requires the parser to issue a loud warning to indicate possible errors here.
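Under the two-digit-longitude assumption stated above, the repair of a fused coordinate can be sketched as follows.
\begin{lstlisting}[language=python,caption={Sketch of the fused-coordinate repair},label=code:coord-sketch]
import logging

log = logging.getLogger(__name__)

def split_fused_coordinate(value):
    """Recover '48.3689110.897709' -> (48.36891, 10.897709).

    Assumes a two-digit longitude, so the longitude starts two
    characters before the second decimal separator."""
    second_dot = value.index(".", value.index(".") + 1)
    lat, lon = value[:second_dot - 2], value[second_dot - 2:]
    log.warning("repaired fused coordinate %r -> (%s, %s)", value, lat, lon)
    return float(lat), float(lon)
\end{lstlisting}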
The last error type occurs with nearly all first and second entries.
They contain the players' \emph{join} and \emph{start} events, when no position fix is available yet.
Currently, these log entries are discarded with an accompanying log message.
A possible improvement would be to keep a reference to these entries and attach the first valid location entry that appears.
\subsection{Log Retrieval}
As there is only a playtime server, the files are stored on the file system of the server.
Therefore, an Nginx HTTP server was configured to serve folder indices formatted as JSON (see \autoref{sec:ggt-server}).
This allows the framework's loaders to retrieve the log files in a clean manner.
An additional client implementation in the framework (see \autoref{sec:source}) converts the JSON index to the structure used internally and uses the given functionality to handle file downloads.
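Consuming the JSON autoindex is then straightforward; the fields follow Nginx's documented autoindex JSON format, while the function name and URL handling are illustrative.
\begin{lstlisting}[language=python,caption={Sketch of reading the Nginx JSON autoindex},label=code:index-sketch]
import requests

def list_log_files(index_url):
    """Return the file names listed by an Nginx JSON autoindex."""
    entries = requests.get(index_url, timeout=10).json()
    return [e["name"] for e in entries if e["type"] == "file"]
\end{lstlisting}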
\subsection{Analysis Functionality}
Using the \texttt{LocationAnalyzer} in combination with a \texttt{KMLRender} renderer, the analysis of log files was successful on the first run.
\subsection{UI Integration}
The game selection on the login page (see \autoref{img:webindex}) uses a dictionary in the \texttt{clients} package.
Once a client implementation is registered with a name, it is automatically added to the selection box.
The URL of the log-providing service has to be configured in the \texttt{selector} Flask application, too.
\section{Conclusion}
While the implementation of a new client to download log files was straightforward, the parsing of these files proved quite difficult.
However, it was not the integration into the framework but the syntactical errors in the log files that were hard.
While the BioDiv2Go parser requires less than 20 lines of code, the newly written parser scratches the 60-line mark with all the error handling code (see \autoref{code:bd2l} and \ref{code:ncl}).
Once this obstacle is passed, the integration is nearly seamless.
A further challenge proved to be - like with BioDiv2Go - understanding the structure of the log, i.e. deriving the games' internal state machine.
On the downside, the addition of lxml has increased the size of the complete image by nearly 40 MB, breaking the 300 MB mark (see \autoref{img:image-size}).
Naturally, this is a rather modest price to pay for improved functionality.
\image{\textwidth}{image-size}{Increased image size with Lxml}{img:image-size}


@ -1,31 +0,0 @@
\section{Review}
As shown in \autoref{sec:eval}, the proposed framework (see \autoref{sec:solution}) and its implementation (see \autoref{sec:implementation}) deliver what \autoref{sec:scope} asked for regarding the portability aspect.
\subsection{Modular framework}
Given the lean framework core, the development of new analyzers and rendering targets is encouraged.
This is backed by the focus on a standalone application instead of extensions to log processing systems struggling with spatial data in the required resolution.
As experienced in \autoref{sec:eval}, a change in the import stage of the processing pipeline goes completely unnoticed in the other parts.
The same is true for the addition or modification of analysis or rendering functionality.
\subsection{Web UI}
With the web interface depicted in \autoref{app:webif}, it is possible for non-expert users to generate pre-defined reports, while researchers can dive into the API of the framework, either as a preprocessing step or integrated into a larger project.
The web UI also gives non-expert users direct access to the results.
\subsection{Results}
The selection of rendered results in \autoref{img:oebkml}, \ref{img:oebge}, \ref{img:retries}, \ref{img:trackfi}, \ref{img:time} showcases the descriptive analysis capabilities already available.
\autoref{img:trackfi} features a map view accessible through a browser, which aligns the active screen content of the mobile device with the spatial track.
When a preview image is selected in the timeline below the map, a marker shows the position where this content entered the screen.
\autoref{img:speed} is based on the same analysis result used for \autoref{img:trackfi} with additional postprocessing in between.
This aggregates the speeds of all selected game sessions and calculates the average.
With a baseline reference of 1 kph, this plot can hint at game field design issues, e.g. overly long walking passages traversed at high speed.
\section{Outlook}
Considering the future, there are many analysis and rendering targets to come.
As with any kind of software, sooner or later the point may be reached where the proposed architecture fails.
Given the positive result of the integration evaluation, this surely seems pessimistic.
The framework shows some self-debugging features: once a log parser is working, analyzers can help to find patterns of log messages to outline the log's structure.
Especially the spatial generalizations presented in \cite{adrienko2011spatial}, or the overestimating error accumulation effect of GPS described in \cite{Ranacher_2015} compared to simplified tracks (e.g. following the methodology of \cite{Chen2009TrajectorySM}), are analyses the author looks forward to giving a try.


@ -1,42 +0,0 @@
\section{Examples}
\subsection{Configuration}
\image{\textwidth}{../../PresTeX/images/oeb-kml}{Analyzer configuration}{img:oebkml}
\subsection{Results}
\image{\textwidth}{../../PresTeX/images/oeb-ge}{Result visualized}{img:oebge}
\image{\textwidth}{../../PresTeX/images/simu-retries}{Experimental rounds}{img:retries}
\image{\textwidth}{../../PresTeX/images/speed}{Aggregated speed distribution of four game fields}{img:speed}
\image{\textwidth}{../../PresTeX/images/time-rel}{Time distribution of game sessions overview of four game fields}{img:time}
\section{Containers}
\subsection{Kibana test setup} \label{app:kibana}
\lstinputlisting[language=yaml,caption={Docker-compose file for Kibana test setup},label=code:kibana,numbers=left]{code/kibana-docker-compose.yml}
\subsection{Biogames server dockerized} \label{app:biogames}
\image{.75\textwidth}{biogames.pdf}{Dockerized setup for biogames}{img:bd2gdocker}
\lstinputlisting[language=yaml,caption={Docker-compose file for Biogames server},label=code:bd2s,numbers=left]{code/biogames/docker-compose.yml}
\lstinputlisting[language=yaml,caption={Dockerfile for Biogames server},label=code:bd2d,numbers=left]{code/biogames/Dockerfile}
\lstinputlisting[language=bash,caption={Entrypoint for Biogames docker container},label=code:bd2e,numbers=left]{code/biogames/start.sh}
\subsection{Traefik reverse proxy}\label{app:traefik}
\lstinputlisting[language=yaml,caption={Docker-compose file for Traefik reverse proxy},label=code:bd2t,numbers=left]{code/traefik.yml}
\lstinputlisting[language=yaml,caption={Traefik reverse proxy configuration (traefik.toml)},label=code:bd2toml,numbers=left]{code/traefik.toml}
\subsection{Geogame Log Analysis project setup}\label{app:dcs}
\lstinputlisting[language=yaml,caption={Docker-compose file for Geogame Log Analysis project},label=code:gglap,numbers=left]{code/project.yml}
\section{Loader implementations}
\lstinputlisting[language=python,caption={Log loader for BioDiv2Go},label=code:bd2l,numbers=left]{code/biogames.py}
\lstinputlisting[language=python,caption={Log loader for Neocartographer},label=code:ncl,numbers=left]{code/neocart.py}
\image{\textwidth}{nclog}{Neocartographer game log}{img:nclog}
\section{Web interface}\label{app:webif}
\subsection{Workflow}
\image{\textwidth}{webgui}{Web workflow}{img:webflow}
\subsection{Samples}
\image{\textwidth}{webgui/index}{Web login}{img:webindex}
\image{\textwidth}{webgui/results}{Analysis result overview}{img:webresults}
\image{\textwidth}{webgui/create}{Configuration for a new analysis run}{img:webcreate}
\image{\textwidth}{webgui/status}{Raw JSON status data}{img:webstatus}
The status page (\autoref{img:webstatus}) is not linked anywhere in the UI directly.


@ -487,95 +487,3 @@ keywords = "Games, Agent based models, Simulations, Analytics"
year={2011},
publisher={Springer}
}
@inproceedings{grossmann2017monitoring,
title={Monitoring Container Services at the Network Edge},
author={Gro{\ss}mann, Marcel and Klug, Clemens},
booktitle={Teletraffic Congress (ITC 29), 2017 29th International},
volume={1},
pages={130--133},
year={2017},
organization={IEEE}
}
@misc{RDD,
title={{RDD galley example}},
year={2011},
month={7},
url={https://oss.oetiker.ch/rrdtool/gallery/index.en.html}
}
@article{Li2015,
  author={Li, Xingxing and Ge, Maorong and Dai, Xiaolei and Ren, Xiaodong and Fritsche, Mathias and Wickert, Jens and Schuh, Harald},
  title={Accuracy and reliability of multi-GNSS real-time precise positioning: GPS, GLONASS, BeiDou, and Galileo},
  journal={Journal of Geodesy},
  year={2015},
  month={6},
  volume={89},
  number={6},
  pages={607--635},
  issn={1432-1394},
  doi={10.1007/s00190-015-0802-8},
  url={https://doi.org/10.1007/s00190-015-0802-8}
}
@inproceedings{10.1007/978-3-642-23199-5_37,
  author={Chen, Chun-Sheng and Eick, Christoph F. and Rizk, Nouhad J.},
  editor={Perner, Petra},
  title={Mining Spatial Trajectories Using Non-parametric Density Functions},
  booktitle={Machine Learning and Data Mining in Pattern Recognition},
  year={2011},
  publisher={Springer Berlin Heidelberg},
  address={Berlin, Heidelberg},
  pages={496--510},
  isbn={978-3-642-23199-5}
}
@article{spatialspecial,
title={What is Special About Spatial Data? Alternative Perspectives on Spatial Data Analysis (89-4)},
author={Anselin, Luc},
year={1989}
}
@inproceedings{kraak2003space,
title={The space-time cube revisited from a geovisualization perspective},
author={Kraak, Menno-Jan},
booktitle={Proc. 21st International Cartographic Conference},
pages={1988--1996},
year={2003}
}
@article{demvsar2015analysis,
title={Analysis and visualisation of movement: an interdisciplinary review},
author={Dem{\v{s}}ar, Ur{\v{s}}ka and Buchin, Kevin and Cagnacci, Francesca and Safi, Kamran and Speckmann, Bettina and Van de Weghe, Nico and Weiskopf, Daniel and Weibel, Robert},
journal={Movement ecology},
volume={3},
number={1},
pages={5},
year={2015},
publisher={BioMed Central}
}
@incollection{montola2009games,
title={Games and pervasive games},
author={Montola, Markus},
booktitle={Pervasive Games},
pages={7--23},
year={2009},
publisher={Elsevier}
}
@inproceedings{kremer2013spatial,
title={Spatial choices in an educational geogame},
author={Kremer, Dominik and Schlieder, Christoph and Feulner, Barbara and Ohl, Ulrike},
booktitle={Games and Virtual Worlds for Serious Applications (VS-GAMES), 2013 5th International Conference on},
pages={1--4},
year={2013},
organization={IEEE}
}
@inproceedings{Chen2009TrajectorySM,
  title={Trajectory simplification method for location-based social networking services},
  author={Chen, Yukun and Jiang, Kai and Zheng, Yu and Li, Chunping and Yu, Nenghai},
  booktitle={GIS-LBSN},
  year={2009}
}


@ -1,20 +1,37 @@
\chapter{Geogames as research field and data source}
\chapter{Introduction}
\input{content/0-introduction}
\chapter{A Framework for the Analysis of Spatial Game Data}
\chapter{Problem statement}
\input{content/1-scope}
\chapter{Log processing, trajectories \& game analysis}
\chapter{State of research}
\input{content/2-state-of-the-art}
\chapter{Design of the modular analysis framework}\label{sec:solution}
\chapter{Solution approach}
\input{content/3-solution}
\chapter{Implementation of the analysis framework}\label{sec:implementation}
\input{content/4-implementation}
\chapter{Implementation}
%\input{content/4-implementation}
\section{Class structure}
\section{Configurability (log import)}
\section{...}
\chapter{Portability evaluation of the analysis framework}\label{sec:eval}
\input{content/5-evaluation}
\chapter{Evaluation}
%\input{content/5-evaluation}
\section{How many metrics are implementable?}
\section{Extensibility}
\chapter{A modular framework: Discussion and outlook}
\input{content/6-discussion}
\chapter{Discussion}
%\input{content/6-discussion}
\section{dummies}
\cite{zheng2011computing}
\nomenclature[s]{$E=mc^2$}{Energy}
\begin{longtable}[H]{|p{0.2\textwidth}|p{0.2\textwidth}|p{0.2\textwidth}|}
\hline
A&B&C\\
\hline
\caption{Table 1}
\label{tab:tab1}
\end{longtable}
\image{3cm}{logo.png}{University logo}{img:uni}


@ -1,33 +0,0 @@
graph{
rankdir=TB;
margin=0;
subgraph {
rank=same;
c [label="Worker\nCelery"];
s [label="Web-App\nFlask"];
}
subgraph{rank = max;
db [label="DB\nRedis"];
gg [label="Geogame log provider\nNginx"];
}
subgraph{
rank=same;
in [label="Internal network",shape="diamond"];
n [label="Static file server\nNginx"];
}
subgraph{
rank=source;
t [label="HTTP frontend\nTraefik",style=dotted];
en [label="External network",shape="diamond"];
}
t -- en [style=dotted];
s -- en [label="labels"];
n --en [label="labels"];
c -- n [label="Shared volume ",style=dashed];
s -- in;
c -- in;
db --in;
gg -- in;
}


@ -1,13 +0,0 @@
graph{
margin=0;
//rankdir="LR";
s [label="Web-App\nBiogames"];
c [label="Worker\nCelery"];
db [label="DB\nPostgres"];
t [label="HTTP frontend\nTraefik"];
s -- db [];
c -- db [label="Internal network"];
t -- s [label=" External network,\nlabel"];
s -- c [label="Shared volume"];
}


@ -1,5 +1,4 @@
digraph{
margin=0;
rankdir="LR";
input;
analysis;


@ -1,29 +0,0 @@
digraph{
margin=0;
rankdir="LR";
log [shape="record", label="Log | entry0 | entry1 | entry2 | …"];
{rank = same;
a1 [label="Analyzer A"];
a2 [label="Analyzer B"];
a3 [label="Analyzer …",style="dotted"];
}
{rank = same;
r1 [label="Result A"];
r2 [label="Result B"];
r3 [label="Result …",style="dotted"];
}
c [label="Postprocessing & Render"];
log -> a1;
a1->a2;
a2->a3 [style="dotted"];
a1 -> r1;
a2 -> r2;
a3 -> r3 [style="dotted"];
r1-> c;
r2-> c;
r3-> c [style="dotted"];
}


@ -1,9 +0,0 @@
digraph{
margin=0;
r [label="analysis.analyzers"];
r -> analyzer;
r -> render;
r -> settings;
settings [shape = "box"];
}


@ -1,11 +0,0 @@
digraph{
margin=0;
r [label="analysis"];
r -> analyzers;
r -> loaders;
r -> util;
r -> log_analyzer;
log_analyzer [shape = "box"];
}


@ -1,9 +0,0 @@
digraph{
margin=0;
node [shape = "box"];
r [label="analysis.loaders",shape="ellipse"];
r -> biogames;
r -> loader;
r -> neocart;
}


@ -1,9 +0,0 @@
digraph{
margin=0;
r [label="/"];
r -> analysis;
r -> clients;
r -> selector;
r -> tasks;
}


@ -1,13 +0,0 @@
digraph{
margin=0;
//rankdir="LR";
{
//rank=same;
s [label="Web Interface"];
a [label="Analysis Framework"];
c [label="CLI / API"];
}
s -> a;
c -> a;
}


@ -1,14 +0,0 @@
digraph{
margin=0;
//rankdir="LR";
index;
results;
create;
status;
index -> results [label=" login"];
index -> index [label=" failed login"];
create -> results [label=" redirect"];
results -> create [label="click"];
results -> results [label=" refresh"];
}


@ -33,7 +33,6 @@
%===============================================================================
\newcommand\meta{../meta}
\input{\meta/config/commands}
\DeclareOldFontCommand{\bf}{\normalfont\bfseries}{\mathbf}
%===============================================================================
% LATEX-Dokument
%===============================================================================
@ -47,10 +46,9 @@
% To select the language in the following command,
% enter ngerman for German or english for English.
%===============================================================================
\selectlanguage{english}
\selectlanguage{ngerman}
\setstretch{1.1}
%% Titelseite
\hypersetup{pageanchor=false}
\maketitle
%% Seitenlayout
@ -63,8 +61,7 @@
\newpage
\listoftables
\newpage
%\listofalgorithms
\lstlistoflistings
\listofalgorithms
\newpage
%% Falls nur Abkuerzungs- oder Symbolverzeichnis benoetigt wird, folgende Befehle benutzen %%
%\printnomenclature
@ -76,7 +73,6 @@
%\printnomenclature %TODO
\cleardoubleemptypage
\hypersetup{pageanchor=true}
\pagestyle{scrheadings}
\pagenumbering{arabic}
\setcounter{page}{1}
@ -104,15 +100,14 @@
% Stichwortverzeichnis soll im Inhaltsverzeichnis auftauchen
% Sprungmarke mit Phantomsection korrigiert
\phantomsection%
%\addcontentsline{toc}{chapter}{Index}%
\addcontentsline{toc}{chapter}{Index}%
% Stichwortverzeichnis endgueltig anzeigen
%\printindex%
\printindex%
\appendix
\setstretch{1.5}
\chapter{Appendix}
\input{content/appendix}
\chapter{Appendix}
\setstretch{1.1}
\cleardoubleemptypage


@ -44,7 +44,7 @@
\makeatletter
\ifposter
\else
\hypersetup{pdftitle={\@title}, pdfauthor={\@author}, pdfsubject={\@subtitle}, pdfkeywords={\gitAbbrevHash}, linktoc=page, pdfborder={0 0 0 [3 3]}, breaklinks=true, linkbordercolor=unibablueI, menubordercolor=unibablueI, urlbordercolor=unibablueI, citebordercolor=unibablueI, filebordercolor=unibablueI}
\hypersetup{pdftitle={\@title}, pdfauthor={\@author}, linktoc=page, pdfborder={0 0 0 [3 3]}, breaklinks=true, linkbordercolor=unibablueI, menubordercolor=unibablueI, urlbordercolor=unibablueI, citebordercolor=unibablueI, filebordercolor=unibablueI}
\fi
%% Define a new 'leo' style for the package that will use a smaller font.
\def\url@leostyle{%
@ -178,6 +178,7 @@ Abgabedatum:\> \@date\\
\end{figure}
}
%#1 File (located in the graphic directory)
%#2 Caption
%#3 Label for referencing
@ -276,7 +277,7 @@ Ich erkläre hiermit gemäß § 17 Abs. 2 APO, dass ich die vorstehende \degree
%===============================================================================
% Listing Styles
%===============================================================================
\lstset{basicstyle=\ttfamily,showstringspaces=false,commentstyle=\color{unibagrayI},keywordstyle=\color{unibablueI},breaklines=true,captionpos=b}
\lstset{basicstyle=\ttfamily,showstringspaces=false,commentstyle=\color{unibagrayI},keywordstyle=\color{unibablueI},breaklines=true}
\DeclareFixedFont{\ttb}{T1}{txtt}{bx}{n}{9} % for bold
\DeclareFixedFont{\ttm}{T1}{txtt}{m}{n}{9} % for normal
\lstset{
@ -450,5 +451,3 @@ major line width/.initial=1pt,
\changemenucolor{gray}{br}{named}{unibablueI}
\changemenucolor{gray}{txt}{named}{unibablueI}
\fi
\newcommand{\furl}[1]{\footnote{\url{#1}}}


@ -1,9 +1,9 @@
\usepackage[utf8]{inputenc}
\usepackage{lmodern}
\usepackage[T1]{fontenc}
\usepackage{gitinfo2}
\ifgit
\ifpresentation
\usepackage{gitinfo2}
\else
\ifthesis
\usepackage{gitinfo2}