cleanup, improve structure

activity_mapper
Clemens Klug 2017-12-19 13:12:07 +01:00
parent 33d63b120d
commit b21d0bf8ba
7 changed files with 258 additions and 204 deletions

analyzers/analyzer/default.py

@@ -137,3 +137,72 @@ class ProgressAnalyzer(Analyzer):
        if entry[self.settings.type_field] in self.settings.boards:
            self.board[entry["timestamp"]] = entry
        return False
class MetaDataAnalyzer(Analyzer):
    """Collect per-log metadata (fields configured under settings.custom["metadata"])."""
    __name__ = "MetaDataAnalyzer"

    def result(self, store: ResultStore, name=None) -> None:
        store.add(Result(type(self), dict(self.store)))

    def process(self, entry: dict) -> bool:
        if "metadata" not in self.settings.custom:
            return False
        for mdata in self.settings.custom["metadata"]:
            key = self.settings.custom["metadata"][mdata]  # log-entry key for this metadata field
            if key in entry:
                self.store[mdata] = json_path(entry, key)
        return False  # metadata entries should still reach the other analyzers

    def __init__(self, settings: LogSettings) -> None:
        super().__init__(settings)
        self.store = {}
def write_logentry_count_csv(LogEntryCountCSV, store, render, analyzers):
    """Render LogEntryCount results per category and dump them as CSV."""
    import csv
    LogEntryCountCSV.summary = None
    for cat in store.get_categories():
        data = store.get_category(cat)
        render(analyzers.LogEntryCountAnalyzer, data, name=cat)
    if LogEntryCountCSV.summary:
        headers = []
        lines = []
        # first pass: collect the union of all counter names
        for data in LogEntryCountCSV.summary.values():
            for head in data:
                if head not in headers:
                    headers.append(head)
        # second pass: one row per log, 0 for counters a log never saw
        for name, data in LogEntryCountCSV.summary.items():
            lines.append([name] + [data.get(head, 0) for head in headers])
        with open('logentrycount.csv', 'w', newline='') as csvfile:
            writer = csv.writer(csvfile, quoting=csv.QUOTE_NONE)
            writer.writerow(["name"] + [h.split(".")[-1] for h in headers])
            for line in lines:
                writer.writerow(line)
def write_simulation_flag_csv(store):
    """Dump simulation results as JSON and as a sparse, outline-style CSV."""
    import json
    from datetime import datetime
    with open("simus.json", "w") as dump:
        json.dump(store.serializable(), dump, indent=2)
    with open("simus.csv", "w") as csvfile:
        csvfile.write("instanceconfig,log,simu,answered,universe_state,selected_actions,timestamp,time\n")
        for key in store.get_store():
            csvfile.write("{}\n".format(key))
            for result in store.store[key]:
                csvfile.write(",{}\n".format(result.name))
                for i in result.get():
                    csvfile.write(",,{},{},{},{},{},{}\n".format(
                        i['answers']['@id'],
                        i['answers']['answered'],
                        len(i['answers']['universe_state']) if i['answers']['universe_state'] else 0,
                        len(i['selected_actions']) if i['selected_actions'] else 0,
                        i['timestamp'],
                        str(datetime.fromtimestamp(i['timestamp'] / 1000))))
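
For context, a minimal sketch of how the new "metadata" mapping in the settings JSON (see the config hunk further down) feeds MetaDataAnalyzer.process; the sample entry and the inlined lookup are hypothetical stand-ins, and json_path is assumed to resolve plain keys like these:

    metadata_map = {"timestamp": "timestamp", "gamefield": "instance_id", "user": "player_group_name"}
    entry = {"timestamp": 1513684327000, "instance_id": "fe13...", "player_group_name": "group-a"}
    collected = {}
    for field, key in metadata_map.items():
        if key in entry:
            collected[field] = entry[key]  # json_path(entry, key) in the analyzer itself
    print(collected)  # {'timestamp': 1513684327000, 'gamefield': 'fe13...', 'user': 'group-a'}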

analyzers/render/wip.py Normal file

@@ -0,0 +1,87 @@
def time_distribution(store):
    """Plot how much time each log spent per activity type, as stacked bars."""
    # json.dump(store.serializable(), open("new.json", "w"), indent=1)
    from collections import defaultdict
    import json

    import matplotlib.pyplot as plt
    import numpy as np

    from util.meta_temp import CONFIG_NAMES

    keys = ["simu", "question", "image", "audio", "video", "other", "map"]
    places = defaultdict(list)
    for log in store.get_all():
        # sum up the duration per activity type for this log
        result = defaultdict(lambda: 0)
        for i in log.get()['track']:
            duration = i['properties']['end_timestamp'] - i['properties']['start_timestamp']
            result[i['properties']['activity_type']] += duration
        print(json.dumps(result, indent=4))
        total = sum(result.values())
        print(total)
        percentage = defaultdict(lambda: 0)
        minutes = defaultdict(lambda: 0)
        for i in result:
            percentage[i] = result[i] / total
            minutes[i] = result[i] / 60_000  # ms -> min
        print(json.dumps(percentage, indent=4))
        if 'error' not in result:
            # places[log.get()['instance']].append(percentage)
            places[log.get()['instance']].append(minutes)
    for place in places:
        places[place] = sorted(places[place], key=lambda item: item['map'])
    # one row of values per log, grouped by place, with a zero spacer row between groups
    dummy = [0] * len(keys)
    results = []
    sites = []
    for i in places:
        for j in places[i]:
            results.append([j[k] for k in keys])
        results.append(dummy)
        sites.append(CONFIG_NAMES[i] if i in CONFIG_NAMES else "---")
    size = len(results)
    ind = np.arange(size)
    width = 0.9
    print(results)
    data = list(zip(*results))  # transpose: one series per activity type
    print(data)
    lines = []
    bottom = [0] * len(results)
    for i in range(len(data)):
        # stack each activity's segment on top of the ones drawn so far
        lines.append(plt.bar(ind, data[i], bottom=bottom, width=width)[0])
        for k, x in enumerate(data[i]):
            bottom[k] += x
    plt.legend(lines, keys)
    plt.title(", ".join(sites))
    plt.show()
    # alternative hard-coded title: plt.title("Zwei Spiele in Filderstadt (t1=237min; t2=67min)")
    # ("two games in Filderstadt")
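
The plotting above uses matplotlib's manual stacking idiom rather than a stacked-bar helper; a self-contained sketch of the same pattern with invented numbers:

    import matplotlib.pyplot as plt
    import numpy as np

    series = [[3, 5, 2], [1, 2, 4]]  # two activity types across three logs (minutes, made up)
    ind = np.arange(3)
    bottom = np.zeros(3)
    handles = []
    for row in series:
        handles.append(plt.bar(ind, row, bottom=bottom, width=0.9)[0])
        bottom += row  # the next segment starts where this one ended
    plt.legend(handles, ["simu", "question"])
    plt.show()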

biogames2.json

@@ -67,7 +67,12 @@
"action":"PAUSE" "action":"PAUSE"
} }
}, },
"coordinates": "location.coordinates" "coordinates": "location.coordinates",
"metadata":{
"timestamp": "timestamp",
"gamefield": "instance_id",
"user": "player_group_name"
}
}, },
"source":{ "source":{
"type": "Biogames", "type": "Biogames",


@@ -7,9 +7,12 @@ import numpy as np
import analyzers
from analyzers import get_renderer, Analyzer, render, Store
from analyzers.analyzer import ResultStore
+from analyzers.analyzer.default import write_logentry_count_csv, write_simulation_flag_csv
from analyzers.render.default import LogEntryCountCSV
+from analyzers.render.wip import time_distribution
from analyzers.settings import LogSettings, load_settings
from loaders import LOADERS
+from util.processing import grep, run_analysis

logging.basicConfig(format='%(levelname)s %(name)s:%(message)s', level=logging.DEBUG)
log: logging.Logger = logging.getLogger(__name__)
@@ -18,98 +21,11 @@ logging.getLogger('requests').setLevel(logging.WARN)
logging.getLogger("urllib3").setLevel(logging.WARNING)
-def process_log(logfile: str, settings: LogSettings) -> List[Analyzer]: ...
-def run_analysis(log_ids: list, settings): ...
-def load_ids(name: str): ...
 (bodies moved to util/processing.py, below; process_log and run_analysis now take the loader registry as a parameter)
def urach_logs(log_ids, settings):
    # return ["data/inst_{id}.{format}".format(id=log_id, format=settings.log_format) for log_id in log_ids]
    return ["data/{id}.{format}".format(id=log_id, format=settings.log_format) for log_id in log_ids]
-def write_logentry_count_csv(): ...
-def write_simulation_flag_csv(): ...
 (bodies moved to analyzers/analyzer/default.py, above; they now receive the renderer class, store, render and analyzers as parameters instead of reaching for globals)
if __name__ == '__main__':
    settings: LogSettings = load_settings("biogames2.json")
    log_ids_urach: List[str] = urach_logs([
@@ -121,11 +37,19 @@ if __name__ == '__main__':
# "597b704fe9ace475316c345903", # "597b704fe9ace475316c345903",
# "e01a684aa29dff9ddd9705edf8", # "e01a684aa29dff9ddd9705edf8",
# "fbf9d64ae0bdad0de7efa3eec6", # "fbf9d64ae0bdad0de7efa3eec6",
"fe1331481f85560681f86827ec", "fe1331481f85560681f86827ec", # urach
# "fe1331481f85560681f86827ec"] # "fe1331481f85560681f86827ec"]
"fec57041458e6cef98652df625", ] "fec57041458e6cef98652df625",
,settings) ]
store: ResultStore = run_analysis(log_ids_urach, settings) , settings)
log_ids_gf = grep(["9d11b749c78a57e786bf5c8d28", # filderstadt
"a192ff420b8bdd899fd28573e2", # eichstätt
"3a3d994c04b1b1d87168422309", # stadtökologie
"96f6d9cc556b42f3b2fec0a2cb7ed36e" # oberelsbach
],
"/home/clemens/git/ma/test/src",
settings)
store: ResultStore = run_analysis(log_ids_gf, settings, LOADERS)
    if False:
        for r in get_renderer(analyzers.LocomotionActionAnalyzer):
            r().render(store.get_all())
@@ -148,110 +72,12 @@ if __name__ == '__main__':
            data = store.get_category(cat)
            render(analyzers.SimulationOrderAnalyzer, data, name=cat)
    if False:
-        write_logentry_count_csv()
+        write_logentry_count_csv(LogEntryCountCSV, store, render, analyzers)
    if False:
-        write_simulation_flag_csv()
+        write_simulation_flag_csv(store)
-def calc_distance(geojson: str): ...
 (moved to util/geo.py, below)
-    if False:  # inline time-distribution plotting
-        ...
 (moved to time_distribution() in analyzers/render/wip.py, above)
+    if True:
+        time_distribution(store)
    # for analyzers in analyzers:


@@ -1,12 +1,10 @@
-//$.getJSON("data/ff8f1e8f-6cf5-4a7b-835b-5e2226c1e771_03b9b6b4-c8ab-4182-8902-1620eebe8889.json", function (data) {
-$.getJSON("data/ff8f1e8f-6cf5-4a7b-835b-5e2226c1e771_de7df5b5-edd5-4070-840f-68854ffab9aa.json", function (data) {
+$.getJSON("data/ff8f1e8f-6cf5-4a7b-835b-5e2226c1e771_03b9b6b4-c8ab-4182-8902-1620eebe8889.json", function (data) { //urach
+//$.getJSON("data/ff8f1e8f-6cf5-4a7b-835b-5e2226c1e771_de7df5b5-edd5-4070-840f-68854ffab9aa.json", function (data) { //urach
+//$.getJSON("data/90278021-4c57-464e-90b1-d603799d07eb_07da99c9-398a-424f-99fc-2701763a63e9.json", function (data) { //eichstätt
+//$.getJSON("data/13241906-cdae-441a-aed0-d57ebeb37cac_d33976a6-8a56-4a63-b492-fe5427dbf377.json", function (data) { //stadtökologie
+//$.getJSON("data/5e64ce07-1c16-4d50-ac4e-b3117847ea43_2f664d7b-f0d8-42f5-8731-c034ef86703e.json", function (data) { //filderstadt
    var images = {};
    var tiles = {
-        "osm": L.tileLayer('http://{s}.tile.osm.org/{z}/{x}/{y}.png', {
-            maxNativeZoom: 19,
-            maxZoom: 24,
-            attribution: '&copy; <a href="http://osm.org/copyright">OpenStreetMap</a> contributors',
-        }),
        "openstreetmap": L.tileLayer('https://{s}.tile.openstreetmap.org/{z}/{x}/{y}.png', {
            maxNativeZoom: 19,
            maxZoom: 24,
@@ -17,13 +15,13 @@ $.getJSON("data/ff8f1e8f-6cf5-4a7b-835b-5e2226c1e771_de7df5b5-edd5-4070-840f-688
            maxZoom: 24,
            attribution: 'Tiles &copy; Esri &mdash; Source: Esri, i-cubed, USDA, USGS, AEX, GeoEye, Getmapping, Aerogrid, IGN, IGP, UPR-EGP, and the GIS User Community'
        }),
-        "google sat": L.tileLayer('http://{s}.google.com/vt/lyrs=s&x={x}&y={y}&z={z}', {
+        "google sat": L.tileLayer('https://{s}.google.com/vt/lyrs=s&x={x}&y={y}&z={z}', {
            maxNativeZoom: 20,
            maxZoom: 24,
            subdomains: ['mt0', 'mt1', 'mt2', 'mt3']
        })
    };
-    var map = L.map("mainMap", {layers: [tiles.osm]});
+    var map = L.map("mainMap", {layers: [tiles.openstreetmap]});

    function styleTrack(feature) {
        var styles = {};

util/geo.py Normal file

@@ -0,0 +1,12 @@
def calc_distance(geojson: str):
    """Length in metres of a GeoJSON LineString (WGS84 reprojected to UTM 33N)."""
    from functools import partial
    import json

    import pyproj
    from shapely.geometry import LineString
    from shapely.ops import transform

    track = LineString(json.loads(geojson)['coordinates'])
    project = partial(
        pyproj.transform,
        pyproj.Proj(init='EPSG:4326'),   # lon/lat degrees
        pyproj.Proj(init='EPSG:32633'))  # UTM zone 33N, metres
    return transform(project, track).length
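
A quick usage sketch with invented coordinates (the init= keyword matches the pyproj 1.x API used here):

    segment = '{"type": "LineString", "coordinates": [[9.0, 48.5], [9.01, 48.5]]}'
    print(calc_distance(segment))  # roughly 740 m at this latitude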

util/processing.py Normal file

@@ -0,0 +1,57 @@
import logging
from typing import List

from analyzers.analyzer import ResultStore, Analyzer
from analyzers.settings import LogSettings

log: logging.Logger = logging.getLogger(__name__)


def process_log(logfile: str, settings: LogSettings, loaders) -> List[Analyzer]:
    loader = loaders[settings.log_format]()
    try:
        loader.load(logfile)
    except BaseException as e:
        raise RuntimeError(e)
    analyzers: List[Analyzer] = []
    log.debug("build analyzers")
    for analyzer in settings.analyzers:
        analyzers.append(analyzer(settings))
    log.debug("process entries")
    for entry in loader.get_entry():
        for analyzer in analyzers:
            try:
                # the first analyzer that claims the entry stops the chain
                if analyzer.process(entry):
                    break
            except KeyError as e:
                log.exception(e)
    return analyzers


def run_analysis(log_ids: list, settings, loaders):
    store: ResultStore = ResultStore()
    for log_id in log_ids:
        for analysis in process_log(log_id, settings, loaders):
            log.info("* Result for " + analysis.name())
            analysis.result(store, name=log_id)
    return store


def load_ids(name: str):
    log_ids = []
    with open(name) as src:
        for line in src:
            log_ids.append(line.strip())
    return log_ids


def grep(log_ids, source, settings):
    # settings is accepted but not used yet
    logs = []
    with open(source) as src:
        lines = src.readlines()
    for log_id in log_ids:
        for line in lines:
            if log_id in line:
                logs.append(line.strip())
    return logs
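
Taken together, a minimal driver sketch (the path and ID come from the main script above; grep treats every line of the index file that contains one of the IDs as a log path):

    from analyzers.settings import load_settings
    from loaders import LOADERS
    from util.processing import grep, run_analysis

    settings = load_settings("biogames2.json")
    logs = grep(["9d11b749c78a57e786bf5c8d28"], "/home/clemens/git/ma/test/src", settings)
    store = run_analysis(logs, settings, LOADERS)
    print(store.get_categories())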