Compare commits


2 Commits

Author SHA1 Message Date
Daniel Ehlers 01fa5e7a0b nodedb.py: Transport new data to node entry 2014-02-09 15:27:25 +01:00
Daniel Ehlers 02a2862ca9 alfred.py: Transfer more data from alfred into the alias list 2014-02-09 15:27:25 +01:00
31 changed files with 965 additions and 1931 deletions

.gitignore

@@ -1,11 +1,2 @@
-# backups
-*~
-# script-generated
-aliases*.json
-nodedb/
-# python bytecode / cache
 *.pyc
-__pycache__/
+aliases.json

.travis.yml (deleted)

@@ -1,6 +0,0 @@
sudo: false
language: python
python:
- "3.4"
install: "pip install pep8"
script: "pep8 --ignore=E501 *.py lib/*.py"

README.md

@@ -1,117 +1 @@
The README shrinks to the single line "See mkmap.sh for now :)". The removed content:

# Data for Freifunk Map, Graph and Node List

[![Build Status](https://travis-ci.org/ffnord/ffmap-backend.svg?branch=master)](https://travis-ci.org/ffnord/ffmap-backend)
ffmap-backend gathers information on the batman network by invoking:
* batctl (might require root),
* alfred-json and
* batadv-vis
The output will be written to a directory (`-d output`).
Run `backend.py --help` for a quick overview of all available options.
For the script's regular execution add the following to the crontab:
<pre>
* * * * * backend.py -d /path/to/output -a /path/to/aliases.json --vpn ae:7f:58:7d:6c:2a d2:d0:93:63:f7:da
</pre>
# Dependencies
- Python 3
- Python 3 Package [Networkx](https://networkx.github.io/)
- [alfred-json](https://github.com/tcatm/alfred-json)
- rrdtool (if run with `--with-rrd`)
# Running as unprivileged user
Some information collected by ffmap-backend requires access to specific system resources.
Make sure the user you are running this under is part of the group that owns the alfred socket, so
alfred-json can access the alfred daemon.
<pre>
# ls -al /var/run/alfred.sock
srw-rw---- 1 root alfred 0 Mar 19 22:00 /var/run/alfred.sock=

# adduser map alfred
Adding user `map' to group `alfred' ...
Adding user map to group alfred
Done.

$ groups
map alfred
</pre>
Running batctl requires passwordless sudo access, because it needs to access the debugfs to retrieve
the gateway list.

<pre>
# echo 'map ALL = NOPASSWD: /usr/sbin/batctl' | tee /etc/sudoers.d/map
map ALL = NOPASSWD: /usr/sbin/batctl
# chmod 0440 /etc/sudoers.d/map
</pre>
That should be everything. The script automatically detects if it is run in unprivileged mode and
will prefix `sudo` where necessary.
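This detection is just an effective-UID check; lib/batman.py (below) builds its commands like this:

```
import os

cmd = ['batctl', 'meshif', 'bat0', 'gwl', '-n']
if os.geteuid() > 0:  # not running as root, so prepend sudo
    cmd.insert(0, 'sudo')
```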
# Data format
## nodes.json
<pre>
{ 'nodes': {
    node_id: { 'flags': { flags },
               'firstseen': isoformat,
               'lastseen': isoformat,
               'nodeinfo': {...},         # copied from alfred type 158
               'statistics': {
                  'uptime': double,        # seconds
                  'memory_usage': double,  # 0..1
                  'clients': double,
                  'rootfs_usage': double,  # 0..1
                  'loadavg': double,
                  'gateway': mac
               }
             },
    ...
  },
  'timestamp': isoformat
}
</pre>
### flags (bool)
- online
- gateway
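For instance, a minimal sketch (path illustrative) that lists online nodes from a `nodes.json` of the shape above:

```
import json

with open('/ffmap-data/nodes.json') as f:
    nodedb = json.load(f)

# 'nodes' maps node_id to the per-node record described above.
for node_id, node in nodedb['nodes'].items():
    if node['flags']['online']:
        name = node['nodeinfo'].get('hostname', node_id)
        clients = node['statistics'].get('clients', 0)
        print("%s: %s clients" % (name, clients))
```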
## Old data format
If you still want to use the old [ffmap-d3](https://github.com/ffnord/ffmap-d3)
front end, you can use the file `ffmap-d3.jq` to convert the new output to the
old format:
```
jq -n -f ffmap-d3.jq \
--argfile nodes nodedb/nodes.json \
--argfile graph nodedb/graph.json \
> nodedb/ffmap-d3.json
```
Then point your ffmap-d3 instance to the `ffmap-d3.json` file.
# Removing owner information
If you'd like to redact information about the node owner from `nodes.json`,
you may use a filter like [jq]. In this case, specify an output directory
different from your webserver directory, e.g.:
```
./backend.py -d /ffmap-data
```
Do not modify the files generated in there; ffmap-backend uses them as its
database.
After running ffmap-backend, copy `graph.json` to your webserver. Then,
filter `nodes.json` using `jq` like this:
```
jq '.nodes = (.nodes | with_entries(del(.value.nodeinfo.owner)))' \
  < /ffmap-data/nodes.json > /var/www/data/nodes.json
```
This will remove owner information from nodes.json before copying the data
to your webserver.
[jq]: https://stedolan.github.io/jq/

alfred.py (new executable file)

@@ -0,0 +1,58 @@
#!/usr/bin/env python3
import subprocess
import json


class alfred:
    """Fetch nodeinfo records from the alfred daemon via alfred-json
    and condense them into an alias list."""

    def __init__(self, request_data_type=158):
        self.request_data_type = request_data_type

    def aliases(self):
        output = subprocess.check_output(["alfred-json", "-r", str(self.request_data_type), "-f", "json"])
        alfred_data = json.loads(output.decode("utf-8"))
        alias = {}
        for mac, node in alfred_data.items():
            node_alias = {}
            if 'location' in node:
                if 'latitude' in node['location'] and 'longitude' in node['location']:
                    node_alias['gps'] = str(node['location']['latitude']) + ' ' + str(node['location']['longitude'])
            if 'software' in node:
                if 'firmware' in node['software']:
                    if 'base' in node['software']['firmware']:
                        node_alias['firmware-base'] = node['software']['firmware']['base']
                    if 'release' in node['software']['firmware']:
                        node_alias['firmware'] = node['software']['firmware']['release']
                if 'autoupdater' in node['software']:
                    if 'branch' in node['software']['autoupdater']:
                        node_alias['autoupdater-branch'] = node['software']['autoupdater']['branch']
                    if 'enabled' in node['software']['autoupdater']:
                        node_alias['autoupdater-enabled'] = node['software']['autoupdater']['enabled']
                if 'fastd' in node['software']:
                    if 'enabled' in node['software']['fastd']:
                        node_alias['fastd-enabled'] = node['software']['fastd']['enabled']
                    if 'version' in node['software']['fastd']:
                        node_alias['fastd-version'] = node['software']['fastd']['version']
            if 'hardware' in node:
                if 'model' in node['hardware']:
                    node_alias['hardware-model'] = node['hardware']['model']
            if 'network' in node:
                if 'gateway' in node['network']:
                    node_alias['selected-gateway'] = node['network']['gateway']
            if 'hostname' in node:
                node_alias['name'] = node['hostname']
            elif 'name' in node:
                node_alias['name'] = node['name']
            if len(node_alias):
                alias[mac] = node_alias
        return alias

if __name__ == "__main__":
    ad = alfred()
    al = ad.aliases()
    print(al)
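For illustration, a fabricated alfred type-158 record and the alias entry aliases() derives from it (all values invented):

```
alfred_data = {
    "66:33:4e:27:03:e4": {
        "hostname": "mynode",
        "location": {"latitude": 53.86, "longitude": 10.74},
        "software": {"firmware": {"base": "gluon-v2013.4", "release": "0.4.1"}}
    }
}
# aliases() would yield:
# {'66:33:4e:27:03:e4': {'gps': '53.86 10.74',
#                        'firmware-base': 'gluon-v2013.4',
#                        'firmware': '0.4.1',
#                        'name': 'mynode'}}
```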

aliases sample (file name not rendered)

@@ -1,36 +1,9 @@
-[
-  {
-    "node_id": "krtek",
-    "hostname": "krtek",
-    "location": {
-      "longitude": 10.74,
-      "latitude": 53.86
-    },
-    "network": {
-      "mesh": {
-        "bat0": {
-          "interfaces": {
-            "tunnel": [
-              "00:25:86:e6:f1:bf"
-            ]
-          }
-        }
-      }
-    }
-  },
-  {
-    "node_id": "gw1",
-    "hostname": "burgtor",
-    "network": {
-      "mesh": {
-        "bat0": {
-          "interfaces": {
-            "tunnel": [
-              "52:54:00:f3:62:d9"
-            ]
-          }
-        }
-      }
-    }
-  }
-]
+{
+  "b0:48:7a:e7:d3:64" : {
+    "name" : "Meute-AP"
+  },
+  "8e:3d:c2:10:10:28" : {
+    "name" : "holstentor",
+    "vpn" : true
+  }
+}

backend.py (deleted)

@@ -1,269 +0,0 @@
#!/usr/bin/env python3
"""
backend.py - ffmap-backend runner
https://github.com/ffnord/ffmap-backend

Extended version from Freifunk Pinneberg
- generate graphs from the RRD data only on request
- directory for the RRD node database as a command line parameter
- statistics generation fixed: initialization and filling
  at the appropriate times
"""
import argparse
import configparser
import json
import os
import sys
import logging, logging.handlers
from datetime import datetime

import networkx as nx
from networkx.readwrite import json_graph

from lib import graph, nodes
from lib.alfred import Alfred
from lib.batman import Batman
from lib.rrddb import RRD
from lib.nodelist import export_nodelist
from lib.validate import validate_nodeinfos

NODES_VERSION = 1
GRAPH_VERSION = 1

cfg = {
    'cfgfile': '/etc/ffmap/ffmap-test.cfg',
    'logfile': '/var/log/ffmap.log',
    'loglevel': 5,
    'dest_dir': '/var/lib/ffmap/mapdata',
    'aliases': [],
    'prune': 0,
    'nodedb': '/var/lib/ffmap/nodedb',
    'rrd_data': False,
    'rrd_graphs': False,
    'redis': False
}


def main(params):
    os.makedirs(params['dest_dir'], exist_ok=True)
    nodes_fn = os.path.join(params['dest_dir'], 'nodes.json')
    graph_fn = os.path.join(params['dest_dir'], 'graph.json')
    nodelist_fn = os.path.join(params['dest_dir'], 'nodelist.json')

    now = datetime.utcnow().replace(microsecond=0)

    # parse mesh param and instantiate Alfred/Batman instances
    alfred_instances = []
    batman_instances = []
    for value in params['mesh']:
        # (1) only a batman-adv interface, no alfred socket
        if ':' not in value:
            if len(params['mesh']) > 1:
                raise ValueError(
                    'Multiple mesh interfaces require the use of '
                    'alfred socket paths.')
            alfred_instances.append(Alfred(unix_sockpath=None))
            batman_instances.append(Batman(mesh_interface=value))
        else:
            # (2) batman-adv interface + alfred socket
            try:
                batif, alfredsock = value.split(':')
                alfred_instances.append(Alfred(unix_sockpath=alfredsock))
                batman_instances.append(Batman(mesh_interface=batif,
                                               alfred_sockpath=alfredsock))
            except ValueError:
                raise ValueError(
                    'Unparseable value "{0}" in --mesh parameter.'.
                    format(value))

    # read nodedb state from nodes.json
    try:
        with open(nodes_fn, 'r') as nodedb_handle:
            nodedb = json.load(nodedb_handle)
    except (IOError, ValueError):
        nodedb = {'nodes': dict()}

    # flush nodedb if it uses the old format
    if 'links' in nodedb:
        nodedb = {'nodes': dict()}

    # set version we're going to output
    nodedb['version'] = NODES_VERSION

    # update timestamp and assume all nodes are offline
    nodedb['timestamp'] = now.isoformat()
    for node_id, node in nodedb['nodes'].items():
        node['flags']['online'] = False

    # integrate alfred nodeinfo
    for alfred in alfred_instances:
        nodeinfo = validate_nodeinfos(alfred.nodeinfo())
        nodes.import_nodeinfo(nodedb['nodes'], nodeinfo,
                              now, assume_online=True)

    # integrate static aliases data
    for aliases in params['aliases']:
        with open(aliases, 'r') as f:
            nodeinfo = validate_nodeinfos(json.load(f))
        nodes.import_nodeinfo(nodedb['nodes'], nodeinfo,
                              now, assume_online=False)

    # prepare statistics collection
    nodes.reset_statistics(nodedb['nodes'])

    # acquire gwl and visdata for each batman instance
    mesh_info = []
    for batman in batman_instances:
        vd = batman.vis_data()
        gwl = batman.gateway_list()
        mesh_info.append((vd, gwl))

    # update nodedb from batman-adv data
    for vd, gwl in mesh_info:
        nodes.import_mesh_ifs_vis_data(nodedb['nodes'], vd)
        nodes.import_vis_clientcount(nodedb['nodes'], vd)
        nodes.mark_vis_data_online(nodedb['nodes'], vd, now)
        nodes.mark_gateways(nodedb['nodes'], gwl)

    # get alfred statistics
    for alfred in alfred_instances:
        nodes.import_statistics(nodedb['nodes'], alfred.statistics())

    # clear the nodedb from nodes that have not been online in $prune days
    if params['prune']:
        nodes.prune_nodes(nodedb['nodes'], now, params['prune'])

    # build a networkx graph from nodedb and vis data
    batadv_graph = nx.DiGraph()
    for vd, gwl in mesh_info:
        graph.import_vis_data(batadv_graph, nodedb['nodes'], vd)

    # force MAC addresses to be vpn-link only (like gateways for example)
    if params['vpn']:
        graph.mark_vpn(batadv_graph, frozenset(params['vpn']))

    def extract_tunnel(nodes):
        macs = set()
        for id, node in nodes.items():
            try:
                for mac in node["nodeinfo"]["network"]["mesh"]["bat0"]["interfaces"]["tunnel"]:
                    macs.add(mac)
            except KeyError:
                pass
        return macs

    graph.mark_vpn(batadv_graph, extract_tunnel(nodedb['nodes']))

    batadv_graph = graph.merge_nodes(batadv_graph)
    batadv_graph = graph.to_undirected(batadv_graph)

    # write processed data to dest dir
    with open(nodes_fn, 'w') as f:
        json.dump(nodedb, f)

    graph_out = {'batadv': json_graph.node_link_data(batadv_graph),
                 'version': GRAPH_VERSION}

    with open(graph_fn, 'w') as f:
        json.dump(graph_out, f)

    with open(nodelist_fn, 'w') as f:
        json.dump(export_nodelist(now, nodedb), f)

    # optional rrd graphs (trigger with --rrd)
    if params['rrd']:
        if params['nodedb']:
            rrd = RRD(params['nodedb'], os.path.join(params['dest_dir'], 'nodes'))
        else:
            script_directory = os.path.dirname(os.path.realpath(__file__))
            rrd = RRD(os.path.join(script_directory, 'nodedb'),
                      os.path.join(params['dest_dir'], 'nodes'))
        rrd.update_database(nodedb['nodes'])
        if params['img']:
            rrd.update_images()


def set_loglevel(nr):
    """
    Map the number to a value suitable for "logging".
    The number may range from 0 (no logging) to 5 (debug).
    """
    level = (None, logging.CRITICAL, logging.ERROR, logging.WARNING,
             logging.INFO, logging.DEBUG)
    if nr > 5:
        nr = 5
    elif nr < 0:
        nr = 0
    return level[nr]


if __name__ == '__main__':
    # get options from command line
    parser = argparse.ArgumentParser(
        description="Collect data for ffmap: creates json files and "
                    "optional rrd data and graphs")
    parser.add_argument('-a', '--aliases',
                        help='Read aliases from FILE',
                        nargs='+', default=[], metavar='FILE')
    parser.add_argument('-m', '--mesh',
                        default=['bat0'], nargs='+',
                        help='Use given batman-adv mesh interface(s) (defaults '
                             'to bat0); specify alfred unix socket like '
                             'bat0:/run/alfred0.sock.')
    parser.add_argument('-d', '--dest-dir', action='store',
                        help='Write output to destination directory',
                        required=False)
    parser.add_argument('-c', '--config', action='store', metavar='FILE',
                        help='read configuration from FILE')
    parser.add_argument('-V', '--vpn', nargs='+', metavar='MAC',
                        help='Assume MAC addresses are part of vpn')
    parser.add_argument('-p', '--prune', metavar='DAYS', type=int,
                        help='Forget nodes offline for at least DAYS')
    parser.add_argument('-r', '--with-rrd', dest='rrd', action='store_true',
                        default=False,
                        help='Enable the collection of RRD data')
    parser.add_argument('-n', '--nodedb', metavar='RRD_DIR', action='store',
                        help='Directory for node RRD data files')
    parser.add_argument('-i', '--with-img', dest='img', action='store_true',
                        default=False,
                        help='Enable the rendering of RRD graphs (cpu '
                             'intensive)')
    options = vars(parser.parse_args())

    if options['config']:
        cfg['cfgfile'] = options['config']

    config = configparser.ConfigParser(cfg)
    if config.read(cfg['cfgfile']):
        if not options['nodedb']:
            options['nodedb'] = config.get('rrd', 'nodedb')
        if not options['dest_dir']:
            options['dest_dir'] = config.get('global', 'dest_dir')
        if not options['rrd']:
            options['rrd'] = config.getboolean('rrd', 'enabled')
        if not options['img']:
            options['img'] = config.getboolean('rrd', 'graphs')
        cfg['logfile'] = config.get('global', 'logfile')
        cfg['loglevel'] = config.getint('global', 'loglevel')

    # At this point the global configuration is available; time to enable
    # logging. Log rotation is left to the operating system, hence the
    # WatchedFileHandler.
    handler = logging.handlers.WatchedFileHandler(cfg['logfile'])
    handler.setFormatter(logging.Formatter(fmt='%(asctime)s %(levelname)s %(message)s',
                                           datefmt='%Y-%m-%d %H:%M:%S'))
    log = logging.getLogger()
    log.addHandler(handler)
    loglevel = set_loglevel(cfg['loglevel'])
    if loglevel:
        log.setLevel(loglevel)
    else:
        log.disabled = True

    log.info("%s started" % sys.argv[0])
    if os.path.isfile(cfg['cfgfile']):
        log.info("using configuration from '%s'" % cfg['cfgfile'])
    main(options)
    log.info("%s finished" % sys.argv[0])

bat2nodes.py (new executable file)

@@ -0,0 +1,84 @@
#!/usr/bin/env python3
import json
import fileinput
import argparse
import os

from batman import batman
from alfred import alfred
from rrd import rrd
from nodedb import NodeDB
from d3mapbuilder import D3MapBuilder

# Force encoding to UTF-8
import locale                                         # ensure that subsequent
locale.getpreferredencoding = lambda _=None: 'UTF-8'  # open()s are UTF-8 encoded

import sys
#sys.stdin = open('/dev/stdin', 'r')
#sys.stdout = open('/dev/stdout', 'w')
#sys.stderr = open('/dev/stderr', 'w')

parser = argparse.ArgumentParser()

parser.add_argument('-a', '--aliases',
                    help='read aliases from FILE',
                    action='append',
                    metavar='FILE')

parser.add_argument('-m', '--mesh', action='append',
                    help='batman mesh interface')

parser.add_argument('-o', '--obscure', action='store_true',
                    help='obscure client macs')

parser.add_argument('-A', '--alfred', action='store_true',
                    help='retrieve aliases from alfred')

parser.add_argument('-d', '--destination-directory', action='store',
                    help='destination directory for generated files',
                    required=True)

args = parser.parse_args()
options = vars(args)

db = NodeDB()

if options['mesh']:
    for mesh_interface in options['mesh']:
        bm = batman(mesh_interface)
        db.parse_vis_data(bm.vis_data(options['alfred']))
        for gw in bm.gateway_list():
            db.mark_gateways(gw.mac)
else:
    bm = batman()
    db.parse_vis_data(bm.vis_data(options['alfred']))
    for gw in bm.gateway_list():
        db.mark_gateways([gw['mac']])

if options['aliases']:
    for aliases in options['aliases']:
        db.import_aliases(json.load(open(aliases)))

if options['alfred']:
    af = alfred()
    db.import_aliases(af.aliases())

if options['obscure']:
    db.obscure_clients()

scriptdir = os.path.dirname(os.path.realpath(__file__))

rrd = rrd(scriptdir + "/nodedb/", options['destination_directory'] + "/nodes")
rrd.update_database(db)
rrd.update_images()

m = D3MapBuilder(db)

# Write nodes json
nodes_json = open(options['destination_directory'] + '/nodes.json.new', 'w')
nodes_json.write(m.build())
nodes_json.close()

# Move to destination
os.rename(options['destination_directory'] + '/nodes.json.new',
          options['destination_directory'] + '/nodes.json')

batman.py (new executable file)

@@ -0,0 +1,84 @@
#!/usr/bin/env python3
import subprocess
import json
import re


class batman:
    """ Bindings for the B.A.T.M.A.N. advanced batctl tool
    """
    def __init__(self, mesh_interface="bat0"):
        self.mesh_interface = mesh_interface

    def vis_data(self, batadv_vis=False):
        vds = self.vis_data_batctl_legacy()
        if batadv_vis:
            vds += self.vis_data_batadv_vis()
        return vds

    def vis_data_helper(self, lines):
        vd = []
        for line in lines:
            try:
                utf8_line = line.decode("utf-8")
                vd.append(json.loads(utf8_line))
            except ValueError:
                # skip lines that are not valid JSON
                pass
        return vd

    def vis_data_batctl_legacy(self):
        """ Parse "batctl -m <mesh_interface> vd json -n"
        into an array of dictionaries.
        """
        output = subprocess.check_output(["batctl", "-m", self.mesh_interface, "vd", "json", "-n"])
        lines = output.splitlines()
        vds = self.vis_data_helper(lines)
        for vd in vds:
            vd['legacy'] = True
        return vds

    def vis_data_batadv_vis(self):
        """ Parse "batadv-vis -i <mesh_interface> -f json"
        into an array of dictionaries.
        """
        output = subprocess.check_output(["batadv-vis", "-i", self.mesh_interface, "-f", "json"])
        lines = output.splitlines()
        return self.vis_data_helper(lines)

    def gateway_list(self):
        """ Parse "batctl -m <mesh_interface> gwl -n"
        into an array of dictionaries.
        """
        output = subprocess.check_output(["batctl", "-m", self.mesh_interface, "gwl", "-n"])
        output_utf8 = output.decode("utf-8")
        # TODO Parse information
        lines = output_utf8.splitlines()

        own_mac = re.match(r"^.*MainIF/MAC: [^/]+/([0-9a-f:]+).*$", lines[0]).group(1)

        # Remove header line
        del lines[0]

        # Fill gateway list
        gw = []
        gw_mode = self.gateway_mode()
        if gw_mode['mode'] == 'server':
            gw.append({'mac': own_mac, 'bandwidth': gw_mode['bandwidth']})

        for line in lines:
            gw_line = line.split()
            # When in client gateway mode, gw_line[0] may not be the right field.
            gw.append({'mac': gw_line[0], 'bandwidth': gw_line[-1]})

        return gw

    def gateway_mode(self):
        """ Parse "batctl -m <mesh_interface> gw"
        """
        output = subprocess.check_output(["batctl", "-m", self.mesh_interface, "gw"])
        elements = output.decode("utf-8").split()
        mode = elements[0]
        if mode == "server":
            return {'mode': 'server', 'bandwidth': elements[3]}
        else:
            return {'mode': mode}

if __name__ == "__main__":
    bc = batman()
    vd = bc.vis_data()
    gw = bc.gateway_list()
    for x in vd:
        print(x)
    print(gw)
    print(bc.gateway_mode())
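The MainIF/MAC extraction above is easiest to see on a concrete line; here is a fabricated "batctl gwl -n" header (the exact wording varies by batctl version) run through the same regex:

```
import re

# Invented header line; only the "MainIF/MAC: <iface>/<mac>" part matters.
header = "Gateway (#/255) Nexthop [outgoingIF] [B.A.T.M.A.N. adv 2013.4.0, MainIF/MAC: eth0/52:54:00:12:34:56 (bat0)]"
own_mac = re.match(r"^.*MainIF/MAC: [^/]+/([0-9a-f:]+).*$", header).group(1)
print(own_mac)  # -> 52:54:00:12:34:56
```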

d3mapbuilder.py (new file)

@@ -0,0 +1,35 @@
import json
import datetime


class D3MapBuilder:
    def __init__(self, db):
        self._db = db

    def build(self):
        output = dict()

        now = datetime.datetime.utcnow().replace(microsecond=0)

        nodes = self._db.get_nodes()

        output['nodes'] = [{'name': x.name, 'id': x.id,
                            'macs': ', '.join(x.macs),
                            'geo': [float(v) for v in x.gps.split(" ")] if x.gps else None,
                            'firmware': x.firmware,
                            'flags': x.flags
                            } for x in nodes]

        links = self._db.get_links()

        output['links'] = [{'source': x.source.id, 'target': x.target.id,
                            'quality': x.quality,
                            'type': x.type,
                            'id': x.id
                            } for x in links]

        output['meta'] = {
            'timestamp': now.isoformat()
        }

        return json.dumps(output)

ffhlwiki.py (new executable file)

@@ -0,0 +1,93 @@
#!/usr/bin/env python3
import json
import argparse
from itertools import zip_longest
from urllib.request import urlopen
from bs4 import BeautifulSoup


def import_wikigps(url):
    def fetch_wikitable(url):
        f = urlopen(url)

        soup = BeautifulSoup(f)

        table = soup.find_all("table")[0]

        rows = table.find_all("tr")

        headers = []
        data = []

        def maybe_strip(x):
            if isinstance(x.string, str):
                return x.string.strip()
            else:
                return ""

        for row in rows:
            tds = list([maybe_strip(x) for x in row.find_all("td")])
            ths = list([maybe_strip(x) for x in row.find_all("th")])

            if any(tds):
                data.append(tds)

            if any(ths):
                headers = ths

        nodes = []

        for d in data:
            nodes.append(dict(zip(headers, d)))

        return nodes

    nodes = fetch_wikitable(url)

    aliases = {}

    for node in nodes:
        try:
            node['MAC'] = node['MAC'].split(',')
        except KeyError:
            pass

        try:
            node['GPS'] = node['GPS'].split(',')
        except KeyError:
            pass

        try:
            node['Knotenname'] = node['Knotenname'].split(',')
        except KeyError:
            pass

        # zip MACs, coordinates and names; trailing entries may be missing
        entries = zip_longest(node['MAC'], node['GPS'], node['Knotenname'])

        for data in entries:
            alias = {}

            mac = data[0].strip()

            if data[1]:
                alias['gps'] = data[1].strip()

            if data[2]:
                alias['name'] = data[2].strip()

            aliases[mac] = alias

    return aliases

parser = argparse.ArgumentParser()

parser.add_argument('url', help='wiki URL')

args = parser.parse_args()
options = vars(args)

aliases = import_wikigps(options['url'])

print(json.dumps(aliases))

ffmap-d3.jq (deleted)

@@ -1,52 +0,0 @@
{
  "meta": {
    "timestamp": $nodes.timestamp
  },
  "nodes": (
    $graph.batadv.nodes
    | map(
      if has("node_id") and .node_id
      then (
        $nodes.nodes[.node_id] as $node
        | {
          "id": .id,
          "uptime": $node.statistics.uptime,
          "flags": ($node.flags + {"client": false}),
          "name": $node.nodeinfo.hostname,
          "clientcount": (if $node.statistics.clients >= 0 then $node.statistics.clients else 0 end),
          "hardware": $node.nodeinfo.hardware.model,
          "firmware": $node.nodeinfo.software.firmware.release,
          "geo": (if $node.nodeinfo.location then [$node.nodeinfo.location.latitude, $node.nodeinfo.location.longitude] else null end),
          #"lastseen": $node.lastseen,
          "network": $node.nodeinfo.network
        }
      )
      else
        {
          "flags": {},
          "id": .id,
          "geo": null,
          "clientcount": 0
        }
      end
    )
  ),
  "links": (
    $graph.batadv.links
    | map(
      $graph.batadv.nodes[.source].node_id as $source_id
      | $graph.batadv.nodes[.target].node_id as $target_id
      | select(
        $source_id and $target_id and
        ($nodes.nodes | (has($source_id) and has($target_id)))
      )
      | {
        "target": .target,
        "source": .source,
        "quality": "\(.tq), \(.tq)",
        "id": ($source_id + "-" + $target_id),
        "type": (if .vpn then "vpn" else null end)
      }
    )
  )
}

(file name not rendered, deleted)

@@ -1,185 +0,0 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Manually modify node data

This is useful, for example, for switched-off nodes that are only
temporarily unavailable: they can be hidden. That is better than
deleting them, because their statistics are preserved.

Change log
==========
Version  Date        Change(s)                                          By
-------- ----------- -------------------------------------------------- ----
1.0      2017-08-03  Integrated the program into the ffmap-backend      tho
                     project
"""
import argparse
import configparser
import json
import os
import sys
import glob

# Settings are processed in the following order;
# values set later override earlier ones:
# 1. hard-coded in the program
# 2. read from the central configuration file
# 3. given as command line options
cfg = {
    'cfgfile': '/etc/ffmap/ffmap.cfg',
    'logfile': '/var/log/ffmap.log',
    'loglevel': 2,
    'dest_dir': '/var/lib/ffmap/mapdata',
    'nodedb': '/var/lib/ffmap/nodedb',
    'imgpath': '/var/www/meshviewer/stats/img'
}

roles_defined = ('node', 'temp', 'mobile', 'offloader', 'service', 'test', 'gate', 'plan', 'hidden')


def main(cfg):
    # paths of the files involved
    nodes_fn = os.path.join(cfg['dest_dir'], 'nodes.json')
    nodelist_fn = os.path.join(cfg['dest_dir'], 'nodelist.json')

    # 1. node data (NodeDB)
    # 1.1 load the data
    try:
        with open(nodes_fn, 'r') as nodedb_handle:
            nodedb = json.load(nodedb_handle)
    except IOError:
        print("Error reading nodedb file %s" % nodes_fn)
        nodedb = {'nodes': dict()}

    # 1.2 modify nodes
    changed = False
    for n in cfg['nodeid']:
        if n in nodedb['nodes']:
            print("Modify %s in nodedb" % n)
            if 'role' in cfg and cfg['role'] in roles_defined:
                try:
                    oldrole = nodedb['nodes'][n]['nodeinfo']['system']['role']
                except KeyError:
                    oldrole = '<unset>'
                print(" - change role from '%s' to '%s'" % (oldrole, cfg['role']))
                nodedb['nodes'][n]['nodeinfo']['system']['role'] = cfg['role']
                changed = True
            if 'location' in cfg:
                print(" - remove location")
                # del nodedb['nodes'][n]['nodeinfo']['location']
                changed = True
        else:
            print("Node %s not found in nodedb" % n)

    # 1.3 write back the changed data
    if changed:
        try:
            with open(nodes_fn, 'w') as nodedb_handle:
                json.dump(nodedb, nodedb_handle)
        except IOError:
            print("Error writing nodedb file %s" % nodes_fn)

    # 2. node list (NodeList)
    try:
        with open(nodelist_fn, 'r') as nodelist_handle:
            nodelist = json.load(nodelist_handle)
    except IOError:
        print("Error reading nodelist file %s" % nodelist_fn)
        nodelist = {'nodelist': dict()}

    # 2.1 modify nodes
    changed = False
    ixlist = []
    for nodeid in cfg['nodeid']:
        found = False
        for ix, node in enumerate(nodelist['nodes']):
            if node['id'] == nodeid:
                found = True
                break
        if found:
            print("Modify %s in nodelist" % nodeid)
            if 'role' in cfg and cfg['role'] in roles_defined:
                try:
                    oldrole = nodelist['nodes'][ix]['role']
                except KeyError:
                    oldrole = '<unset>'
                print(" - change role from '%s' to '%s'" % (oldrole, cfg['role']))
                nodelist['nodes'][ix]['role'] = cfg['role']
            if 'location' in cfg:
                print(" - remove location")
                try:
                    #del nodelist['nodes'][ix]['position']
                    pass
                except KeyError:
                    pass
            changed = True
        else:
            print("Node %s not found in nodelist" % nodeid)

    # 2.2 write back the changed data
    if changed:
        try:
            with open(nodelist_fn, 'w') as nodelist_handle:
                json.dump(nodelist, nodelist_handle)
        except IOError:
            print("Error writing nodelist file %s" % nodelist_fn)


if __name__ == "__main__":
    # read options from the command line
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config', action='store',
                        help='Configuration file')
    parser.add_argument('-d', '--dest-dir', action='store',
                        help='Directory with JSON data files',
                        required=False)
    parser.add_argument('-i', '--nodeid', metavar='ID', action='store',
                        nargs='+', required=True,
                        help='Node id to modify')
    parser.add_argument('-l', '--location', action='store_true',
                        help='Clear location information (hides node)',
                        required=False)
    parser.add_argument('-r', '--role', action='store',
                        help='Set new role',
                        required=False)
    # TODO
    # options for what exactly should be done:
    # -p remove position, node is no longer displayed
    # -r <role> set role
    options = vars(parser.parse_args())

    # read the configuration file
    if options['config']:
        cfg['cfgfile'] = options['config']
    config = configparser.ConfigParser(cfg)
    # config.read returns a list of the files parsed;
    # if it is empty, the file did not exist, for example
    if config.read(cfg['cfgfile']):
        if 'global' in config:
            cfg['logfile'] = config['global']['logfile']
            cfg['loglevel'] = config['global']['loglevel']
            cfg['dest_dir'] = config['global']['dest_dir']
    else:
        print('Config file %s not parsed' % cfg['cfgfile'])

    # command line options have the highest priority
    cfg['nodeid'] = options['nodeid']
    if options['dest_dir']:
        cfg['dest_dir'] = options['dest_dir']
    if options['location']:
        cfg['location'] = True
    if options['role']:
        cfg['role'] = options['role']

    # everything is initialized, off we go
    main(cfg)

(file name not rendered, deleted)

@@ -1,225 +0,0 @@
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Manually delete a node from the backend:
- JSON
- NodeDB
- NodeList
- Graph
- RRD files
- images on the webserver

Change log
==========
Version  Date        Change(s)                                          By
-------- ----------- -------------------------------------------------- ----
1.0      2017-01-06  Integrated the program into the ffmap-backend      tho
                     project
"""
import argparse
import configparser
import json
import os
import sys
import glob

# Settings are processed in the following order;
# values set later override earlier ones:
# 1. hard-coded in the program
# 2. read from the central configuration file
# 3. given as command line options
cfg = {
    'cfgfile': '/etc/ffmap/ffmap.cfg',
    'logfile': '/var/log/ffmap.log',
    'loglevel': 2,
    'dest_dir': '/var/lib/ffmap/mapdata',
    'nodedb': '/var/lib/ffmap/nodedb',
    'imgpath': '/var/www/meshviewer/stats/img'
}


def main(cfg):
    # paths of the files involved
    nodes_fn = os.path.join(cfg['dest_dir'], 'nodes.json')
    graph_fn = os.path.join(cfg['dest_dir'], 'graph.json')
    nodelist_fn = os.path.join(cfg['dest_dir'], 'nodelist.json')

    # 1. clean up node data (NodeDB)
    # 1.1 load the data
    try:
        with open(nodes_fn, 'r') as nodedb_handle:
            nodedb = json.load(nodedb_handle)
    except IOError:
        print("Error reading nodedb file %s" % nodes_fn)
        nodedb = {'nodes': dict()}

    # 1.2 remove nodes
    changed = False
    for n in cfg['nodeid']:
        if n in nodedb['nodes']:
            print("Remove %s from nodedb" % n)
            del nodedb['nodes'][n]
            changed = True
        else:
            print("Node %s not found in nodedb" % n)

    # 1.3 write back the changed data
    if changed:
        try:
            with open(nodes_fn, 'w') as nodedb_handle:
                json.dump(nodedb, nodedb_handle)
        except IOError:
            print("Error writing nodedb file %s" % nodes_fn)

    # 2. clean up the node list (NodeList)
    try:
        with open(nodelist_fn, 'r') as nodelist_handle:
            nodelist = json.load(nodelist_handle)
    except IOError:
        print("Error reading nodelist file %s" % nodelist_fn)
        nodelist = {'nodelist': dict()}

    # 2.1 remove nodes
    changed = False
    ixlist = []
    for nodeid in cfg['nodeid']:
        found = False
        for ix, node in enumerate(nodelist['nodes']):
            if node['id'] == nodeid:
                found = True
                break
        if found:
            print("Remove %s from nodelist" % nodeid)
            del nodelist['nodes'][ix]
            changed = True
        else:
            print("Node %s not found in nodelist" % nodeid)

    # 2.2 write back the changed data
    if changed:
        try:
            with open(nodelist_fn, 'w') as nodelist_handle:
                json.dump(nodelist, nodelist_handle)
        except IOError:
            print("Error writing nodelist file %s" % nodelist_fn)

    # 3. clean up the graph (NodeGraph)
    # 3.1 load the graph
    try:
        with open(graph_fn, 'r') as graph_handle:
            graph = json.load(graph_handle)
    except IOError:
        print("Error reading graph file %s" % graph_fn)
        graph = {'graph': dict()}

    # 3.2 find nodes and links
    # nodes and links belong together
    changed = False
    for nodeid in cfg['nodeid']:
        found = False
        for ixn, node in enumerate(graph["batadv"]["nodes"]):
            # there may be nodes without a "node_id"
            try:
                if node["node_id"] == nodeid:
                    found = True
                    break
            except KeyError:
                pass
        if found:
            print("Found %s in graph nodes at index %d" % (nodeid, ixn))
            del graph["batadv"]["nodes"][ixn]
            # find links whose source or target matches the found index
            ixlist = []
            for ixg, link in enumerate(graph["batadv"]["links"]):
                if link["source"] == ixn:
                    print("Found source link at index %d" % ixg)
                    print(" -> %s" % graph["batadv"]["nodes"][link["target"]])
                    ixlist.append(ixg)
                if link["target"] == ixn:
                    print("Found target link at index %d" % ixg)
                    print(" -> %s" % graph["batadv"]["nodes"][link["source"]])
                    ixlist.append(ixg)
            # delete the collected links, highest index first, so that
            # earlier deletions do not shift the remaining indices
            for ix in sorted(ixlist, reverse=True):
                del graph["batadv"]["links"][ix]
            changed = True
        else:
            print("Node %s not found in graph nodes" % nodeid)

    # 3.3 write back the changed data
    if changed:
        try:
            with open(graph_fn, 'w') as graph_handle:
                json.dump(graph, graph_handle)
        except IOError:
            print("Error writing graph file %s" % graph_fn)

    # 4. remove RRD files
    for nodeid in cfg['nodeid']:
        rrdfile = os.path.join(cfg['nodedb'], nodeid + '.rrd')
        if os.path.isfile(rrdfile):
            print("Removing RRD database file %s" % os.path.basename(rrdfile))
        else:
            print("RRD database file %s not found" % os.path.basename(rrdfile))
        try:
            os.remove(rrdfile)
        except OSError:
            pass

    # 5. remove images from the webserver
    count_deleted = 0
    for nodeid in cfg['nodeid']:
        for imagefile in glob.glob(os.path.join(cfg['imgpath'], nodeid + '_*.png')):
            print("Removing stats image %s" % os.path.basename(imagefile))
            try:
                os.remove(imagefile)
                count_deleted += 1
            except OSError:
                pass
    if count_deleted == 0:
        print("No stats images found in %s" % cfg['imgpath'])


if __name__ == "__main__":
    # read options from the command line
    parser = argparse.ArgumentParser()
    parser.add_argument('-c', '--config', action='store',
                        help='Configuration file')
    parser.add_argument('-d', '--dest-dir', action='store',
                        help='Directory with JSON data files',
                        required=False)
    parser.add_argument('-i', '--nodeid', metavar='ID', action='store',
                        nargs='+', required=True,
                        help='Node id to remove')
    parser.add_argument('-n', '--nodedb', metavar='RRD_DIR', action='store',
                        help='Directory for node RRD data files')
    options = vars(parser.parse_args())

    # read the configuration file
    if options['config']:
        cfg['cfgfile'] = options['config']
    config = configparser.ConfigParser(cfg)
    # config.read returns a list of the files parsed;
    # if it is empty, the file did not exist, for example
    if config.read(cfg['cfgfile']):
        if 'global' in config:
            cfg['logfile'] = config['global']['logfile']
            cfg['loglevel'] = config['global']['loglevel']
            cfg['dest_dir'] = config['global']['dest_dir']
        if 'rrd' in config:
            cfg['nodedb'] = config['rrd']['nodedb']
    else:
        print('Config file %s not parsed' % cfg['cfgfile'])

    # command line options have the highest priority
    cfg['nodeid'] = options['nodeid']
    if options['dest_dir']:
        cfg['dest_dir'] = options['dest_dir']
    if options['nodedb']:
        cfg['nodedb'] = options['nodedb']

    # everything is initialized, off we go
    main(cfg)

hostid.py (new file)

@@ -0,0 +1,13 @@
import re
from functools import reduce


def mac_to_hostid(mac):
    # Modified EUI-64: flip the universal/local bit of the first octet
    # and insert ff:fe between the upper and lower three octets.
    int_mac = list(map(lambda x: int(x, 16), mac.split(":")))
    int_mac[0] ^= 2
    bytes = map(lambda x: "%02x" % x, int_mac[0:3] + [0xff, 0xfe] + int_mac[3:])
    # join the eight octets into four colon-separated 16-bit groups
    return reduce(lambda a, i:
                  [a[0] + ("" if i == 0 else ":") + a[1] + a[2]] + a[3:],
                  range(0, 4),
                  [""] + list(bytes)
                  )
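Traced by hand (the universal/local bit turns 52 into 50), the reduce returns a one-element list:

```
print(mac_to_hostid("52:54:00:f3:62:d9"))  # -> ['5054:00ff:fef3:62d9']
```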

lib/GateRRD.py (deleted)

@@ -1,74 +0,0 @@
"""
RRD for gateways
"""
import os
import subprocess

from lib.RRD import DS, RRA, RRD


class GateRRD(RRD):
    ds_list = [
        DS('upstate', 'GAUGE', 120, 0, 1),
        DS('clients', 'GAUGE', 120, 0, float('NaN')),
        DS('loadavg', 'GAUGE', 120, 0, float('NaN')),
        DS('leases', 'GAUGE', 120, 0, float('NaN')),
    ]
    rra_list = [
        RRA('AVERAGE', 0.5, 1, 120),    # 2 hours of 1 minute samples
        RRA('AVERAGE', 0.5, 5, 1440),   # 5 days of 5 minute samples
        RRA('AVERAGE', 0.5, 15, 672),   # 7 days of 15 minute samples
        RRA('AVERAGE', 0.5, 60, 720),   # 30 days of 1 hour samples
        RRA('AVERAGE', 0.5, 720, 730),  # 1 year of 12 hour samples
    ]

    def __init__(self, filename, node=None):
        """
        Create a new RRD for a given node.

        If the RRD isn't supposed to be updated, the node can be omitted.
        """
        self.node = node
        super().__init__(filename)
        self.ensure_sanity(self.ds_list, self.rra_list, step=60)

    @property
    def imagename(self):
        return "{basename}.png".format(
            basename=os.path.basename(self.filename).rsplit('.', 2)[0])

    # TODO: fix this, python does not support function overloading
    def update(self):
        values = {
            'upstate': int(self.node['flags']['online']),
            'clients': float(self.node['statistics']['clients']),
        }
        if 'loadavg' in self.node['statistics']:
            values['loadavg'] = float(self.node['statistics'].get('loadavg', 0))
        # Gateways can send the peer count. We use the clients field to store data
        if 'peers' in self.node['statistics']:
            values['clients'] = self.node['statistics']['peers']
        if 'leases' in self.node['statistics']:
            values['leases'] = self.node['statistics']['leases']
        super().update(values)

    def graph(self, directory, timeframe):
        """
        Create a graph in the given directory. The file will be named
        basename.png if the RRD file is named basename.rrd
        """
        args = ['rrdtool', 'graph', os.path.join(directory, self.imagename),
                '-s', '-' + timeframe,
                '-w', '800',
                '-h', '400',
                '-l', '0',
                '-y', '1:1',
                'DEF:clients=' + self.filename + ':clients:AVERAGE',
                'VDEF:maxc=clients,MAXIMUM',
                'CDEF:c=0,clients,ADDNAN',
                'CDEF:d=clients,UN,maxc,UN,1,maxc,IF,*',
                'AREA:c#0F0:up\\l',
                'AREA:d#F00:down\\l',
                'LINE1:c#00F:clients connected\\l']
        subprocess.check_output(args)

lib/GlobalRRD.py (deleted)

@@ -1,40 +0,0 @@
import os
import subprocess

from lib.RRD import DS, RRA, RRD


class GlobalRRD(RRD):
    ds_list = [
        # Number of nodes available
        DS('nodes', 'GAUGE', 120, 0, float('NaN')),
        # Number of clients available
        DS('clients', 'GAUGE', 120, 0, float('NaN')),
    ]
    rra_list = [
        # 2 hours of 1 minute samples
        RRA('AVERAGE', 0.5, 1, 120),
        # 31 days of 1 hour samples
        RRA('AVERAGE', 0.5, 60, 744),
        # ~5 years of 1 day samples
        RRA('AVERAGE', 0.5, 1440, 1780),
    ]

    def __init__(self, directory):
        super().__init__(os.path.join(directory, "nodes.rrd"))
        self.ensure_sanity(self.ds_list, self.rra_list, step=60)

    # TODO: fix this, python does not support function overloading
    def update(self, node_count, client_count):
        super().update({'nodes': node_count, 'clients': client_count})

    def graph(self, filename, timeframe):
        args = ["rrdtool", 'graph', filename,
                '-s', '-' + timeframe,
                '-w', '800',
                '-h', '400',
                'DEF:nodes=' + self.filename + ':nodes:AVERAGE',
                'LINE1:nodes#F00:nodes\\l',
                'DEF:clients=' + self.filename + ':clients:AVERAGE',
                'LINE2:clients#00F:clients']
        subprocess.check_output(args)

lib/NodeRRD.py (deleted)

@@ -1,67 +0,0 @@
"""
RRD for nodes
"""
import os
import subprocess

from lib.RRD import DS, RRA, RRD


class NodeRRD(RRD):
    ds_list = [
        DS('upstate', 'GAUGE', 120, 0, 1),
        DS('clients', 'GAUGE', 120, 0, float('NaN')),
        DS('loadavg', 'GAUGE', 120, 0, float('NaN')),
    ]
    rra_list = [
        RRA('AVERAGE', 0.5, 1, 120),    # 2 hours of 1 minute samples
        RRA('AVERAGE', 0.5, 5, 1440),   # 5 days of 5 minute samples
        RRA('AVERAGE', 0.5, 60, 720),   # 30 days of 1 hour samples
        RRA('AVERAGE', 0.5, 720, 730),  # 1 year of 12 hour samples
    ]

    def __init__(self, filename, node=None):
        """
        Create a new RRD for a given node.

        If the RRD isn't supposed to be updated, the node can be omitted.
        """
        self.node = node
        super().__init__(filename)
        self.ensure_sanity(self.ds_list, self.rra_list, step=60)

    @property
    def imagename(self):
        return "{basename}.png".format(
            basename=os.path.basename(self.filename).rsplit('.', 2)[0])

    # TODO: fix this, python does not support function overloading
    def update(self):
        values = {
            'upstate': int(self.node['flags']['online']),
            'clients': self.node['statistics']['clients']
        }
        if 'loadavg' in self.node['statistics']:
            values['loadavg'] = float(self.node['statistics']['loadavg'])
        super().update(values)

    def graph(self, directory, timeframe):
        """
        Create a graph in the given directory. The file will be named
        basename.png if the RRD file is named basename.rrd
        """
        args = ['rrdtool', 'graph', os.path.join(directory, self.imagename),
                '-s', '-' + timeframe,
                '-w', '800',
                '-h', '400',
                '-l', '0',
                '-y', '1:1',
                'DEF:clients=' + self.filename + ':clients:AVERAGE',
                'VDEF:maxc=clients,MAXIMUM',
                'CDEF:c=0,clients,ADDNAN',
                'CDEF:d=clients,UN,maxc,UN,1,maxc,IF,*',
                'AREA:c#0F0:up\\l',
                'AREA:d#F00:down\\l',
                'LINE1:c#00F:clients connected\\l']
        subprocess.check_output(args)

lib/RRD.py (deleted)

@@ -1,346 +0,0 @@
import subprocess
import re
import os
from operator import xor, eq
from functools import reduce
from itertools import starmap
import math


class RRDIncompatibleException(Exception):
    """
    Is raised when an RRD doesn't have the desired definition and cannot be
    upgraded to it.
    """
    pass


class RRDOutdatedException(Exception):
    """
    Is raised when an RRD doesn't have the desired definition, but can be
    upgraded to it.
    """
    pass


if not hasattr(__builtins__, "FileNotFoundError"):
    class FileNotFoundError(Exception):
        pass


class RRD(object):
    """
    An RRD is a Round Robin Database, a database which forgets old data and
    aggregates multiple records into new ones.

    It contains multiple Data Sources (DS) which can be thought of as columns,
    and Round Robin Archives (RRA) which can be thought of as tables with the
    DS as columns and time-dependent rows.
    """

    # rra[2].cdp_prep[0].value = 1,8583033333e+03
    _info_regex = re.compile(r"""
        (?P<section>[a-z_]+)
        \[ (?P<key>[a-zA-Z0-9_]+) \]
        \.
        |
        (?P<name>[a-z_]+)
        \s*=\s*
        "? (?P<value>.*?) "?
        $""", re.X)
    _cached_info = None

    def _exec_rrdtool(self, cmd, *args, **kwargs):
        pargs = ["rrdtool", cmd, self.filename]
        for k, v in kwargs.items():
            pargs.extend(["--" + k, str(v)])
        pargs.extend(args)
        subprocess.check_output(pargs)

    def __init__(self, filename):
        self.filename = filename

    def ensure_sanity(self, ds_list, rra_list, **kwargs):
        """
        Create or upgrade the RRD file if necessary to contain all DS in
        ds_list. If it needs to be created, the RRAs in rra_list and any kwargs
        will be used for creation. Note that RRAs and options of an existing
        database are NOT modified!
        """
        try:
            self.check_sanity(ds_list)
        except FileNotFoundError:
            self.create(ds_list, rra_list, **kwargs)
        except RRDOutdatedException:
            self.upgrade(ds_list)

    def check_sanity(self, ds_list=()):
        """
        Check if the RRD file exists and contains (at least) the DS listed in
        ds_list.
        """
        if not os.path.exists(self.filename):
            raise FileNotFoundError(self.filename)
        info = self.info()
        if set(ds_list) - set(info['ds'].values()) != set():
            for ds in ds_list:
                if ds.name in info['ds'] and \
                        ds.type != info['ds'][ds.name].type:
                    raise RRDIncompatibleException(
                        "{} is {} but should be {}".format(
                            ds.name, ds.type, info['ds'][ds.name].type))
            else:
                raise RRDOutdatedException()

    def upgrade(self, dss):
        """
        Upgrade the DS definitions (!) of this RRD.
        (To update its values, use update())

        The list dss contains DS objects to be updated or added. The
        parameters of a DS can be changed, but not its type. New DS are always
        added at the end in the order of their appearance in the list.

        This is done internally via an rrdtool dump -> rrdtool restore and
        modifying the dump on the fly.
        """
        info = self.info()
        new_ds = list(info['ds'].values())
        new_ds.sort(key=lambda ds: ds.index)
        for ds in dss:
            if ds.name in info['ds']:
                old_ds = info['ds'][ds.name]
                if info['ds'][ds.name].type != ds.type:
                    raise RuntimeError(
                        "Cannot convert existing DS '{}' "
                        "from type '{}' to '{}'".format(
                            ds.name, old_ds.type, ds.type))
                ds.index = old_ds.index
                new_ds[ds.index] = ds
            else:
                ds.index = len(new_ds)
                new_ds.append(ds)
        added_ds_num = len(new_ds) - len(info['ds'])

        dump = subprocess.Popen(
            ["rrdtool", "dump", self.filename],
            stdout=subprocess.PIPE)
        restore = subprocess.Popen(
            ["rrdtool", "restore", "-", self.filename + ".new"],
            stdin=subprocess.PIPE)
        echo = True
        ds_definitions = True
        for line in dump.stdout:
            if ds_definitions and b'<ds>' in line:
                echo = False
            if b'<!-- Round Robin Archives -->' in line:
                ds_definitions = False
                for ds in new_ds:
                    restore.stdin.write(bytes("""
        <ds>
            <name> %s </name>
            <type> %s </type>
            <minimal_heartbeat>%i</minimal_heartbeat>
            <min>%s</min>
            <max>%s</max>

            <!-- PDP Status -->
            <last_ds>%s</last_ds>
            <value>%s</value>
            <unknown_sec> %i </unknown_sec>
        </ds>
                    """ % (ds.name,
                           ds.type,
                           ds.args[0],
                           ds.args[1],
                           ds.args[2],
                           ds.last_ds,
                           ds.value,
                           ds.unknown_sec), "utf-8"))

            if b'</cdp_prep>' in line:
                restore.stdin.write(added_ds_num * b"""
            <ds>
                <primary_value> NaN </primary_value>
                <secondary_value> NaN </secondary_value>
                <value> NaN </value>
                <unknown_datapoints> 0 </unknown_datapoints>
            </ds>
""")

            # echoing of input line
            if echo:
                restore.stdin.write(
                    line.replace(
                        b'</row>',
                        (added_ds_num * b'<v>NaN</v>') + b'</row>'
                    )
                )

            if ds_definitions and b'</ds>' in line:
                echo = True
        dump.stdout.close()
        restore.stdin.close()
        dump.wait()
        restore.wait()
        os.rename(self.filename + ".new", self.filename)
        self._cached_info = None

    def create(self, ds_list, rra_list, **kwargs):
        """
        Create a new RRD file with the specified list of RRAs and DSs.

        Any kwargs are passed as --key=value to rrdtool create.
        """
        self._exec_rrdtool(
            "create",
            *map(str, rra_list + ds_list),
            **kwargs
        )
        self._cached_info = None

    def update(self, V):
        """
        Update the RRD with new values V.

        V can be either list or dict:
        * If it is a dict, its keys must be DS names in the RRD and it is
          ensured that the correct DS are updated with the correct values, by
          passing a "template" to rrdtool update (see man rrdupdate).
        * If it is a list, no template is generated and the order of the
          values in V must be the same as that of the DS in the RRD.
        """
        try:
            args = ['N:' + ':'.join(map(str, V.values()))]
            kwargs = {'template': ':'.join(V.keys())}
        except AttributeError:
            args = ['N:' + ':'.join(map(str, V))]
            kwargs = {}
        self._exec_rrdtool("update", *args, **kwargs)
        self._cached_info = None

    def info(self):
        """
        Return a dictionary with information about the RRD.

        See `man rrdinfo` for more details.
        """
        if self._cached_info:
            return self._cached_info
        env = os.environ.copy()
        env["LC_ALL"] = "C"
        proc = subprocess.Popen(
            ["rrdtool", "info", self.filename],
            stdout=subprocess.PIPE,
            env=env
        )
        out, err = proc.communicate()
        out = out.decode()
        info = {}
        for line in out.splitlines():
            base = info
            for match in self._info_regex.finditer(line):
                section, key, name, value = match.group(
                    "section", "key", "name", "value")
                if section and key:
                    try:
                        key = int(key)
                    except ValueError:
                        pass
                    if section not in base:
                        base[section] = {}
                    if key not in base[section]:
                        base[section][key] = {}
                    base = base[section][key]
                if name and value:
                    try:
                        base[name] = int(value)
                    except ValueError:
                        try:
                            base[name] = float(value)
                        except ValueError:
                            base[name] = value
        dss = {}
        for name, ds in info['ds'].items():
            ds_obj = DS(name, ds['type'], ds['minimal_heartbeat'],
                        ds['min'], ds['max'])
            ds_obj.index = ds['index']
            ds_obj.last_ds = ds['last_ds']
            ds_obj.value = ds['value']
            ds_obj.unknown_sec = ds['unknown_sec']
            dss[name] = ds_obj
        info['ds'] = dss
        rras = []
        for rra in info['rra'].values():
            rras.append(RRA(rra['cf'], rra['xff'],
                            rra['pdp_per_row'], rra['rows']))
        info['rra'] = rras
        self._cached_info = info
        return info


class DS(object):
    """
    DS stands for Data Source and represents one line of data points in a
    Round Robin Database (RRD).
    """
    name = None
    type = None
    args = []
    index = -1
    last_ds = 'U'
    value = 0
    unknown_sec = 0

    def __init__(self, name, dst, *args):
        self.name = name
        self.type = dst
        self.args = args

    def __str__(self):
        return "DS:%s:%s:%s" % (
            self.name,
            self.type,
            ":".join(map(str, self._nan_to_u_args()))
        )

    def __repr__(self):
        return "%s(%r, %r, %s)" % (
            self.__class__.__name__,
            self.name,
            self.type,
            ", ".join(map(repr, self.args))
        )

    def __eq__(self, other):
        return all(starmap(eq, zip(self.compare_keys(), other.compare_keys())))

    def __hash__(self):
        return reduce(xor, map(hash, self.compare_keys()))

    def _nan_to_u_args(self):
        return tuple(
            'U' if type(arg) is float and math.isnan(arg)
            else arg
            for arg in self.args
        )

    def compare_keys(self):
        return self.name, self.type, self._nan_to_u_args()


class RRA(object):
    def __init__(self, cf, *args):
        self.cf = cf
        self.args = args

    def __str__(self):
        return "RRA:%s:%s" % (self.cf, ":".join(map(str, self.args)))

    def __repr__(self):
        return "%s(%r, %s)" % (
            self.__class__.__name__,
            self.cf,
            ", ".join(map(repr, self.args))
        )
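A minimal usage sketch of this wrapper (file path illustrative; NodeRRD above does the same in earnest):

```
from lib.RRD import DS, RRA, RRD

db = RRD('/tmp/example.rrd')
# create (or verify) a database with one data source and one archive
db.ensure_sanity([DS('clients', 'GAUGE', 120, 0, float('NaN'))],
                 [RRA('AVERAGE', 0.5, 1, 120)],
                 step=60)
db.update({'clients': 42})  # dict form uses an rrdtool update template
```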

lib/__init__.py (deleted)

@@ -1 +0,0 @@
__author__ = 'hexa'

lib/alfred.py (deleted)

@@ -1,37 +0,0 @@
import subprocess
import json
import os


class Alfred(object):
    """
    Bindings for the alfred-json utility
    """
    def __init__(self, unix_sockpath=None):
        self.unix_sock = unix_sockpath
        if unix_sockpath is not None and not os.path.exists(unix_sockpath):
            raise RuntimeError('alfred: invalid unix socket path given')

    def _fetch(self, data_type):
        cmd = ['alfred-json',
               '-z',
               '-f', 'json',
               '-r', str(data_type)]
        if self.unix_sock:
            cmd.extend(['-s', self.unix_sock])

        # There should not be any warnings, which cron would mail out
        # every minute. Therefore suppress the error output of the
        # called program.
        FNULL = open(os.devnull, 'w')
        output = subprocess.check_output(cmd, stderr=FNULL)
        FNULL.close()
        return json.loads(output.decode("utf-8")).values()

    def nodeinfo(self):
        return self._fetch(158)

    def statistics(self):
        return self._fetch(159)

    def vis(self):
        return self._fetch(160)
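A minimal usage sketch (socket path illustrative):

```
from lib.alfred import Alfred

a = Alfred(unix_sockpath='/var/run/alfred.sock')
for nodeinfo in a.nodeinfo():  # decoded type-158 records
    print(nodeinfo.get('hostname'))
```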

lib/batman.py (deleted)

@@ -1,101 +0,0 @@
import subprocess
import json
import os
import re


class Batman(object):
    """
    Bindings for the B.A.T.M.A.N. Advanced
    commandline interface "batctl"
    """
    def __init__(self, mesh_interface='bat0', alfred_sockpath=None):
        self.mesh_interface = mesh_interface
        self.alfred_sock = alfred_sockpath

        # ensure /usr/sbin and /usr/local/sbin are in PATH
        env = os.environ
        path = set(env['PATH'].split(':'))
        path.add('/usr/sbin/')
        path.add('/usr/local/sbin')
        env['PATH'] = ':'.join(path)
        self.environ = env

        # compile regular expressions only once on startup
        self.mac_addr_pattern = re.compile(r'(([a-f0-9]{2}:){5}[a-f0-9]{2})')

    def vis_data(self):
        return self.vis_data_batadv_vis()

    @staticmethod
    def vis_data_helper(lines):
        vd_tmp = []
        for line in lines:
            try:
                utf8_line = line.decode('utf-8')
                vd_tmp.append(json.loads(utf8_line))
            except UnicodeDecodeError:
                pass
        return vd_tmp

    def vis_data_batadv_vis(self):
        """
        Parse "batadv-vis -i <mesh_interface> -f json"
        into an array of dictionaries.
        """
        cmd = ['batadv-vis', '-i', self.mesh_interface, '-f', 'json']
        if self.alfred_sock:
            cmd.extend(['-u', self.alfred_sock])
        output = subprocess.check_output(cmd, env=self.environ)
        lines = output.splitlines()
        return self.vis_data_helper(lines)

    def gateway_list(self):
        """
        Parse "batctl meshif <mesh_interface> gwl -n"
        into an array of dictionaries.
        """
        cmd = ['batctl', 'meshif', self.mesh_interface, 'gwl', '-n']
        if os.geteuid() > 0:
            cmd.insert(0, 'sudo')
        output = subprocess.check_output(cmd, env=self.environ)
        output_utf8 = output.decode('utf-8')
        rows = output_utf8.splitlines()

        gateways = []

        # local gateway
        header = rows.pop(0)
        mode, bandwidth = self.gateway_mode()
        if mode == 'server':
            local_gw_mac = self.mac_addr_pattern.search(header).group(0)
            gateways.append(local_gw_mac)

        # remote gateway(s)
        for row in rows:
            match = self.mac_addr_pattern.search(row)
            if match:
                gateways.append(match.group(1))

        return gateways

    def gateway_mode(self):
        """
        Parse "batctl meshif <mesh_interface> gw"
        return: tuple (mode, bandwidth); if mode != server, bandwidth is None
        """
        cmd = ['batctl', 'meshif', self.mesh_interface, 'gw']
        if os.geteuid() > 0:
            cmd.insert(0, 'sudo')
        output = subprocess.check_output(cmd, env=self.environ)
        chunks = output.decode("utf-8").split()

        return chunks[0], chunks[3] if len(chunks) > 3 else None


if __name__ == "__main__":
    bc = Batman()
    vd = bc.vis_data()
    gw = bc.gateway_list()
    for x in vd:
        print(x)
    print(gw)
    print(bc.gateway_mode())

lib/graph.py (deleted)

@@ -1,85 +0,0 @@
from functools import reduce
from itertools import chain

import networkx as nx

from lib.nodes import build_mac_table


def import_vis_data(graph, nodes, vis_data):
    macs = build_mac_table(nodes)
    nodes_a = map(lambda d: 2 * [d['primary']],
                  filter(lambda d: 'primary' in d, vis_data))
    nodes_b = map(lambda d: [d['secondary'], d['of']],
                  filter(lambda d: 'secondary' in d, vis_data))
    graph.add_nodes_from(map(lambda a, b:
                             (a, dict(primary=b, node_id=macs.get(b))),
                             *zip(*chain(nodes_a, nodes_b))))

    edges = filter(lambda d: 'neighbor' in d, vis_data)
    graph.add_edges_from(map(lambda d: (d['router'], d['neighbor'],
                                        dict(tq=float(d['label']))), edges))


def mark_vpn(graph, vpn_macs):
    components = map(frozenset, nx.weakly_connected_components(graph))
    components = filter(vpn_macs.intersection, components)
    nodes = reduce(lambda a, b: a | b, components, set())

    for node in nodes:
        for k, v in graph[node].items():
            v['vpn'] = True


def to_multigraph(graph):
    def f(a):
        node = graph.node[a]
        return node['primary'] if node else a

    def map_node(node, data):
        return (data['primary'],
                dict(node_id=data['node_id'])) if data else (node, dict())

    digraph = nx.MultiDiGraph()
    digraph.add_nodes_from(map(map_node, *zip(*graph.nodes_iter(data=True))))
    digraph.add_edges_from(map(lambda a, b, data: (f(a), f(b), data),
                               *zip(*graph.edges_iter(data=True))))

    return digraph


def merge_nodes(graph):
    def merge_edges(data):
        tq = min(map(lambda d: d['tq'], data))
        vpn = all(map(lambda d: d.get('vpn', False), data))
        return dict(tq=tq, vpn=vpn)

    multigraph = to_multigraph(graph)
    digraph = nx.DiGraph()
    digraph.add_nodes_from(multigraph.nodes_iter(data=True))
    edges = chain.from_iterable([[(e, d, merge_edges(
        multigraph[e][d].values()))
        for d in multigraph[e]] for e in multigraph])
    digraph.add_edges_from(edges)

    return digraph


def to_undirected(graph):
    multigraph = nx.MultiGraph()
    multigraph.add_nodes_from(graph.nodes_iter(data=True))
    multigraph.add_edges_from(graph.edges_iter(data=True))

    def merge_edges(data):
        tq = max(map(lambda d: d['tq'], data))
        vpn = all(map(lambda d: d.get('vpn', False), data))
        return dict(tq=tq, vpn=vpn, bidirect=len(data) == 2)

    graph = nx.Graph()
    graph.add_nodes_from(multigraph.nodes_iter(data=True))
    edges = chain.from_iterable([[(e, d, merge_edges(
        multigraph[e][d].values()))
        for d in multigraph[e]] for e in multigraph])
    graph.add_edges_from(edges)

    return graph

lib/nodelist.py (deleted)

@@ -1,32 +0,0 @@
def export_nodelist(now, nodedb):
    nodelist = list()

    for node_id, node in nodedb["nodes"].items():
        node_out = dict()
        node_out["id"] = node_id
        node_out["name"] = node["nodeinfo"]["hostname"]

        if "location" in node["nodeinfo"]:
            node_out["position"] = {"lat": node["nodeinfo"]["location"]["latitude"],
                                    "long": node["nodeinfo"]["location"]["longitude"]}

        node_out["status"] = dict()
        node_out["status"]["online"] = node["flags"]["online"]

        if "firstseen" in node:
            node_out["status"]["firstcontact"] = node["firstseen"]

        if "lastseen" in node:
            node_out["status"]["lastcontact"] = node["lastseen"]

        if "clients" in node["statistics"]:
            node_out["status"]["clients"] = node["statistics"]["clients"]

        if "role" in node["nodeinfo"]["system"]:
            node_out["role"] = node["nodeinfo"]["system"]["role"]
        else:
            node_out["role"] = "node"

        nodelist.append(node_out)

    return {"version": "1.0.1", "nodes": nodelist, "updated_at": now.isoformat()}
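The exported nodelist.json then has this shape (values invented):

```
{
  "version": "1.0.1",
  "updated_at": "2014-02-09T15:27:25",
  "nodes": [
    {
      "id": "66334e2703e4",
      "name": "mynode",
      "status": {"online": true, "clients": 5},
      "role": "node"
    }
  ]
}
```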

View File

@ -1,168 +0,0 @@
from collections import Counter, defaultdict
from datetime import datetime
from functools import reduce
def build_mac_table(nodes):
macs = dict()
for node_id, node in nodes.items():
try:
for mac in node['nodeinfo']['network']['mesh_interfaces']:
macs[mac] = node_id
except KeyError:
pass
try:
for upper_if in node['nodeinfo']['network']['mesh'].values():
for lower_if in upper_if['interfaces'].values():
for mac in lower_if:
macs[mac] = node_id
except KeyError:
pass
return macs
def prune_nodes(nodes, now, days):
prune = []
for node_id, node in nodes.items():
if 'lastseen' not in node:
prune.append(node_id)
continue
lastseen = datetime.strptime(node['lastseen'], '%Y-%m-%dT%H:%M:%S')
delta = (now - lastseen).days
if delta >= days:
prune.append(node_id)
for node_id in prune:
del nodes[node_id]
def mark_online(node, now):
node['lastseen'] = now.isoformat()
node.setdefault('firstseen', now.isoformat())
node['flags']['online'] = True
def import_nodeinfo(nodes, nodeinfos, now, assume_online=False):
for nodeinfo in filter(lambda d: 'node_id' in d, nodeinfos):
node = nodes.setdefault(nodeinfo['node_id'], {'flags': dict()})
node['nodeinfo'] = nodeinfo
node['flags']['online'] = False
node['flags']['gateway'] = False
if assume_online:
mark_online(node, now)
def reset_statistics(nodes):
for node in nodes.values():
node['statistics'] = {'clients': 0}
def import_statistics(nodes, stats):
def add(node, statistics, target, source, f=lambda d: d):
try:
node['statistics'][target] = f(reduce(dict.__getitem__,
source,
statistics))
except (KeyError, TypeError, ZeroDivisionError):
pass
macs = build_mac_table(nodes)
stats = filter(lambda d: 'node_id' in d, stats)
stats = filter(lambda d: d['node_id'] in nodes, stats)
for node, stats in map(lambda d: (nodes[d['node_id']], d), stats):
add(node, stats, 'clients', ['clients', 'total'])
add(node, stats, 'gateway', ['gateway'], lambda d: macs.get(d, d))
add(node, stats, 'uptime', ['uptime'])
add(node, stats, 'loadavg', ['loadavg'])
add(node, stats, 'memory_usage', ['memory'],
lambda d: 1 - d['free'] / d['total'])
add(node, stats, 'rootfs_usage', ['rootfs_usage'])
add(node, stats, 'traffic', ['traffic'])
def import_mesh_ifs_vis_data(nodes, vis_data):
macs = build_mac_table(nodes)
mesh_ifs = defaultdict(lambda: set())
for line in filter(lambda d: 'secondary' in d, vis_data):
primary = line['of']
mesh_ifs[primary].add(primary)
mesh_ifs[primary].add(line['secondary'])
def if_to_node(ifs):
a = filter(lambda d: d in macs, ifs)
a = map(lambda d: nodes[macs[d]], a)
try:
return next(a), ifs
except StopIteration:
return None
mesh_nodes = filter(lambda d: d, map(if_to_node, mesh_ifs.values()))
for v in mesh_nodes:
node = v[0]
ifs = set()
try:
ifs = ifs.union(set(node['nodeinfo']['network']['mesh_interfaces']))
except KeyError:
pass
try:
ifs = ifs.union(set(node['nodeinfo']['network']['mesh']['bat0']['interfaces']['wireless']))
except KeyError:
pass
try:
ifs = ifs.union(set(node['nodeinfo']['network']['mesh']['bat0']['interfaces']['tunnel']))
except KeyError:
pass
try:
ifs = ifs.union(set(node['nodeinfo']['network']['mesh']['bat0']['interfaces']['other']))
except KeyError:
pass
node['nodeinfo']['network']['mesh_interfaces'] = list(ifs | v[1])
def import_vis_clientcount(nodes, vis_data):
macs = build_mac_table(nodes)
data = filter(lambda d: d.get('label', None) == 'TT', vis_data)
data = filter(lambda d: d['router'] in macs, data)
data = map(lambda d: macs[d['router']], data)
for node_id, clientcount in Counter(data).items():
nodes[node_id]['statistics'].setdefault('clients', clientcount)
def mark_gateways(nodes, gateways):
macs = build_mac_table(nodes)
gateways = filter(lambda d: d in macs, gateways)
for node in map(lambda d: nodes[macs[d]], gateways):
node['flags']['gateway'] = True
def mark_vis_data_online(nodes, vis_data, now):
macs = build_mac_table(nodes)
online = set()
for line in vis_data:
if 'primary' in line:
online.add(line['primary'])
elif 'secondary' in line:
online.add(line['secondary'])
elif 'gateway' in line:
# This matches clients' MACs.
# On pre-Gluon nodes the primary MAC will be one of them.
online.add(line['gateway'])
for mac in filter(lambda d: d in macs, online):
mark_online(nodes[macs[mac]], now)

View File

@ -1,61 +0,0 @@
#!/usr/bin/env python3
import time
import os
from lib.GlobalRRD import GlobalRRD
from lib.NodeRRD import NodeRRD
from lib.GateRRD import GateRRD
class RRD(object):
def __init__(self,
database_directory,
image_path,
display_time_global="7d",
display_time_node="1d"):
self.dbPath = database_directory
self.globalDb = GlobalRRD(self.dbPath)
self.imagePath = image_path
self.displayTimeGlobal = display_time_global
self.displayTimeNode = display_time_node
self.currentTimeInt = (int(time.time()) // 60) * 60  # rrdtool expects an integer timestamp
self.currentTime = str(self.currentTimeInt)
def update_database(self, nodes):
online_nodes = dict(filter(
lambda d: d[1]['flags']['online'], nodes.items()))
client_count = sum(map(
lambda d: d['statistics']['clients'], online_nodes.values()))
# Refresh global database
self.globalDb.update(len(online_nodes), client_count)
# Refresh databases for all single nodes
for node_id, node in online_nodes.items():
if node['flags']['gateway']:
rrd = GateRRD(os.path.join(self.dbPath, node_id + '.rrd'), node)
else:
rrd = NodeRRD(os.path.join(self.dbPath, node_id + '.rrd'), node)
rrd.update()
def update_images(self):
# Create image path if it does not exist
try:
os.stat(self.imagePath)
except OSError:
os.mkdir(self.imagePath)
self.globalDb.graph(os.path.join(self.imagePath, "globalGraph.png"),
self.displayTimeGlobal)
nodedb_files = os.listdir(self.dbPath)
for file_name in nodedb_files:
if not os.path.isfile(os.path.join(self.dbPath, file_name)):
continue
node_name = os.path.basename(file_name).split('.')
if node_name[1] == 'rrd' and node_name[0] != "nodes":
rrd = NodeRRD(os.path.join(self.dbPath, file_name))
rrd.graph(self.imagePath, self.displayTimeNode)

View File

@ -1,19 +0,0 @@
import json
def validate_nodeinfos(nodeinfos):
result = []
for nodeinfo in nodeinfos:
if validate_nodeinfo(nodeinfo):
result.append(nodeinfo)
return result
def validate_nodeinfo(nodeinfo):
if 'location' in nodeinfo:
if 'latitude' not in nodeinfo['location'] or 'longitude' not in nodeinfo['location']:
return False
return True

15
link.py Normal file
View File

@ -0,0 +1,15 @@
class Link():
def __init__(self):
self.id = None
self.source = None
self.target = None
self.quality = None
self.type = None
class LinkConnector():
def __init__(self):
self.id = None
self.interface = None
def __repr__(self):
return "LinkConnector(%d, %s)" % (self.id, self.interface)

15
mkmap.sh Executable file
View File

@ -0,0 +1,15 @@
#!/bin/bash
set -e
DEST=$1
[ "$DEST" ] || exit 1
cd "$(dirname "$0")"/
./ffhlwiki.py http://freifunk.metameute.de/wiki/Knoten > aliases_hl.json
./ffhlwiki.py http://freifunk.metameute.de/wiki/Moelln:Knoten > aliases_moelln.json
./bat2nodes.py -A -a aliases.json -a aliases_hl.json -a aliases_moelln.json -d "$DEST"

30
node.py Normal file
View File

@ -0,0 +1,30 @@
class Node():
def __init__(self):
self.name = ""
self.id = ""
self.macs = set()
self.interfaces = dict()
self.flags = dict({
"online": False,
"gateway": False,
"client": False
})
self.gps = None
self.firmware = None
def add_mac(self, mac):
mac = mac.lower()
if len(self.macs) == 0:
self.id = mac
self.macs.add(mac)
self.interfaces[mac] = Interface()
def __repr__(self):
return self.macs.__repr__()
class Interface():
def __init__(self):
self.vpn = False
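A quick sketch of the bookkeeping above (made-up MACs): the first MAC added becomes the node id, and each MAC gets its own Interface record.

```python
from node import Node

n = Node()
n.add_mac("DE:AD:BE:EF:00:01")  # lowercased; the first MAC becomes the node id
n.add_mac("de:ad:be:ef:00:02")
assert n.id == "de:ad:be:ef:00:01"
assert not n.interfaces["de:ad:be:ef:00:02"].vpn  # interfaces default to non-VPN
```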

374
nodedb.py Normal file
View File

@ -0,0 +1,374 @@
import json
from functools import reduce
from collections import defaultdict
from node import Node, Interface
from link import Link, LinkConnector
class NodeDB:
def __init__(self):
self._nodes = []
self._links = []
# fetch list of links
def get_links(self):
self.update_vpn_links()
return self.reduce_links()
# fetch list of nodes
def get_nodes(self):
return self._nodes
def maybe_node_by_fuzzy_mac(self, mac):
mac_a = mac.lower()
for node in self._nodes:
for mac_b in node.macs:
if is_derived_mac(mac_a, mac_b):
return node
raise KeyError
def maybe_node_by_mac(self, macs):
for node in self._nodes:
for mac in macs:
if mac.lower() in node.macs:
return node
raise KeyError
def maybe_node_by_id(self, mac):
for node in self._nodes:
if mac.lower() == node.id:
return node
raise KeyError
def parse_vis_data(self,vis_data):
for x in vis_data:
if 'of' in x:
try:
node = self.maybe_node_by_mac((x['of'], x['secondary']))
except KeyError:
node = Node()
node.flags['online'] = True
if 'legacy' in x:
node.flags['legacy'] = True
self._nodes.append(node)
node.add_mac(x['of'])
node.add_mac(x['secondary'])
for x in vis_data:
if 'router' in x:
try:
node = self.maybe_node_by_mac((x['router'], ))
except KeyError:
node = Node()
node.flags['online'] = True
if 'legacy' in x:
node.flags['legacy'] = True
node.add_mac(x['router'])
self._nodes.append(node)
# If it's a TT link and the MAC is very similar,
# consider this MAC as one of the router's MACs
if 'gateway' in x and x['label'] == "TT":
if is_similar(x['router'], x['gateway']):
node.add_mac(x['gateway'])
# skip processing as regular link
continue
try:
if 'neighbor' in x:
try:
node = self.maybe_node_by_mac((x['neighbor'], ))
except KeyError:
continue
if 'gateway' in x:
x['neighbor'] = x['gateway']
node = self.maybe_node_by_mac((x['neighbor'], ))
except KeyError:
node = Node()
node.flags['online'] = True
if x['label'] == 'TT':
node.flags['client'] = True
node.add_mac(x['neighbor'])
self._nodes.append(node)
for x in vis_data:
if 'router' in x:
try:
if 'gateway' in x:
x['neighbor'] = x['gateway']
router = self.maybe_node_by_mac((x['router'], ))
neighbor = self.maybe_node_by_mac((x['neighbor'], ))
except KeyError:
continue
# filter TT links merged in previous step
if router == neighbor:
continue
link = Link()
link.source = LinkConnector()
link.source.interface = x['router']
link.source.id = self._nodes.index(router)
link.target = LinkConnector()
link.target.interface = x['neighbor']
link.target.id = self._nodes.index(neighbor)
link.quality = x['label']
link.id = "-".join(sorted((link.source.interface, link.target.interface)))
if x['label'] == "TT":
link.type = "client"
self._links.append(link)
for x in vis_data:
if 'primary' in x:
try:
node = self.maybe_node_by_mac((x['primary'], ))
except KeyError:
continue
node.id = x['primary']
def reduce_links(self):
tmp_links = defaultdict(list)
for link in self._links:
tmp_links[link.id].append(link)
links = []
def reduce_link(a, b):
a.id = b.id
a.source = b.source
a.target = b.target
a.type = b.type
a.quality = ", ".join([x for x in (a.quality, b.quality) if x])
return a
for k, v in tmp_links.items():
new_link = reduce(reduce_link, v, Link())
links.append(new_link)
return links
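# batadv-vis reports each link once per direction, so both entries share one
# id; e.g. TQ labels "1.000" and "0.984" are folded into a single Link whose
# quality reads "1.000, 0.984".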
def import_aliases(self, aliases):
for mac, alias in aliases.items():
try:
node = self.maybe_node_by_fuzzy_mac(mac)
except KeyError:
# create an offline node
node = Node()
node.add_mac(mac)
self._nodes.append(node)
if 'name' in alias:
node.name = alias['name']
if 'vpn' in alias and alias['vpn']:
node.interfaces[mac].vpn = True
if 'gps' in alias:
node.gps = alias['gps']
if 'firmware' in alias:
node.firmware = alias['firmware']
if 'firmware-base' in alias:
node.firmware_base = alias['firmware-base']
if 'hardware-model' in alias:
node.hardware_model = alias['hardware-model']
if 'selected-gateway' in alias:
node.selected_gateway = alias['selected-gateway']
if 'autoupdater-branch' in alias:
node.autoupdater_branch = alias['autoupdater-branch']
if 'autoupdater-enabled' in alias:
node.autoupdater_enabled = alias['autoupdater-enabled']
# list of macs
# if options['gateway']:
# mark_gateways(options['gateway'])
def mark_gateways(self, gateways):
for gateway in gateways:
try:
node = self.maybe_node_by_mac((gateway, ))
except KeyError:
continue
node.flags['gateway'] = True
def update_vpn_links(self):
changes = 1
while changes > 0:
changes = 0
for link in self._links:
if link.type == "client":
continue
source_interface = self._nodes[link.source.id].interfaces[link.source.interface]
target_interface = self._nodes[link.target.id].interfaces[link.target.interface]
if source_interface.vpn or target_interface.vpn:
source_interface.vpn = True
target_interface.vpn = True
if link.type != "vpn":
changes += 1
link.type = "vpn"
def obscure_clients(self):
globalIdCounter = 0
nodeCounters = {}
clientIds = {}
for node in self._nodes:
if node.flags['client']:
node.macs = set()
clientIds[node.id] = None
for link in self._links:
ids = link.source.interface
idt = link.target.interface
try:
node_source = self.maybe_node_by_fuzzy_mac(ids)
node_target = self.maybe_node_by_id(idt)
if not node_source.flags['client'] and not node_target.flags['client']:
# if none of the nodes associated with this link are clients,
# we do not want to obscure it
continue
if ids in clientIds and idt in clientIds:
# This is for corner cases, when a client
# is linked to another client.
clientIds[ids] = str(globalIdCounter)
ids = str(globalIdCounter)
globalIdCounter += 1
clientIds[idt] = str(globalIdCounter)
idt = str(globalIdCounter)
globalIdCounter += 1
elif ids in clientIds:
newId = generateId(idt, nodeCounters)
clientIds[ids] = newId
ids = newId
link.source.interface = ids
node_source.id = ids
elif idt in clientIds:
newId = generateId(ids, nodeCounters)
clientIds[idt] = newId
idt = newId
link.target.interface = idt
node_target.id = idt
link.id = ids + "-" + idt
except KeyError:
pass
# extends the node id with an incrementing per-node counter
def generateId(nodeId,nodeCounters):
if nodeId in nodeCounters:
n = nodeCounters[nodeId]
nodeCounters[nodeId] = n + 1
else:
nodeCounters[nodeId] = 1
n = 0
return nodeId + "_" + str(n)
# compares two MACs and decides whether they are
# similar and could be from the same node
def is_similar(a, b):
if a == b:
return True
try:
mac_a = list(int(i, 16) for i in a.split(":"))
mac_b = list(int(i, 16) for i in b.split(":"))
except ValueError:
return False
# the first byte may only differ in bit 0x02 (the locally administered bit)
if mac_a[0] | 2 == mac_b[0] | 2:
# count different bytes
c = [x for x in zip(mac_a[1:], mac_b[1:]) if x[0] != x[1]]
else:
return False
# no more than two additional bytes may differ
if len(c) > 2:
return False
delta = 0
if len(c) > 0:
delta = sum(abs(i[0] - i[1]) for i in c)
# These addresses look pretty similar!
return delta < 8
def is_derived_mac(a, b):
if a == b:
return True
try:
mac_a = list(int(i, 16) for i in a.split(":"))
mac_b = list(int(i, 16) for i in b.split(":"))
except ValueError:
return False
if mac_a[4] != mac_b[4] or mac_a[2] != mac_b[2] or mac_a[1] != mac_b[1]:
return False
x = list(mac_a)
x[5] += 1
x[5] %= 256  # wrap around within a byte
if mac_b == x:
return True
x[0] |= 2
if mac_b == x:
return True
x[3] += 1
x[3] %= 256
if mac_b == x:
return True
x = list(mac_a)
x[0] |= 2
x[5] += 2
x[5] %= 256
if mac_b == x:
return True
x = list(mac_a)
x[0] |= 2
x[3] += 1
x[3] %= 256
if mac_b == x:
return True
return False
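To make the two heuristics above concrete, a few hedged examples (all addresses invented): secondary MACs differ from the primary by small byte increments and/or the locally administered bit (0x02).

```python
from nodedb import is_similar, is_derived_mac

assert is_derived_mac("de:ad:be:ef:00:42", "de:ad:be:ef:00:43")  # last byte + 1
assert is_derived_mac("dc:ad:be:ef:00:42", "de:ad:be:ef:00:43")  # + 1 and the 0x02 bit
assert not is_derived_mac("de:ad:be:ef:00:42", "de:ad:be:ef:01:42")

assert is_similar("00:ad:be:ef:00:42", "02:ad:be:ef:00:42")  # only the 0x02 bit differs
assert not is_similar("00:ad:be:ef:00:42", "02:ad:be:ef:99:99")  # two bytes far apart
```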

153
rrd.py Executable file
View File

@ -0,0 +1,153 @@
#!/usr/bin/env python3
import subprocess
import time
import os
class rrd:
def __init__( self
, databaseDirectory
, imagePath
, displayTimeGlobal = "7d"
, displayTimeNode = "1d"
):
self.dbPath = databaseDirectory
self.globalDbFile = databaseDirectory + "/nodes.rrd"
self.imagePath = imagePath
self.displayTimeGlobal = displayTimeGlobal
self.displayTimeNode = displayTimeNode
self.currentTimeInt = (int(time.time()) // 60) * 60
self.currentTime = str(self.currentTimeInt)
try:
os.stat(self.imagePath)
except OSError:
os.mkdir(self.imagePath)
def checkAndCreateIfNeededGlobalDatabase(self):
""" Creates the global database file iff it did not exist.
"""
if not os.path.exists(self.globalDbFile):
# Create Database with rrdtool
args = ["rrdtool",'create', self.globalDbFile
,'--start', str(round(self.currentTimeInt - 60))
,'--step' , '60'
# Number of nodes available
,'DS:nodes:GAUGE:120:0:U'
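# Archives keep the LAST value: per-minute samples for 31 days
# (44640 rows), hourly for 31 days (744 rows), daily for roughly
# five years (1780 rows)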
,'RRA:LAST:0:1:44640'
,'RRA:LAST:0:60:744'
,'RRA:LAST:0:1440:1780'
# Number of clients available
,'DS:clients:GAUGE:120:0:U'
,'RRA:LAST:0:1:44640'
,'RRA:LAST:0:60:744'
,'RRA:LAST:0:1440:1780'
]
subprocess.call(args)
def updateGlobalDatabase(self,nodeCount,clientCount):
""" Adds a new (#Nodes,#Clients) entry to the global database.
"""
# Update Global RRDatabase
args = ["rrdtool",'updatev', self.globalDbFile
# #Nodes #Clients
, self.currentTime + ":"+str(nodeCount)+":"+str(clientCount)
]
subprocess.check_output(args)
def createGlobalGraph(self):
nodeGraph = self.imagePath + "/" + "globalGraph.png"
args = ["rrdtool", 'graph', nodeGraph, '-s', '-' + self.displayTimeGlobal, '-w', '800', '-h' '400'
,'DEF:nodes=' + self.globalDbFile + ':nodes:LAST', 'LINE1:nodes#F00:nodes\\l'
,'DEF:clients=' + self.globalDbFile + ':clients:LAST','LINE2:clients#00F:clients'
]
subprocess.check_output(args)
def nodeMACToRRDFile(self,nodeMAC):
return self.dbPath + "/" + str(nodeMAC).replace(":","") + ".rrd"
def nodeMACToPNGFile(self,nodeMAC):
return self.imagePath + "/" + str(nodeMAC).replace(":","") + ".png"
def checkAndCreateIfNeededNodeDatabase(self,nodePrimaryMAC):
# TODO check for bad nodeNames
nodeFile = self.nodeMACToRRDFile(nodePrimaryMAC)
if not os.path.exists(nodeFile):
# TODO adjust the scales
args = ["rrdtool",'create',nodeFile
,'--start',str(round(self.currentTimeInt - 60))
,'--step' , '60'
,'DS:upstate:GAUGE:120:0:1'
,'RRA:LAST:0:1:44640'
# Number of clients available
,'DS:clients:GAUGE:120:0:U'
,'RRA:LAST:0:1:44640'
]
subprocess.check_output(args)
# Call only if node is up
def updateNodeDatabase(self,nodePrimaryMAC,clientCount):
nodeFile = self.nodeMACToRRDFile(nodePrimaryMAC)
# Update Global RRDatabase
args = ["rrdtool",'updatev', nodeFile
# #Upstate #Clients
, self.currentTime + ":"+str(1)+":"+str(clientCount)
]
subprocess.check_output(args)
def createNodeGraph(self,nodePrimaryMAC,displayTimeNode):
nodeGraph = self.nodeMACToPNGFile(nodePrimaryMAC)
nodeFile = self.nodeMACToRRDFile(nodePrimaryMAC)
args = ['rrdtool','graph', nodeGraph, '-s', '-' + self.displayTimeNode , '-w', '800', '-h', '400', '-l', '0', '-y', '1:1',
'DEF:clients=' + nodeFile + ':clients:LAST',
'VDEF:maxc=clients,MAXIMUM',
'CDEF:c=0,clients,ADDNAN',
'CDEF:d=clients,UN,maxc,UN,1,maxc,IF,*',
'AREA:c#0F0:up\\l',
'AREA:d#F00:down\\l',
'LINE1:c#00F:clients connected\\l',
]
subprocess.check_output(args)
def update_database(self,db):
nodes = {}
clientCount = 0
for node in db.get_nodes():
if node.flags['online']:
if not node.flags['client']:
nodes[node.id] = node
node.clients = 0
if 'legacy' in node.flags and node.flags['legacy']:
clientCount -= 1
else:
clientCount += 1
for link in db.get_links():
source = link.source.interface
target = link.target.interface
if source in nodes and target not in nodes:
nodes[source].clients += 1
elif target in nodes and source not in nodes:
nodes[target].clients += 1
self.checkAndCreateIfNeededGlobalDatabase()
self.updateGlobalDatabase(len(nodes),clientCount)
for mac in nodes:
self.checkAndCreateIfNeededNodeDatabase(mac)
self.updateNodeDatabase(mac,nodes[mac].clients)
def update_images(self):
""" Creates a image for every rrd file in the database directory.
"""
self.createGlobalGraph()
nodeDbFiles = os.listdir(self.dbPath)
for fileName in nodeDbFiles:
if not os.path.isfile(os.path.join(self.dbPath, fileName)):
continue
nodeName = os.path.basename(fileName).split('.')
if nodeName[1] == 'rrd' and nodeName[0] != "nodes":
self.createNodeGraph(nodeName[0],self.displayTimeNode)