Compare commits
13 Commits
Author | SHA1 | Date |
---|---|---|
Jan-Philipp Litza | 010ad4a43b | |
Michael Rüttgers | 84fe43f5dc | |
Jan-Philipp Litza | ddcf8fbead | |
Jan-Philipp Litza | dc8171dbd6 | |
Jan-Philipp Litza | abc84e9fc9 | |
Michael Rüttgers | d9bc7eb9a1 | |
Nils Schneider | 3b8308722f | |
Nils Schneider | c2e21b4f5b | |
Nils Schneider | 431d46e191 | |
Nils Schneider | 823b64b8ba | |
Nils Schneider | f2214ab130 | |
kantorkel | c5b321430e | |
kantorkel | ee84327b5c |
78
README.md
78
README.md
|
@ -55,23 +55,24 @@ will prefix `sudo` where necessary.
|
||||||
|
|
||||||
## nodes.json
|
## nodes.json
|
||||||
|
|
||||||
{ 'nodes': {
|
{ "nodes": [
|
||||||
node_id: { 'flags': { flags },
|
{ "flags": { flags },
|
||||||
'firstseen': isoformat,
|
"firstseen": isoformat,
|
||||||
'lastseen': isoformat,
|
"lastseen": isoformat,
|
||||||
'nodeinfo': {...}, # copied from alfred type 158
|
"nodeinfo": {...}, # copied from node's nodeinfo announcement
|
||||||
'statistics': {
|
"statistics": {
|
||||||
'uptime': double, # seconds
|
"uptime": double, # seconds
|
||||||
'memory_usage': double, # 0..1
|
"memory_usage": double, # 0..1
|
||||||
'clients': double,
|
"clients": double,
|
||||||
'rootfs_usage': double, # 0..1
|
"rootfs_usage": double, # 0..1
|
||||||
'loadavg': double,
|
"loadavg": double,
|
||||||
'gateway': mac
|
"gateway": mac
|
||||||
}
|
}
|
||||||
},
|
},
|
||||||
...
|
...
|
||||||
}
|
]
|
||||||
'timestamp': isoformat
|
"timestamp": isoformat,
|
||||||
|
"version": 2
|
||||||
}
|
}
|
||||||
|
|
||||||
### flags (bool)
|
### flags (bool)
|
||||||
|
@ -108,10 +109,57 @@ database.
|
||||||
After running ffmap-backend, copy `graph.json` to your webserver. Then,
|
After running ffmap-backend, copy `graph.json` to your webserver. Then,
|
||||||
filter `nodes.json` using `jq` like this:
|
filter `nodes.json` using `jq` like this:
|
||||||
|
|
||||||
jq '.nodes = (.nodes | with_entries(del(.value.nodeinfo.owner)))' \
|
jq '.nodes = (.nodes | map(del(.nodeinfo.owner)))' \
|
||||||
< /ffmap-data/nodes.json > /var/www/data/nodes.json
|
< /ffmap-data/nodes.json > /var/www/data/nodes.json
|
||||||
|
|
||||||
This will remove owner information from nodes.json before copying the data
|
This will remove owner information from nodes.json before copying the data
|
||||||
to your webserver.
|
to your webserver.
|
||||||
|
|
||||||
[jq]: https://stedolan.github.io/jq/
|
[jq]: https://stedolan.github.io/jq/
|
||||||
|
|
||||||
|
|
||||||
|
# Convert from nodes.json version 1 to version 2
|
||||||
|
|
||||||
|
jq '.nodes = (.nodes | to_entries | map(.value)) | .version = 2' \
|
||||||
|
< nodes.json > nodes.json.new
|
||||||
|
mv nodes.json.new nodes.json
|
||||||
|
|
||||||
|
|
||||||
|
# Graphite support
|
||||||
|
|
||||||
|
## Command line arguments
|
||||||
|
Running `backend.py` with `--with-graphite` will enable graphite support for storing statistical data.
|
||||||
|
|
||||||
|
graphite integration:
|
||||||
|
--with-graphite Send statistical data to graphite backend
|
||||||
|
--graphite-host GRAPHITE_HOST
|
||||||
|
Hostname of the machine running graphite
|
||||||
|
--graphite-port GRAPHITE_PORT
|
||||||
|
Port of the carbon daemon
|
||||||
|
--graphite-prefix GRAPHITE_PREFIX
|
||||||
|
Storage prefix (default value: 'freifunk.nodes.')
|
||||||
|
--graphite-metrics GRAPHITE_METRICS
|
||||||
|
Comma separated list of metrics to store (default
|
||||||
|
value: 'clients,loadavg,uptime')
|
||||||
|
|
||||||
|
## Graphite configuration
|
||||||
|
|
||||||
|
### storage-schemas.conf
|
||||||
|
|
||||||
|
[freifunk_node_stats]
|
||||||
|
pattern = ^freifunk\.nodes\.
|
||||||
|
retentions = 60s:1d,5min:7d,1h:30d,1d:4y
|
||||||
|
|
||||||
|
### storage-aggregation.conf
|
||||||
|
|
||||||
|
[freifunk_node_stats_loadavg]
|
||||||
|
pattern = ^freifunk\.nodes\..*\.loadavg$
|
||||||
|
aggregationMethod = avg
|
||||||
|
|
||||||
|
[freifunk_node_stats_clients]
|
||||||
|
pattern = ^freifunk\.nodes\..*\.clients$
|
||||||
|
aggregationMethod = max
|
||||||
|
|
||||||
|
[freifunk_node_stats_uptime]
|
||||||
|
pattern = ^freifunk\.nodes\..*\.uptime$
|
||||||
|
aggregationMethod = last
|
||||||
|
|
74
backend.py
74
backend.py
|
@ -18,8 +18,9 @@ from lib.batman import Batman
|
||||||
from lib.rrddb import RRD
|
from lib.rrddb import RRD
|
||||||
from lib.nodelist import export_nodelist
|
from lib.nodelist import export_nodelist
|
||||||
from lib.validate import validate_nodeinfos
|
from lib.validate import validate_nodeinfos
|
||||||
|
from lib.graphite import Graphite
|
||||||
|
|
||||||
NODES_VERSION = 1
|
NODES_VERSION = 2
|
||||||
GRAPH_VERSION = 1
|
GRAPH_VERSION = 1
|
||||||
|
|
||||||
|
|
||||||
|
@ -58,71 +59,72 @@ def main(params):
|
||||||
|
|
||||||
# read nodedb state from node.json
|
# read nodedb state from node.json
|
||||||
try:
|
try:
|
||||||
with open(nodes_fn, 'r') as nodedb_handle:
|
with open(nodes_fn, 'r', encoding=('UTF-8')) as nodedb_handle:
|
||||||
nodedb = json.load(nodedb_handle)
|
nodedb = json.load(nodedb_handle)
|
||||||
except IOError:
|
except IOError:
|
||||||
nodedb = {'nodes': dict()}
|
nodedb = {'nodes': []}
|
||||||
|
|
||||||
# flush nodedb if it uses the old format
|
|
||||||
if 'links' in nodedb:
|
|
||||||
nodedb = {'nodes': dict()}
|
|
||||||
|
|
||||||
# set version we're going to output
|
# set version we're going to output
|
||||||
nodedb['version'] = NODES_VERSION
|
nodedb['version'] = NODES_VERSION
|
||||||
|
|
||||||
# update timestamp and assume all nodes are offline
|
# update timestamp and assume all nodes are offline
|
||||||
nodedb['timestamp'] = now.isoformat()
|
nodedb['timestamp'] = now.isoformat()
|
||||||
for node_id, node in nodedb['nodes'].items():
|
for node in nodedb['nodes']:
|
||||||
node['flags']['online'] = False
|
node['flags']['online'] = False
|
||||||
|
|
||||||
|
nodesdict = {}
|
||||||
|
|
||||||
|
for node in nodedb['nodes']:
|
||||||
|
nodesdict[node['nodeinfo']['node_id']] = node
|
||||||
|
|
||||||
# integrate alfred nodeinfo
|
# integrate alfred nodeinfo
|
||||||
for alfred in alfred_instances:
|
for alfred in alfred_instances:
|
||||||
nodeinfo = validate_nodeinfos(alfred.nodeinfo())
|
nodeinfo = validate_nodeinfos(alfred.nodeinfo())
|
||||||
nodes.import_nodeinfo(nodedb['nodes'], nodeinfo,
|
nodes.import_nodeinfo(nodesdict, nodeinfo,
|
||||||
now, assume_online=True)
|
now, assume_online=True)
|
||||||
|
|
||||||
# integrate static aliases data
|
# integrate static aliases data
|
||||||
for aliases in params['aliases']:
|
for aliases in params['aliases']:
|
||||||
with open(aliases, 'r') as f:
|
with open(aliases, 'r') as f:
|
||||||
nodeinfo = validate_nodeinfos(json.load(f))
|
nodeinfo = validate_nodeinfos(json.load(f))
|
||||||
nodes.import_nodeinfo(nodedb['nodes'], nodeinfo,
|
nodes.import_nodeinfo(nodesdict, nodeinfo,
|
||||||
now, assume_online=False)
|
now, assume_online=False)
|
||||||
|
|
||||||
nodes.reset_statistics(nodedb['nodes'])
|
nodes.reset_statistics(nodesdict)
|
||||||
for alfred in alfred_instances:
|
for alfred in alfred_instances:
|
||||||
nodes.import_statistics(nodedb['nodes'], alfred.statistics())
|
nodes.import_statistics(nodesdict, alfred.statistics())
|
||||||
|
|
||||||
# acquire gwl and visdata for each batman instance
|
# acquire visdata for each batman instance
|
||||||
mesh_info = []
|
mesh_info = []
|
||||||
for batman in batman_instances:
|
for batman in batman_instances:
|
||||||
vd = batman.vis_data()
|
vd = batman.vis_data()
|
||||||
gwl = batman.gateway_list()
|
|
||||||
|
|
||||||
mesh_info.append((vd, gwl))
|
mesh_info.append(vd)
|
||||||
|
|
||||||
# update nodedb from batman-adv data
|
# update nodedb from batman-adv data
|
||||||
for vd, gwl in mesh_info:
|
for vd in mesh_info:
|
||||||
nodes.import_mesh_ifs_vis_data(nodedb['nodes'], vd)
|
nodes.import_mesh_ifs_vis_data(nodesdict, vd)
|
||||||
nodes.import_vis_clientcount(nodedb['nodes'], vd)
|
nodes.import_vis_clientcount(nodesdict, vd)
|
||||||
nodes.mark_vis_data_online(nodedb['nodes'], vd, now)
|
nodes.mark_vis_data_online(nodesdict, vd, now)
|
||||||
nodes.mark_gateways(nodedb['nodes'], gwl)
|
|
||||||
|
|
||||||
# clear the nodedb from nodes that have not been online in $prune days
|
# clear the nodedb from nodes that have not been online in $prune days
|
||||||
if params['prune']:
|
if params['prune']:
|
||||||
nodes.prune_nodes(nodedb['nodes'], now, params['prune'])
|
nodes.prune_nodes(nodesdict, now, params['prune'])
|
||||||
|
|
||||||
# build nxnetworks graph from nodedb and visdata
|
# build nxnetworks graph from nodedb and visdata
|
||||||
batadv_graph = nx.DiGraph()
|
batadv_graph = nx.DiGraph()
|
||||||
for vd, gwl in mesh_info:
|
for vd in mesh_info:
|
||||||
graph.import_vis_data(batadv_graph, nodedb['nodes'], vd)
|
graph.import_vis_data(batadv_graph, nodesdict, vd)
|
||||||
|
|
||||||
# force mac addresses to be vpn-link only (like gateways for example)
|
# force mac addresses to be vpn-link only (like gateways for example)
|
||||||
if params['vpn']:
|
if params['vpn']:
|
||||||
graph.mark_vpn(batadv_graph, frozenset(params['vpn']))
|
graph.mark_vpn(batadv_graph, frozenset(params['vpn']))
|
||||||
|
|
||||||
|
nodedb['nodes'] = list(nodesdict.values())
|
||||||
|
|
||||||
def extract_tunnel(nodes):
|
def extract_tunnel(nodes):
|
||||||
macs = set()
|
macs = set()
|
||||||
for id, node in nodes.items():
|
for node in nodes:
|
||||||
try:
|
try:
|
||||||
for mac in node["nodeinfo"]["network"]["mesh"]["bat0"]["interfaces"]["tunnel"]:
|
for mac in node["nodeinfo"]["network"]["mesh"]["bat0"]["interfaces"]["tunnel"]:
|
||||||
macs.add(mac)
|
macs.add(mac)
|
||||||
|
@ -149,11 +151,14 @@ def main(params):
|
||||||
with open(nodelist_fn, 'w') as f:
|
with open(nodelist_fn, 'w') as f:
|
||||||
json.dump(export_nodelist(now, nodedb), f)
|
json.dump(export_nodelist(now, nodedb), f)
|
||||||
|
|
||||||
|
# optional Graphite integration
|
||||||
|
if params['graphite']:
|
||||||
|
graphite = Graphite(params['graphite_host'], params['graphite_port'])
|
||||||
|
graphite.update(params['graphite_prefix'], params['graphite_metrics'], nodedb['nodes'])
|
||||||
|
|
||||||
# optional rrd graphs (trigger with --rrd)
|
# optional rrd graphs (trigger with --rrd)
|
||||||
if params['rrd']:
|
if params['rrd']:
|
||||||
script_directory = os.path.dirname(os.path.realpath(__file__))
|
rrd = RRD(params['rrd_path'], os.path.join(params['dest_dir'], 'nodes'))
|
||||||
rrd = RRD(os.path.join(script_directory, 'nodedb'),
|
|
||||||
os.path.join(params['dest_dir'], 'nodes'))
|
|
||||||
rrd.update_database(nodedb['nodes'])
|
rrd.update_database(nodedb['nodes'])
|
||||||
rrd.update_images()
|
rrd.update_images()
|
||||||
|
|
||||||
|
@ -176,10 +181,25 @@ if __name__ == '__main__':
|
||||||
help='Assume MAC addresses are part of vpn')
|
help='Assume MAC addresses are part of vpn')
|
||||||
parser.add_argument('-p', '--prune', metavar='DAYS', type=int,
|
parser.add_argument('-p', '--prune', metavar='DAYS', type=int,
|
||||||
help='forget nodes offline for at least DAYS')
|
help='forget nodes offline for at least DAYS')
|
||||||
|
parser.add_argument('--rrd-path', default=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'nodedb'),
|
||||||
|
help='path to RRD files')
|
||||||
parser.add_argument('--with-rrd', dest='rrd', action='store_true',
|
parser.add_argument('--with-rrd', dest='rrd', action='store_true',
|
||||||
default=False,
|
default=False,
|
||||||
help='enable the rendering of RRD graphs (cpu '
|
help='enable the rendering of RRD graphs (cpu '
|
||||||
'intensive)')
|
'intensive)')
|
||||||
|
|
||||||
|
# Graphite integration
|
||||||
|
graphite = parser.add_argument_group('graphite integration')
|
||||||
|
graphite.add_argument('--with-graphite', dest='graphite', action='store_true', default=False,
|
||||||
|
help='Send statistical data to graphite backend')
|
||||||
|
graphite.add_argument('--graphite-host', dest='graphite_host', default="localhost",
|
||||||
|
help='Hostname of the machine running graphite')
|
||||||
|
graphite.add_argument('--graphite-port', dest='graphite_port', default="2003", type=int,
|
||||||
|
help='Port of the carbon daemon')
|
||||||
|
graphite.add_argument('--graphite-prefix', dest='graphite_prefix', default="freifunk.nodes.",
|
||||||
|
help='Storage prefix (default value: \'freifunk.nodes.\')')
|
||||||
|
graphite.add_argument('--graphite-metrics', dest='graphite_metrics', default="clients,loadavg,uptime",
|
||||||
|
help='Comma separated list of metrics to store (default value: \'clients,loadavg,uptime\')')
|
||||||
|
|
||||||
options = vars(parser.parse_args())
|
options = vars(parser.parse_args())
|
||||||
main(options)
|
main(options)
|
||||||
|
|
|
@ -0,0 +1,47 @@
|
||||||
|
import socket
|
||||||
|
import time
|
||||||
|
|
||||||
|
|
||||||
|
class Graphite(object):
|
||||||
|
|
||||||
|
def __init__(self, hostname, port):
|
||||||
|
self.hostname = hostname
|
||||||
|
self.port = int(port)
|
||||||
|
|
||||||
|
def flatten_dict(self, d):
|
||||||
|
def expand(key, value):
|
||||||
|
if isinstance(value, dict):
|
||||||
|
return [('{}.{}'.format(key, k), v) for k, v in self.flatten_dict(value).items()]
|
||||||
|
else:
|
||||||
|
return [(key, value)]
|
||||||
|
items = [item for k, v in d.items() for item in expand(k, v)]
|
||||||
|
return dict(items)
|
||||||
|
|
||||||
|
def update(self, prefix, metrics, nodes):
|
||||||
|
timestamp = int(time.time())
|
||||||
|
|
||||||
|
sock = socket.socket()
|
||||||
|
sock.connect((self.hostname, self.port))
|
||||||
|
|
||||||
|
for node in nodes:
|
||||||
|
try:
|
||||||
|
if node['flags']['online']:
|
||||||
|
stats = self.flatten_dict(node['statistics'])
|
||||||
|
for metric in metrics.split(','):
|
||||||
|
try:
|
||||||
|
msg = '{}{}.{} {} {}\n'.format(
|
||||||
|
prefix,
|
||||||
|
node['nodeinfo']['node_id'].replace(' ', '_'),
|
||||||
|
metric.replace(' ', '_'),
|
||||||
|
stats[metric],
|
||||||
|
timestamp
|
||||||
|
)
|
||||||
|
sock.send(msg.encode('utf-8'))
|
||||||
|
|
||||||
|
except KeyError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
except KeyError:
|
||||||
|
pass
|
||||||
|
|
||||||
|
sock.close()
|
|
@ -1,9 +1,9 @@
|
||||||
def export_nodelist(now, nodedb):
|
def export_nodelist(now, nodedb):
|
||||||
nodelist = list()
|
nodelist = list()
|
||||||
|
|
||||||
for node_id, node in nodedb["nodes"].items():
|
for node in nodedb["nodes"]:
|
||||||
node_out = dict()
|
node_out = dict()
|
||||||
node_out["id"] = node_id
|
node_out["id"] = node["nodeinfo"]["node_id"]
|
||||||
node_out["name"] = node["nodeinfo"]["hostname"]
|
node_out["name"] = node["nodeinfo"]["hostname"]
|
||||||
|
|
||||||
if "location" in node["nodeinfo"]:
|
if "location" in node["nodeinfo"]:
|
||||||
|
@ -13,6 +13,9 @@ def export_nodelist(now, nodedb):
|
||||||
node_out["status"] = dict()
|
node_out["status"] = dict()
|
||||||
node_out["status"]["online"] = node["flags"]["online"]
|
node_out["status"]["online"] = node["flags"]["online"]
|
||||||
|
|
||||||
|
if "firstseen" in node:
|
||||||
|
node_out["status"]["firstcontact"] = node["firstseen"]
|
||||||
|
|
||||||
if "lastseen" in node:
|
if "lastseen" in node:
|
||||||
node_out["status"]["lastcontact"] = node["lastseen"]
|
node_out["status"]["lastcontact"] = node["lastseen"]
|
||||||
|
|
||||||
|
|
12
lib/nodes.py
12
lib/nodes.py
|
@ -61,7 +61,6 @@ def import_nodeinfo(nodes, nodeinfos, now, assume_online=False):
|
||||||
node = nodes.setdefault(nodeinfo['node_id'], {'flags': dict()})
|
node = nodes.setdefault(nodeinfo['node_id'], {'flags': dict()})
|
||||||
node['nodeinfo'] = nodeinfo
|
node['nodeinfo'] = nodeinfo
|
||||||
node['flags']['online'] = False
|
node['flags']['online'] = False
|
||||||
node['flags']['gateway'] = False
|
|
||||||
|
|
||||||
if assume_online:
|
if assume_online:
|
||||||
mark_online(node, now)
|
mark_online(node, now)
|
||||||
|
@ -86,11 +85,10 @@ def import_statistics(nodes, stats):
|
||||||
stats = filter(lambda d: d['node_id'] in nodes, stats)
|
stats = filter(lambda d: d['node_id'] in nodes, stats)
|
||||||
for node, stats in map(lambda d: (nodes[d['node_id']], d), stats):
|
for node, stats in map(lambda d: (nodes[d['node_id']], d), stats):
|
||||||
add(node, stats, 'clients', ['clients', 'total'])
|
add(node, stats, 'clients', ['clients', 'total'])
|
||||||
add(node, stats, 'gateway', ['gateway'], lambda d: macs.get(d, d))
|
|
||||||
add(node, stats, 'uptime', ['uptime'])
|
add(node, stats, 'uptime', ['uptime'])
|
||||||
add(node, stats, 'loadavg', ['loadavg'])
|
add(node, stats, 'loadavg', ['loadavg'])
|
||||||
add(node, stats, 'memory_usage', ['memory'],
|
add(node, stats, 'memory_usage', ['memory'],
|
||||||
lambda d: 1 - d['free'] / d['total'])
|
lambda d: 1 - (d['free'] + d['buffers'] + d['cached']) / d['total'])
|
||||||
add(node, stats, 'rootfs_usage', ['rootfs_usage'])
|
add(node, stats, 'rootfs_usage', ['rootfs_usage'])
|
||||||
add(node, stats, 'traffic', ['traffic'])
|
add(node, stats, 'traffic', ['traffic'])
|
||||||
|
|
||||||
|
@ -152,14 +150,6 @@ def import_vis_clientcount(nodes, vis_data):
|
||||||
nodes[node_id]['statistics'].setdefault('clients', clientcount)
|
nodes[node_id]['statistics'].setdefault('clients', clientcount)
|
||||||
|
|
||||||
|
|
||||||
def mark_gateways(nodes, gateways):
|
|
||||||
macs = build_mac_table(nodes)
|
|
||||||
gateways = filter(lambda d: d in macs, gateways)
|
|
||||||
|
|
||||||
for node in map(lambda d: nodes[macs[d]], gateways):
|
|
||||||
node['flags']['gateway'] = True
|
|
||||||
|
|
||||||
|
|
||||||
def mark_vis_data_online(nodes, vis_data, now):
|
def mark_vis_data_online(nodes, vis_data, now):
|
||||||
macs = build_mac_table(nodes)
|
macs = build_mac_table(nodes)
|
||||||
|
|
||||||
|
|
19
lib/rrddb.py
19
lib/rrddb.py
|
@ -28,15 +28,18 @@ class RRD(object):
|
||||||
os.mkdir(self.imagePath)
|
os.mkdir(self.imagePath)
|
||||||
|
|
||||||
def update_database(self, nodes):
|
def update_database(self, nodes):
|
||||||
online_nodes = dict(filter(
|
node_count = 0
|
||||||
lambda d: d[1]['flags']['online'], nodes.items()))
|
client_count = 0
|
||||||
client_count = sum(map(
|
for node in nodes:
|
||||||
lambda d: d['statistics']['clients'], online_nodes.values()))
|
try:
|
||||||
|
if node['flags']['online']:
|
||||||
self.globalDb.update(len(online_nodes), client_count)
|
node_count += 1
|
||||||
for node_id, node in online_nodes.items():
|
client_count += node['statistics']['clients']
|
||||||
rrd = NodeRRD(os.path.join(self.dbPath, node_id + '.rrd'), node)
|
rrd = NodeRRD(os.path.join(self.dbPath, node['nodeinfo']['node_id'] + '.rrd'), node)
|
||||||
rrd.update()
|
rrd.update()
|
||||||
|
except KeyError:
|
||||||
|
pass
|
||||||
|
self.globalDb.update(node_count, client_count)
|
||||||
|
|
||||||
def update_images(self):
|
def update_images(self):
|
||||||
self.globalDb.graph(os.path.join(self.imagePath, "globalGraph.png"),
|
self.globalDb.graph(os.path.join(self.imagePath, "globalGraph.png"),
|
||||||
|
|
Loading…
Reference in New Issue