Compare commits


13 Commits (master...dev)

| Author | SHA1 | Message | Date |
| ------ | ---- | ------- | ---- |
| Jan-Philipp Litza | 010ad4a43b | Merge pull request #67 from ffac:dev (Added graphite support for storage of statistical data) | 2015-09-08 21:37:26 +02:00 |
| Michael Rüttgers | 84fe43f5dc | Added graphite support for storage of statistical data | 2015-09-08 21:36:29 +02:00 |
| Jan-Philipp Litza | ddcf8fbead | Update RRDs of offline nodes, ignore key errors | 2015-09-08 21:34:41 +02:00 |
| Jan-Philipp Litza | dc8171dbd6 | Merge pull request #68 from ffac/hotfix/rrd-type-error (Fixes a type error (nodes is no dict any longer, error was: Attribute…)) | 2015-09-08 21:31:53 +02:00 |
| Jan-Philipp Litza | abc84e9fc9 | README: Update to new nodes.json version | 2015-09-04 10:52:19 +02:00 |
| Michael Rüttgers | d9bc7eb9a1 | Fixes a type error (nodes is no dict any longer, error was: AttributeError: 'list' object has no attribute 'items') | 2015-09-02 23:16:06 +02:00 |
| Nils Schneider | 3b8308722f | add --rrd-path option | 2015-08-22 23:54:03 +02:00 |
| Nils Schneider | c2e21b4f5b | stop relying on batmanadv gateway feature | 2015-07-31 11:57:44 +02:00 |
| Nils Schneider | 431d46e191 | change nodes.json format to version 2 | 2015-07-30 19:21:56 +02:00 |
| Nils Schneider | 823b64b8ba | memory_usage: assume buffers and cached to be free | 2015-07-13 17:43:41 +02:00 |
| Nils Schneider | f2214ab130 | Merge pull request #62 from kantorkel/firstseen (add firstseen to nodelist.json) | 2015-07-12 18:44:53 +02:00 |
| kantorkel | c5b321430e | fixed 'blank line contains whitespace' (lib/nodelist.py:18:1: W293 blank line contains whitespace. fixed.) | 2015-07-12 18:08:17 +02:00 |
| kantorkel | ee84327b5c | add firstseen to nodelist.json | 2015-07-06 15:26:41 +02:00 |
6 changed files with 177 additions and 66 deletions

README.md

@@ -55,23 +55,24 @@ will prefix `sudo` where necessary.

 ## nodes.json

-    { 'nodes': {
-        node_id: { 'flags': { flags },
-                   'firstseen': isoformat,
-                   'lastseen': isoformat,
-                   'nodeinfo': {...},         # copied from alfred type 158
-                   'statistics': {
-                      'uptime': double,       # seconds
-                      'memory_usage': double, # 0..1
-                      'clients': double,
-                      'rootfs_usage': double, # 0..1
-                      'loadavg': double,
-                      'gateway': mac
-                   }
-                 },
+    { "nodes": [
+        { "flags": { flags },
+          "firstseen": isoformat,
+          "lastseen": isoformat,
+          "nodeinfo": {...},         # copied from node's nodeinfo announcement
+          "statistics": {
+            "uptime": double,       # seconds
+            "memory_usage": double, # 0..1
+            "clients": double,
+            "rootfs_usage": double, # 0..1
+            "loadavg": double,
+            "gateway": mac
+          }
+        },
         ...
-      }
-      'timestamp': isoformat
+      ]
+      "timestamp": isoformat,
+      "version": 2
     }

 ### flags (bool)
@@ -108,10 +109,57 @@ database.

 After running ffmap-backend, copy `graph.json` to your webserver. Then,
 filter `nodes.json` using `jq` like this:

-    jq '.nodes = (.nodes | with_entries(del(.value.nodeinfo.owner)))' \
+    jq '.nodes = (.nodes | map(del(.nodeinfo.owner)))' \
         < /ffmap-data/nodes.json > /var/www/data/nodes.json

 This will remove owner information from nodes.json before copying the data
 to your webserver.

 [jq]: https://stedolan.github.io/jq/
+
+# Convert from nodes.json version 1 to version 2
+
+    jq '.nodes = (.nodes | to_entries | map(.value)) | .version = 2' \
+        < nodes.json > nodes.json.new
+    mv nodes.json.new nodes.json
+
+# Graphite support
+
+## Command line arguments
+
+Running `backend.py` with `--with-graphite` will enable graphite support for storing statistical data.
+
+    graphite integration:
+      --with-graphite       Send statistical data to graphite backend
+      --graphite-host GRAPHITE_HOST
+                            Hostname of the machine running graphite
+      --graphite-port GRAPHITE_PORT
+                            Port of the carbon daemon
+      --graphite-prefix GRAPHITE_PREFIX
+                            Storage prefix (default value: 'freifunk.nodes.')
+      --graphite-metrics GRAPHITE_METRICS
+                            Comma separated list of metrics to store (default
+                            value: 'clients,loadavg,uptime')
+
+## Graphite configuration
+
+### storage-schemas.conf
+
+    [freifunk_node_stats]
+    pattern = ^freifunk\.nodes\.
+    retentions = 60s:1d,5min:7d,1h:30d,1d:4y
+
+### storage-aggregation.conf
+
+    [freifunk_node_stats_loadavg]
+    pattern = ^freifunk\.nodes\..*\.loadavg$
+    aggregationMethod = avg
+
+    [freifunk_node_stats_clients]
+    pattern = ^freifunk\.nodes\..*\.clients$
+    aggregationMethod = max
+
+    [freifunk_node_stats_uptime]
+    pattern = ^freifunk\.nodes\..*\.uptime$
+    aggregationMethod = last
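
To see how these pieces fit together: the backend composes each metric path as prefix + node_id + metric (see `lib/graphite.py` below), which is exactly what the `^freifunk\.nodes\.` patterns above match. A minimal Python sketch, using the documented defaults and a made-up node id:

    # Sketch only: the metric paths produced by the default settings.
    prefix = 'freifunk.nodes.'           # --graphite-prefix default
    metrics = 'clients,loadavg,uptime'   # --graphite-metrics default
    node_id = 'c46e1fb8d5e2'             # hypothetical node id

    for metric in metrics.split(','):
        print('{}{}.{}'.format(prefix, node_id, metric))
    # freifunk.nodes.c46e1fb8d5e2.clients
    # freifunk.nodes.c46e1fb8d5e2.loadavg
    # freifunk.nodes.c46e1fb8d5e2.uptime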

backend.py

@@ -18,8 +18,9 @@ from lib.batman import Batman
 from lib.rrddb import RRD
 from lib.nodelist import export_nodelist
 from lib.validate import validate_nodeinfos
+from lib.graphite import Graphite

-NODES_VERSION = 1
+NODES_VERSION = 2
 GRAPH_VERSION = 1
@@ -58,71 +59,72 @@ def main(params):
     # read nodedb state from node.json
     try:
-        with open(nodes_fn, 'r') as nodedb_handle:
+        with open(nodes_fn, 'r', encoding=('UTF-8')) as nodedb_handle:
             nodedb = json.load(nodedb_handle)
     except IOError:
         nodedb = {'nodes': dict()}

     # flush nodedb if it uses the old format
     if 'links' in nodedb:
-        nodedb = {'nodes': dict()}
+        nodedb = {'nodes': []}

     # set version we're going to output
     nodedb['version'] = NODES_VERSION

     # update timestamp and assume all nodes are offline
     nodedb['timestamp'] = now.isoformat()
-    for node_id, node in nodedb['nodes'].items():
+    for node in nodedb['nodes']:
         node['flags']['online'] = False

+    nodesdict = {}
+    for node in nodedb['nodes']:
+        nodesdict[node['nodeinfo']['node_id']] = node
+
     # integrate alfred nodeinfo
     for alfred in alfred_instances:
         nodeinfo = validate_nodeinfos(alfred.nodeinfo())
-        nodes.import_nodeinfo(nodedb['nodes'], nodeinfo,
+        nodes.import_nodeinfo(nodesdict, nodeinfo,
                               now, assume_online=True)

     # integrate static aliases data
     for aliases in params['aliases']:
         with open(aliases, 'r') as f:
             nodeinfo = validate_nodeinfos(json.load(f))
-            nodes.import_nodeinfo(nodedb['nodes'], nodeinfo,
+            nodes.import_nodeinfo(nodesdict, nodeinfo,
                                   now, assume_online=False)

-    nodes.reset_statistics(nodedb['nodes'])
+    nodes.reset_statistics(nodesdict)
     for alfred in alfred_instances:
-        nodes.import_statistics(nodedb['nodes'], alfred.statistics())
+        nodes.import_statistics(nodesdict, alfred.statistics())

-    # acquire gwl and visdata for each batman instance
+    # acquire visdata for each batman instance
     mesh_info = []
     for batman in batman_instances:
         vd = batman.vis_data()
-        gwl = batman.gateway_list()
-
-        mesh_info.append((vd, gwl))
+        mesh_info.append(vd)

     # update nodedb from batman-adv data
-    for vd, gwl in mesh_info:
-        nodes.import_mesh_ifs_vis_data(nodedb['nodes'], vd)
-        nodes.import_vis_clientcount(nodedb['nodes'], vd)
-        nodes.mark_vis_data_online(nodedb['nodes'], vd, now)
-        nodes.mark_gateways(nodedb['nodes'], gwl)
+    for vd in mesh_info:
+        nodes.import_mesh_ifs_vis_data(nodesdict, vd)
+        nodes.import_vis_clientcount(nodesdict, vd)
+        nodes.mark_vis_data_online(nodesdict, vd, now)

     # clear the nodedb from nodes that have not been online in $prune days
     if params['prune']:
-        nodes.prune_nodes(nodedb['nodes'], now, params['prune'])
+        nodes.prune_nodes(nodesdict, now, params['prune'])

     # build nxnetworks graph from nodedb and visdata
     batadv_graph = nx.DiGraph()
-    for vd, gwl in mesh_info:
-        graph.import_vis_data(batadv_graph, nodedb['nodes'], vd)
+    for vd in mesh_info:
+        graph.import_vis_data(batadv_graph, nodesdict, vd)

     # force mac addresses to be vpn-link only (like gateways for example)
     if params['vpn']:
         graph.mark_vpn(batadv_graph, frozenset(params['vpn']))

+    nodedb['nodes'] = list(nodesdict.values())
+
     def extract_tunnel(nodes):
         macs = set()
-        for id, node in nodes.items():
+        for node in nodes:
             try:
                 for mac in node["nodeinfo"]["network"]["mesh"]["bat0"]["interfaces"]["tunnel"]:
                     macs.add(mac)
@@ -149,11 +151,14 @@ def main(params):
     with open(nodelist_fn, 'w') as f:
         json.dump(export_nodelist(now, nodedb), f)

+    # optional Graphite integration
+    if params['graphite']:
+        graphite = Graphite(params['graphite_host'], params['graphite_port'])
+        graphite.update(params['graphite_prefix'], params['graphite_metrics'], nodedb['nodes'])
+
     # optional rrd graphs (trigger with --rrd)
     if params['rrd']:
-        script_directory = os.path.dirname(os.path.realpath(__file__))
-        rrd = RRD(os.path.join(script_directory, 'nodedb'),
-                  os.path.join(params['dest_dir'], 'nodes'))
+        rrd = RRD(params['rrd_path'], os.path.join(params['dest_dir'], 'nodes'))
         rrd.update_database(nodedb['nodes'])
         rrd.update_images()
@@ -176,10 +181,25 @@ if __name__ == '__main__':
                         help='Assume MAC addresses are part of vpn')
     parser.add_argument('-p', '--prune', metavar='DAYS', type=int,
                         help='forget nodes offline for at least DAYS')
+    parser.add_argument('--rrd-path', default=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'nodedb'),
+                        help='path to RRD files')
     parser.add_argument('--with-rrd', dest='rrd', action='store_true',
                         default=False,
                         help='enable the rendering of RRD graphs (cpu '
                              'intensive)')

+    # Graphite integration
+    graphite = parser.add_argument_group('graphite integration')
+    graphite.add_argument('--with-graphite', dest='graphite', action='store_true', default=False,
+                          help='Send statistical data to graphite backend')
+    graphite.add_argument('--graphite-host', dest='graphite_host', default="localhost",
+                          help='Hostname of the machine running graphite')
+    graphite.add_argument('--graphite-port', dest='graphite_port', default="2003", type=int,
+                          help='Port of the carbon daemon')
+    graphite.add_argument('--graphite-prefix', dest='graphite_prefix', default="freifunk.nodes.",
+                          help='Storage prefix (default value: \'freifunk.nodes.\')')
+    graphite.add_argument('--graphite-metrics', dest='graphite_metrics', default="clients,loadavg,uptime",
+                          help='Comma separated list of metrics to store (default value: \'clients,loadavg,uptime\')')
+
     options = vars(parser.parse_args())
     main(options)
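
The heart of the change above is a round trip between the list that nodes.json version 2 stores and the node_id-keyed dict that the `lib.nodes` helpers still expect. Reduced to a sketch (node entries abbreviated, values invented):

    # nodes.json v2 keeps nodes in a list
    nodedb = {'nodes': [{'nodeinfo': {'node_id': 'c46e1fb8d5e2'},
                         'flags': {'online': False}}]}

    # build a node_id-keyed dict so the helpers can look nodes up by id
    nodesdict = {n['nodeinfo']['node_id']: n for n in nodedb['nodes']}

    # ... import_nodeinfo / import_statistics / prune_nodes mutate nodesdict ...

    # flatten back to the list shape before writing nodes.json
    nodedb['nodes'] = list(nodesdict.values())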

lib/graphite.py (new file, 47 lines)

@@ -0,0 +1,47 @@
+import socket
+import time
+
+
+class Graphite(object):
+    def __init__(self, hostname, port):
+        self.hostname = hostname
+        self.port = int(port)
+
+    def flatten_dict(self, d):
+        def expand(key, value):
+            if isinstance(value, dict):
+                return [('{}.{}'.format(key, k), v) for k, v in self.flatten_dict(value).items()]
+            else:
+                return [(key, value)]
+
+        items = [item for k, v in d.items() for item in expand(k, v)]
+
+        return dict(items)
+
+    def update(self, prefix, metrics, nodes):
+        timestamp = int(time.time())
+        sock = socket.socket()
+        sock.connect((self.hostname, self.port))
+        for node in nodes:
+            try:
+                if node['flags']['online']:
+                    stats = self.flatten_dict(node['statistics'])
+                    for metric in metrics.split(','):
+                        try:
+                            msg = '{}{}.{} {} {}\n'.format(
+                                prefix,
+                                node['nodeinfo']['node_id'].replace(' ', '_'),
+                                metric.replace(' ', '_'),
+                                stats[metric],
+                                timestamp
+                            )
+                            sock.send(msg.encode('utf-8'))
+                        except KeyError:
+                            pass
+            except KeyError:
+                pass
+        sock.close()
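
A note on what this new module does: `flatten_dict` turns nested statistics into dotted metric names, and `update()` emits one line per metric in carbon's plaintext protocol (`<path> <value> <timestamp>`). A small sketch with invented statistics:

    from lib.graphite import Graphite

    g = Graphite('localhost', 2003)   # nothing connects until update() is called
    print(g.flatten_dict({'clients': 5, 'traffic': {'tx': {'bytes': 1024}}}))
    # {'clients': 5, 'traffic.tx.bytes': 1024}

    # for the 'clients' metric of a node with id 'c46e1fb8d5e2', update()
    # would send a line like:
    # freifunk.nodes.c46e1fb8d5e2.clients 5 1441742246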

lib/nodelist.py

@@ -1,9 +1,9 @@
 def export_nodelist(now, nodedb):
     nodelist = list()

-    for node_id, node in nodedb["nodes"].items():
+    for node in nodedb["nodes"]:
         node_out = dict()
-        node_out["id"] = node_id
+        node_out["id"] = node["nodeinfo"]["node_id"]
         node_out["name"] = node["nodeinfo"]["hostname"]

         if "location" in node["nodeinfo"]:
@@ -13,6 +13,9 @@ def export_nodelist(now, nodedb):
         node_out["status"] = dict()
         node_out["status"]["online"] = node["flags"]["online"]

+        if "firstseen" in node:
+            node_out["status"]["firstcontact"] = node["firstseen"]
+
         if "lastseen" in node:
             node_out["status"]["lastcontact"] = node["lastseen"]

lib/nodes.py

@@ -61,7 +61,6 @@ def import_nodeinfo(nodes, nodeinfos, now, assume_online=False):
         node = nodes.setdefault(nodeinfo['node_id'], {'flags': dict()})
         node['nodeinfo'] = nodeinfo
         node['flags']['online'] = False
-        node['flags']['gateway'] = False

         if assume_online:
             mark_online(node, now)
@@ -86,11 +85,10 @@ def import_statistics(nodes, stats):
     stats = filter(lambda d: d['node_id'] in nodes, stats)
     for node, stats in map(lambda d: (nodes[d['node_id']], d), stats):
         add(node, stats, 'clients', ['clients', 'total'])
-        add(node, stats, 'gateway', ['gateway'], lambda d: macs.get(d, d))
         add(node, stats, 'uptime', ['uptime'])
         add(node, stats, 'loadavg', ['loadavg'])
         add(node, stats, 'memory_usage', ['memory'],
-            lambda d: 1 - d['free'] / d['total'])
+            lambda d: 1 - (d['free'] + d['buffers'] + d['cached']) / d['total'])
         add(node, stats, 'rootfs_usage', ['rootfs_usage'])
         add(node, stats, 'traffic', ['traffic'])
@@ -152,14 +150,6 @@ def import_vis_clientcount(nodes, vis_data):
             nodes[node_id]['statistics'].setdefault('clients', clientcount)


-def mark_gateways(nodes, gateways):
-    macs = build_mac_table(nodes)
-    gateways = filter(lambda d: d in macs, gateways)
-    for node in map(lambda d: nodes[macs[d]], gateways):
-        node['flags']['gateway'] = True
-
-
 def mark_vis_data_online(nodes, vis_data, now):
     macs = build_mac_table(nodes)
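
The new `memory_usage` lambda counts buffers and page cache as reclaimable, per commit 823b64b8ba ("memory_usage: assume buffers and cached to be free"). A worked example with invented numbers:

    # memory statistics as reported by a node (kB; invented values)
    mem = {'total': 512000, 'free': 80000, 'buffers': 32000, 'cached': 144000}

    old_usage = 1 - mem['free'] / mem['total']   # 0.84375: cache counted as used
    new_usage = 1 - (mem['free'] + mem['buffers'] + mem['cached']) / mem['total']
    # 0.5: buffers and cache treated as free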

lib/rrddb.py

@@ -28,15 +28,18 @@ class RRD(object):
             os.mkdir(self.imagePath)

     def update_database(self, nodes):
-        online_nodes = dict(filter(
-            lambda d: d[1]['flags']['online'], nodes.items()))
-        client_count = sum(map(
-            lambda d: d['statistics']['clients'], online_nodes.values()))
-
-        self.globalDb.update(len(online_nodes), client_count)
-        for node_id, node in online_nodes.items():
-            rrd = NodeRRD(os.path.join(self.dbPath, node_id + '.rrd'), node)
-            rrd.update()
+        node_count = 0
+        client_count = 0
+        for node in nodes:
+            try:
+                if node['flags']['online']:
+                    node_count += 1
+                    client_count += node['statistics']['clients']
+                rrd = NodeRRD(os.path.join(self.dbPath, node['nodeinfo']['node_id'] + '.rrd'), node)
+                rrd.update()
+            except KeyError:
+                pass
+        self.globalDb.update(node_count, client_count)

     def update_images(self):
         self.globalDb.graph(os.path.join(self.imagePath, "globalGraph.png"),
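
Two behavioral consequences of the rewritten loop (assuming the indentation above, where the NodeRRD update sits outside the `if`): offline nodes now get their RRDs updated as well, and any KeyError skips the rest of that node's iteration, which is the "ignore key errors" part of commit ddcf8fbead. A sketch of both paths with invented nodes:

    offline = {'flags': {'online': False}, 'statistics': {},
               'nodeinfo': {'node_id': 'c46e1fb8d5e2'}}
    # offline: the counting branch is skipped, but NodeRRD(...).update() still runs

    broken = {'flags': {'online': True}, 'statistics': {},  # no 'clients' key
              'nodeinfo': {'node_id': 'deadbeefcafe'}}
    # broken: node['statistics']['clients'] raises KeyError, the except clause
    # swallows it, and this node's RRD update is skipped too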