Added graphite support for storage of statistical data

dev^2
Michael Rüttgers 2015-09-01 11:47:00 +02:00 committed by Jan-Philipp Litza
parent 3b8308722f
commit 84fe43f5dc
3 changed files with 106 additions and 0 deletions


@@ -122,3 +122,43 @@ to your webserver.

    jq '.nodes = (.nodes | to_entries | map(.value)) | .version = 2' \
      < nodes.json > nodes.json.new
    mv nodes.json.new nodes.json

# Graphite support

## Command line arguments

Running `backend.py` with `--with-graphite` enables Graphite support for storing statistical data:

    graphite integration:
      --with-graphite       Send statistical data to graphite backend
      --graphite-host GRAPHITE_HOST
                            Hostname of the machine running graphite
      --graphite-port GRAPHITE_PORT
                            Port of the carbon daemon
      --graphite-prefix GRAPHITE_PREFIX
                            Storage prefix (default value: 'freifunk.nodes.')
      --graphite-metrics GRAPHITE_METRICS
                            Comma separated list of metrics to store (default
                            value: 'clients,loadavg,uptime')
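
If none of these options are given, the graphite-related defaults (as defined in the argparse group added to `backend.py` below) are:

    {'graphite': False,
     'graphite_host': 'localhost',
     'graphite_port': 2003,
     'graphite_prefix': 'freifunk.nodes.',
     'graphite_metrics': 'clients,loadavg,uptime'}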

## Graphite configuration

### storage-schemas.conf

    [freifunk_node_stats]
    pattern = ^freifunk\.nodes\.
    retentions = 60s:1d,5min:7d,1h:30d,1d:4y

### storage-aggregation.conf

    [freifunk_node_stats_loadavg]
    pattern = ^freifunk\.nodes\..*\.loadavg$
    aggregationMethod = average

    [freifunk_node_stats_clients]
    pattern = ^freifunk\.nodes\..*\.clients$
    aggregationMethod = max

    [freifunk_node_stats_uptime]
    pattern = ^freifunk\.nodes\..*\.uptime$
    aggregationMethod = last
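
The rules above apply to metric paths of the form `freifunk.nodes.<node_id>.<metric>`, sent one line at a time over carbon's plaintext protocol. As a quick check that carbon accepts such data, a single line can be sent by hand; the following is a minimal sketch that assumes a carbon daemon listening on localhost:2003 and uses a made-up node id:

    import socket
    import time

    # One plaintext-protocol line: "<path> <value> <timestamp>\n".
    # "deadbeefcafe" is an invented node id; the path matches the patterns above.
    line = 'freifunk.nodes.deadbeefcafe.clients 23 {}\n'.format(int(time.time()))

    sock = socket.socket()
    sock.connect(('localhost', 2003))
    sock.sendall(line.encode('utf-8'))
    sock.close()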


@@ -18,6 +18,7 @@ from lib.batman import Batman
from lib.rrddb import RRD
from lib.nodelist import export_nodelist
from lib.validate import validate_nodeinfos
from lib.graphite import Graphite

NODES_VERSION = 2
GRAPH_VERSION = 1
@@ -150,6 +151,11 @@ def main(params):
    with open(nodelist_fn, 'w') as f:
        json.dump(export_nodelist(now, nodedb), f)

    # optional Graphite integration
    if params['graphite']:
        graphite = Graphite(params['graphite_host'], params['graphite_port'])
        graphite.update(params['graphite_prefix'], params['graphite_metrics'], nodedb['nodes'])

    # optional rrd graphs (trigger with --rrd)
    if params['rrd']:
        rrd = RRD(params['rrd_path'], os.path.join(params['dest_dir'], 'nodes'))
@@ -182,5 +188,18 @@ if __name__ == '__main__':
                        help='enable the rendering of RRD graphs (cpu '
                             'intensive)')

    # Graphite integration
    graphite = parser.add_argument_group('graphite integration')
    graphite.add_argument('--with-graphite', dest='graphite', action='store_true', default=False,
                          help='Send statistical data to graphite backend')
    graphite.add_argument('--graphite-host', dest='graphite_host', default="localhost",
                          help='Hostname of the machine running graphite')
    graphite.add_argument('--graphite-port', dest='graphite_port', default="2003", type=int,
                          help='Port of the carbon daemon')
    graphite.add_argument('--graphite-prefix', dest='graphite_prefix', default="freifunk.nodes.",
                          help='Storage prefix (default value: \'freifunk.nodes.\')')
    graphite.add_argument('--graphite-metrics', dest='graphite_metrics', default="clients,loadavg,uptime",
                          help='Comma separated list of metrics to store (default value: \'clients,loadavg,uptime\')')

    options = vars(parser.parse_args())
    main(options)

lib/graphite.py Normal file

@@ -0,0 +1,47 @@
import socket
import time


class Graphite(object):
    def __init__(self, hostname, port):
        self.hostname = hostname
        self.port = int(port)

    def flatten_dict(self, d):
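        # For example (illustrative input):
        #   flatten_dict({'clients': 5, 'traffic': {'tx': {'bytes': 7}}})
        # returns {'clients': 5, 'traffic.tx.bytes': 7}, i.e. nested statistics
        # become dotted names that can be selected via --graphite-metrics.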
        def expand(key, value):
            if isinstance(value, dict):
                return [('{}.{}'.format(key, k), v) for k, v in self.flatten_dict(value).items()]
            else:
                return [(key, value)]

        items = [item for k, v in d.items() for item in expand(k, v)]
        return dict(items)
    def update(self, prefix, metrics, nodes):
        timestamp = int(time.time())

        sock = socket.socket()
        sock.connect((self.hostname, self.port))

        for node in nodes:
            try:
                if node['flags']['online']:
                    stats = self.flatten_dict(node['statistics'])
                    for metric in metrics.split(','):
                        try:
                            # carbon plaintext protocol: "<path> <value> <timestamp>\n"
                            msg = '{}{}.{} {} {}\n'.format(
                                prefix,
                                node['nodeinfo']['node_id'].replace(' ', '_'),
                                metric.replace(' ', '_'),
                                stats[metric],
                                timestamp
                            )
                            sock.send(msg.encode('utf-8'))
                        except KeyError:
                            # metric (or node_id) not available for this node
                            pass
            except KeyError:
                # node has no flags/statistics entry
                pass

        sock.close()
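
For reference, a minimal usage sketch of the new class (the node record below is invented and shows only the fields `update()` reads; it assumes a carbon daemon reachable on localhost:2003):

    from lib.graphite import Graphite

    # Illustrative node entry: update() only reads flags.online, statistics and
    # nodeinfo.node_id; real entries in nodedb['nodes'] carry more fields.
    nodes = [{
        'nodeinfo': {'node_id': '60e3272f92b2'},
        'flags': {'online': True},
        'statistics': {'clients': 11, 'loadavg': 0.42, 'uptime': 86400},
    }]

    graphite = Graphite('localhost', 2003)
    graphite.update('freifunk.nodes.', 'clients,loadavg,uptime', nodes)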