"""Support for Zabbix."""
from contextlib import suppress
import json
import logging
import math
import queue
import threading
import time
from urllib.error import HTTPError
from urllib.parse import urljoin

# ZabbixAPI speaks to the Zabbix frontend over its JSON-RPC API, while
# ZabbixSender/ZabbixMetric implement the sender (trapper) protocol used to
# push values to the Zabbix server.
from pyzabbix import ZabbixAPI, ZabbixAPIException, ZabbixMetric, ZabbixSender
import voluptuous as vol

from homeassistant.const import (
    CONF_HOST,
    CONF_PASSWORD,
    CONF_PATH,
    CONF_SSL,
    CONF_USERNAME,
    EVENT_HOMEASSISTANT_STOP,
    EVENT_STATE_CHANGED,
    STATE_UNAVAILABLE,
    STATE_UNKNOWN,
)
from homeassistant.core import callback
from homeassistant.helpers import event as event_helper, state as state_helper
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entityfilter import (
    INCLUDE_EXCLUDE_BASE_FILTER_SCHEMA,
    convert_include_exclude_filter,
)

_LOGGER = logging.getLogger(__name__)

# Name of the host entry in Zabbix that receives the published states.
CONF_PUBLISH_STATES_HOST = "publish_states_host"

DEFAULT_SSL = False
DEFAULT_PATH = "zabbix"
DOMAIN = "zabbix"

TIMEOUT = 5
RETRY_DELAY = 20
QUEUE_BACKLOG_SECONDS = 30
RETRY_INTERVAL = 60  # seconds
RETRY_MESSAGE = f"%s Retrying in {RETRY_INTERVAL} seconds."

BATCH_TIMEOUT = 1
BATCH_BUFFER_SIZE = 100

CONFIG_SCHEMA = vol.Schema(
    {
        DOMAIN: INCLUDE_EXCLUDE_BASE_FILTER_SCHEMA.extend(
            {
                vol.Required(CONF_HOST): cv.string,
                vol.Optional(CONF_PASSWORD): cv.string,
                vol.Optional(CONF_PATH, default=DEFAULT_PATH): cv.string,
                vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
                vol.Optional(CONF_USERNAME): cv.string,
                vol.Optional(CONF_PUBLISH_STATES_HOST): cv.string,
            }
        )
    },
    extra=vol.ALLOW_EXTRA,
)
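
# A minimal configuration.yaml sketch accepted by the schema above; the host,
# credentials, and entity names are illustrative, not defaults:
#
# zabbix:
#   host: zabbix.example.com
#   ssl: true
#   username: apiuser
#   password: apipass
#   publish_states_host: homeassistant
#   include:
#     entities:
#       - sensor.outside_temperature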


def setup(hass, config):
    """Set up the Zabbix component."""

    conf = config[DOMAIN]
    protocol = "https" if conf[CONF_SSL] else "http"

    url = urljoin(f"{protocol}://{conf[CONF_HOST]}", conf[CONF_PATH])
    username = conf.get(CONF_USERNAME)
    password = conf.get(CONF_PASSWORD)

    publish_states_host = conf.get(CONF_PUBLISH_STATES_HOST)

    entities_filter = convert_include_exclude_filter(conf)

    try:
        zapi = ZabbixAPI(url=url, user=username, password=password)
        _LOGGER.info("Connected to Zabbix API Version %s", zapi.api_version())
    except ZabbixAPIException as login_exception:
        # Authentication failed; retrying with the same credentials would not help.
        _LOGGER.error("Unable to login to the Zabbix API: %s", login_exception)
        return False
    except HTTPError as http_error:
        # The frontend is unreachable; schedule this whole setup to run again.
        _LOGGER.error("HTTPError when connecting to Zabbix API: %s", http_error)
        zapi = None
        _LOGGER.error(RETRY_MESSAGE, http_error)
        event_helper.call_later(hass, RETRY_INTERVAL, lambda _: setup(hass, config))
        return True

    # Expose the API object so other platforms (e.g. the zabbix sensor) can use it.
    hass.data[DOMAIN] = zapi

    def event_to_metrics(event, float_keys, string_keys):
        """Add an event to the outgoing Zabbix list."""
        state = event.data.get("new_state")
        if state is None or state.state in (STATE_UNKNOWN, "", STATE_UNAVAILABLE):
            return

        entity_id = state.entity_id
        if not entities_filter(entity_id):
            return

        floats = {}
        strings = {}
        try:
            _state_as_value = float(state.state)
            floats[entity_id] = _state_as_value
        except ValueError:
            try:
                _state_as_value = float(state_helper.state_as_number(state))
                floats[entity_id] = _state_as_value
            except ValueError:
                strings[entity_id] = state.state

        for key, value in state.attributes.items():
            # Try to cast each attribute value to a float; anything that is
            # not a finite number is stored as a string instead.
            attribute_id = f"{entity_id}/{key}"
            try:
                float_value = float(value)
            except (ValueError, TypeError):
                float_value = None
            if float_value is None or not math.isfinite(float_value):
                strings[attribute_id] = str(value)
            else:
                floats[attribute_id] = float_value

        metrics = []
        float_keys_count = len(float_keys)
        float_keys.update(floats)
        if len(float_keys) != float_keys_count:
            # A new numeric key appeared: resend the full discovery list so
            # Zabbix low-level discovery can create the matching items.
            floats_discovery = []
            for float_key in float_keys:
                floats_discovery.append({"{#KEY}": float_key})
            metric = ZabbixMetric(
                publish_states_host,
                "homeassistant.floats_discovery",
                json.dumps(floats_discovery),
            )
            metrics.append(metric)
        for key, value in floats.items():
            metric = ZabbixMetric(
                publish_states_host, f"homeassistant.float[{key}]", value
            )
            metrics.append(metric)

        string_keys.update(strings)
        return metrics
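
    # For illustration (entity and host names hypothetical): a state change of
    # sensor.outside_temperature to "21.5" with publish_states_host set to
    # "homeassistant" yields trapper metrics such as
    #   homeassistant.float[sensor.outside_temperature] = 21.5
    # plus a homeassistant.floats_discovery JSON payload like
    #   [{"{#KEY}": "sensor.outside_temperature"}, ...]
    # which is only resent when a numeric key is seen for the first time.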

    if publish_states_host:
        zabbix_sender = ZabbixSender(zabbix_server=conf[CONF_HOST])
        instance = ZabbixThread(hass, zabbix_sender, event_to_metrics)
        instance.setup(hass)

    return True
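

# Data flow when publish_states_host is set: _event_listener() timestamps each
# EVENT_STATE_CHANGED event on the event loop and queues it; the ZabbixThread
# worker drains the queue in batches via get_metrics() and pushes each batch
# with the blocking pyzabbix sender in write_to_zabbix(), keeping network I/O
# off the event loop.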


class ZabbixThread(threading.Thread):
    """A threaded event handler class."""

    MAX_TRIES = 3

    def __init__(self, hass, zabbix_sender, event_to_metrics):
        """Initialize the listener."""
        threading.Thread.__init__(self, name="Zabbix")
        self.queue = queue.Queue()
        self.zabbix_sender = zabbix_sender
        self.event_to_metrics = event_to_metrics
        self.write_errors = 0
        self.shutdown = False
        self.float_keys = set()
        self.string_keys = set()

    def setup(self, hass):
        """Set up the thread and start it."""
        hass.bus.listen(EVENT_STATE_CHANGED, self._event_listener)
        hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, self._shutdown)
        self.start()
        _LOGGER.debug("Started publishing state changes to Zabbix")

    def _shutdown(self, event):
        """Shut down the thread."""
        self.queue.put(None)
        self.join()

    @callback
    def _event_listener(self, event):
        """Listen for new messages on the bus and queue them for Zabbix."""
        item = (time.monotonic(), event)
        self.queue.put(item)

    def get_metrics(self):
        """Return a batch of events formatted for writing."""
        # With the defaults, events older than 30 + 3 * 20 = 90 seconds are
        # considered stale and dropped instead of being sent late.
        queue_seconds = QUEUE_BACKLOG_SECONDS + self.MAX_TRIES * RETRY_DELAY

        count = 0
        metrics = []

        dropped = 0

        with suppress(queue.Empty):
            while len(metrics) < BATCH_BUFFER_SIZE and not self.shutdown:
                # Block indefinitely for the first item, then only briefly, so
                # a partially filled batch is flushed after BATCH_TIMEOUT.
                timeout = None if count == 0 else BATCH_TIMEOUT
                item = self.queue.get(timeout=timeout)
                count += 1

                if item is None:
                    self.shutdown = True
                else:
                    timestamp, event = item
                    age = time.monotonic() - timestamp

                    if age < queue_seconds:
                        event_metrics = self.event_to_metrics(
                            event, self.float_keys, self.string_keys
                        )
                        if event_metrics:
                            metrics += event_metrics
                    else:
                        dropped += 1

        if dropped:
            _LOGGER.warning("Catching up, dropped %d old events", dropped)

        return count, metrics
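
    # Note the contract of get_metrics(): count is the number of queue items
    # consumed (each must be matched by queue.task_done() in run()), which can
    # exceed len(metrics) when events are filtered out or dropped as stale.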

    def write_to_zabbix(self, metrics):
        """Write preprocessed events to Zabbix, with retry."""

        for retry in range(self.MAX_TRIES + 1):
            try:
                self.zabbix_sender.send(metrics)

                if self.write_errors:
                    _LOGGER.error("Resumed, lost %d events", self.write_errors)
                    self.write_errors = 0

                _LOGGER.debug("Wrote %d metrics", len(metrics))
                break
            except OSError as err:
                if retry < self.MAX_TRIES:
                    time.sleep(RETRY_DELAY)
                else:
                    # Give up on this batch; log only on the first failure so
                    # a prolonged outage does not flood the log.
                    if not self.write_errors:
                        _LOGGER.error("Write error: %s", err)
                    self.write_errors += len(metrics)

    def run(self):
        """Process incoming events."""
        while not self.shutdown:
            count, metrics = self.get_metrics()
            if metrics:
                self.write_to_zabbix(metrics)
            for _ in range(count):
                self.queue.task_done()
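

# Standalone sketch of the same pyzabbix calls used above, runnable outside
# Home Assistant; the server and host names are hypothetical:
#
#   from pyzabbix import ZabbixMetric, ZabbixSender
#
#   sender = ZabbixSender(zabbix_server="zabbix.example.com")
#   packet = [ZabbixMetric("homeassistant", "homeassistant.float[sensor.x]", 21.5)]
#   print(sender.send(packet))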