Rework ondilo ico coordinator (#138204)

Martin Hjelmare 2025-02-10 20:36:10 +01:00 committed by GitHub
parent 20f6bd309e
commit a62619894a
5 changed files with 454 additions and 61 deletions


@@ -8,7 +8,7 @@ from homeassistant.helpers import config_entry_oauth2_flow
from .api import OndiloClient
from .config_flow import OndiloIcoOAuth2FlowHandler
from .const import DOMAIN
from .coordinator import OndiloIcoCoordinator
from .coordinator import OndiloIcoPoolsCoordinator
from .oauth_impl import OndiloOauth2Implementation
PLATFORMS = [Platform.SENSOR]
@@ -28,7 +28,7 @@ async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
)
)
coordinator = OndiloIcoCoordinator(
coordinator = OndiloIcoPoolsCoordinator(
hass, entry, OndiloClient(hass, entry, implementation)
)


@@ -1,7 +1,10 @@
"""Define an object to coordinate fetching Ondilo ICO data."""
from dataclasses import dataclass
from datetime import timedelta
from __future__ import annotations
import asyncio
from dataclasses import dataclass, field
from datetime import datetime, timedelta
import logging
from typing import Any
@@ -9,25 +12,37 @@ from ondilo import OndiloError
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers import device_registry as dr
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from homeassistant.util import dt as dt_util
from . import DOMAIN
from .api import OndiloClient
_LOGGER = logging.getLogger(__name__)
TIME_TO_NEXT_UPDATE = timedelta(hours=1, minutes=5)
UPDATE_LOCK = asyncio.Lock()
@dataclass
class OndiloIcoData:
"""Class for storing the data."""
class OndiloIcoPoolData:
"""Store the pools the data."""
ico: dict[str, Any]
pool: dict[str, Any]
measures_coordinator: OndiloIcoMeasuresCoordinator = field(init=False)
@dataclass
class OndiloIcoMeasurementData:
"""Store the measurement data for one pool."""
sensors: dict[str, Any]
class OndiloIcoCoordinator(DataUpdateCoordinator[dict[str, OndiloIcoData]]):
"""Class to manage fetching Ondilo ICO data from API."""
class OndiloIcoPoolsCoordinator(DataUpdateCoordinator[dict[str, OndiloIcoPoolData]]):
"""Fetch Ondilo ICO pools data from API."""
config_entry: ConfigEntry
@@ -39,45 +54,138 @@ class OndiloIcoCoordinator(DataUpdateCoordinator[dict[str, OndiloIcoData]]):
hass,
logger=_LOGGER,
config_entry=config_entry,
name=DOMAIN,
update_interval=timedelta(hours=1),
name=f"{DOMAIN}_pools",
update_interval=timedelta(minutes=20),
)
self.api = api
self.config_entry = config_entry
self._device_registry = dr.async_get(self.hass)
async def _async_update_data(self) -> dict[str, OndiloIcoData]:
"""Fetch data from API endpoint."""
async def _async_update_data(self) -> dict[str, OndiloIcoPoolData]:
"""Fetch pools data from API endpoint and update devices."""
known_pools: set[str] = set(self.data) if self.data else set()
try:
return await self.hass.async_add_executor_job(self._update_data)
async with UPDATE_LOCK:
data = await self.hass.async_add_executor_job(self._update_data)
except OndiloError as err:
raise UpdateFailed(f"Error communicating with API: {err}") from err
def _update_data(self) -> dict[str, OndiloIcoData]:
"""Fetch data from API endpoint."""
current_pools = set(data)
new_pools = current_pools - known_pools
for pool_id in new_pools:
pool_data = data[pool_id]
pool_data.measures_coordinator = OndiloIcoMeasuresCoordinator(
self.hass, self.config_entry, self.api, pool_id
)
self._device_registry.async_get_or_create(
config_entry_id=self.config_entry.entry_id,
identifiers={(DOMAIN, pool_data.ico["serial_number"])},
manufacturer="Ondilo",
model="ICO",
name=pool_data.pool["name"],
sw_version=pool_data.ico["sw_version"],
)
removed_pools = known_pools - current_pools
for pool_id in removed_pools:
pool_data = self.data.pop(pool_id)
await pool_data.measures_coordinator.async_shutdown()
device_entry = self._device_registry.async_get_device(
identifiers={(DOMAIN, pool_data.ico["serial_number"])}
)
if device_entry:
self._device_registry.async_update_device(
device_id=device_entry.id,
remove_config_entry_id=self.config_entry.entry_id,
)
for pool_id in current_pools:
pool_data = data[pool_id]
measures_coordinator = pool_data.measures_coordinator
measures_coordinator.set_next_refresh(pool_data)
if not measures_coordinator.data:
await measures_coordinator.async_refresh()
return data
def _update_data(self) -> dict[str, OndiloIcoPoolData]:
"""Fetch pools data from API endpoint."""
res = {}
pools = self.api.get_pools()
_LOGGER.debug("Pools: %s", pools)
error: OndiloError | None = None
for pool in pools:
pool_id = pool["id"]
if (data := self.data) and pool_id in data:
pool_data = res[pool_id] = data[pool_id]
pool_data.pool = pool
# Skip requesting new ICO data for known pools
# to avoid unnecessary API calls.
continue
try:
ico = self.api.get_ICO_details(pool_id)
if not ico:
_LOGGER.debug(
"The pool id %s does not have any ICO attached", pool_id
)
continue
sensors = self.api.get_last_pool_measures(pool_id)
except OndiloError as err:
error = err
_LOGGER.debug("Error communicating with API for %s: %s", pool_id, err)
continue
res[pool_id] = OndiloIcoData(
ico=ico,
pool=pool,
sensors={sensor["data_type"]: sensor["value"] for sensor in sensors},
)
if not ico:
_LOGGER.debug("The pool id %s does not have any ICO attached", pool_id)
continue
res[pool_id] = OndiloIcoPoolData(ico=ico, pool=pool)
if not res:
if error:
raise UpdateFailed(f"Error communicating with API: {error}") from error
raise UpdateFailed("No data available")
return res
class OndiloIcoMeasuresCoordinator(DataUpdateCoordinator[OndiloIcoMeasurementData]):
"""Fetch Ondilo ICO measurement data for one pool from API."""
def __init__(
self,
hass: HomeAssistant,
config_entry: ConfigEntry,
api: OndiloClient,
pool_id: str,
) -> None:
"""Initialize."""
super().__init__(
hass,
config_entry=config_entry,
logger=_LOGGER,
name=f"{DOMAIN}_measures_{pool_id}",
)
self.api = api
self._next_refresh: datetime | None = None
self._pool_id = pool_id
async def _async_update_data(self) -> OndiloIcoMeasurementData:
"""Fetch measurement data from API endpoint."""
async with UPDATE_LOCK:
data = await self.hass.async_add_executor_job(self._update_data)
if next_refresh := self._next_refresh:
now = dt_util.utcnow()
# If we've missed the next refresh, schedule a refresh in one hour.
if next_refresh <= now:
next_refresh = now + timedelta(hours=1)
self.update_interval = next_refresh - now
return data
def _update_data(self) -> OndiloIcoMeasurementData:
"""Fetch measurement data from API endpoint."""
try:
sensors = self.api.get_last_pool_measures(self._pool_id)
except OndiloError as err:
raise UpdateFailed(f"Error communicating with API: {err}") from err
return OndiloIcoMeasurementData(
sensors={sensor["data_type"]: sensor["value"] for sensor in sensors},
)
def set_next_refresh(self, pool_data: OndiloIcoPoolData) -> None:
"""Set next refresh of this coordinator."""
last_update = datetime.fromisoformat(pool_data.pool["updated_at"])
self._next_refresh = last_update + TIME_TO_NEXT_UPDATE
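For reference, the two-coordinator scheduling above reduces to simple datetime arithmetic: the pools coordinator polls every 20 minutes and stamps each pool with the device's `updated_at`, while each measures coordinator aims its next refresh at `updated_at` plus `TIME_TO_NEXT_UPDATE` (1 h 05 min) and falls back to a flat one-hour interval once that target has already passed. A minimal standalone sketch of that calculation (plain Python, no Home Assistant imports; `compute_update_interval` is a hypothetical helper named for illustration):

```python
from datetime import datetime, timedelta, timezone

TIME_TO_NEXT_UPDATE = timedelta(hours=1, minutes=5)

def compute_update_interval(updated_at: str, now: datetime) -> timedelta:
    """Mirror the measures coordinator: target updated_at + 1h05m,
    or fall back to one hour if that target is already in the past."""
    next_refresh = datetime.fromisoformat(updated_at) + TIME_TO_NEXT_UPDATE
    if next_refresh <= now:
        next_refresh = now + timedelta(hours=1)
    return next_refresh - now

now = datetime(2024, 1, 1, 1, 10, tzinfo=timezone.utc)
# Fresh pool: updated at 01:00, so the next refresh lands at 02:05.
print(compute_update_interval("2024-01-01T01:00:00+00:00", now))  # 0:55:00
# Stale pool: the target is long gone, so fall back to one hour.
print(compute_update_interval("2023-12-31T00:00:00+00:00", now))  # 1:00:00
```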


@@ -15,14 +15,18 @@ from homeassistant.const import (
UnitOfElectricPotential,
UnitOfTemperature,
)
from homeassistant.core import HomeAssistant
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.device_registry import DeviceInfo
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from homeassistant.helpers.typing import StateType
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DOMAIN
from .coordinator import OndiloIcoCoordinator, OndiloIcoData
from .coordinator import (
OndiloIcoMeasuresCoordinator,
OndiloIcoPoolData,
OndiloIcoPoolsCoordinator,
)
SENSOR_TYPES: tuple[SensorEntityDescription, ...] = (
SensorEntityDescription(
@@ -73,50 +77,67 @@ async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: AddEntitiesCallback
) -> None:
"""Set up the Ondilo ICO sensors."""
pools_coordinator: OndiloIcoPoolsCoordinator = hass.data[DOMAIN][entry.entry_id]
known_entities: set[str] = set()
coordinator: OndiloIcoCoordinator = hass.data[DOMAIN][entry.entry_id]
async_add_entities(get_new_entities(pools_coordinator, known_entities))
async_add_entities(
OndiloICO(coordinator, pool_id, description)
for pool_id, pool in coordinator.data.items()
for description in SENSOR_TYPES
if description.key in pool.sensors
)
@callback
def add_new_entities():
"""Add any new entities after update of the pools coordinator."""
async_add_entities(get_new_entities(pools_coordinator, known_entities))
entry.async_on_unload(pools_coordinator.async_add_listener(add_new_entities))
class OndiloICO(CoordinatorEntity[OndiloIcoCoordinator], SensorEntity):
@callback
def get_new_entities(
pools_coordinator: OndiloIcoPoolsCoordinator,
known_entities: set[str],
) -> list[OndiloICO]:
"""Return new Ondilo ICO sensor entities."""
entities = []
for pool_id, pool_data in pools_coordinator.data.items():
for description in SENSOR_TYPES:
measurement_id = f"{pool_id}-{description.key}"
if (
measurement_id in known_entities
or (data := pool_data.measures_coordinator.data) is None
or description.key not in data.sensors
):
continue
known_entities.add(measurement_id)
entities.append(
OndiloICO(
pool_data.measures_coordinator, description, pool_id, pool_data
)
)
return entities
class OndiloICO(CoordinatorEntity[OndiloIcoMeasuresCoordinator], SensorEntity):
"""Representation of a Sensor."""
_attr_has_entity_name = True
def __init__(
self,
coordinator: OndiloIcoCoordinator,
pool_id: str,
coordinator: OndiloIcoMeasuresCoordinator,
description: SensorEntityDescription,
pool_id: str,
pool_data: OndiloIcoPoolData,
) -> None:
"""Initialize sensor entity with data from coordinator."""
super().__init__(coordinator)
self.entity_description = description
self._pool_id = pool_id
data = self.pool_data
self._attr_unique_id = f"{data.ico['serial_number']}-{description.key}"
self._attr_unique_id = f"{pool_data.ico['serial_number']}-{description.key}"
self._attr_device_info = DeviceInfo(
identifiers={(DOMAIN, data.ico["serial_number"])},
manufacturer="Ondilo",
model="ICO",
name=data.pool["name"],
sw_version=data.ico["sw_version"],
identifiers={(DOMAIN, pool_data.ico["serial_number"])},
)
@property
def pool_data(self) -> OndiloIcoData:
"""Get pool data."""
return self.coordinator.data[self._pool_id]
@property
def native_value(self) -> StateType:
"""Last value of the sensor."""
return self.pool_data.sensors[self.entity_description.key]
return self.coordinator.data.sensors[self.entity_description.key]
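The setup above follows Home Assistant's usual pattern for dynamic entity creation: run the discovery function once at setup, then register it as a coordinator listener so every pools refresh can surface entities for newly appeared pools, with a shared `known_entities` set preventing duplicates. A framework-free sketch of that dedup-and-discover shape (all names here are illustrative placeholders, not the integration's API):

```python
# Sketch: each refresh hands in the latest data; only IDs not yet seen
# are returned, mimicking get_new_entities() + known_entities above.
def get_new_ids(data: dict[str, set[str]], known: set[str]) -> list[str]:
    new_ids = []
    for pool_id, sensor_keys in data.items():
        for key in sorted(sensor_keys):
            uid = f"{pool_id}-{key}"
            if uid in known:
                continue
            known.add(uid)
            new_ids.append(uid)
    return new_ids

known: set[str] = set()
print(get_new_ids({"pool1": {"ph", "temperature"}}, known))
# ['pool1-ph', 'pool1-temperature']
print(get_new_ids({"pool1": {"ph", "temperature"}, "pool2": {"orp"}}, known))
# ['pool2-orp']  (only the newly appeared pool produces entities)
```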


@@ -15,5 +15,5 @@
"latitude": 48.861783,
"longitude": 2.337421
},
"updated_at": "2024-01-01T01:00:00+0000"
"updated_at": "2024-01-01T01:05:00+0000"
}
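One detail worth noting about this fixture: the compact UTC offset (`+0000`, no colon) is only accepted by `datetime.fromisoformat` from Python 3.11 onward, and the measures scheduling above parses `updated_at` with exactly that call; Home Assistant's supported Python versions are new enough, so the fixture exercises the real parsing path. A quick check:

```python
from datetime import datetime

# Python 3.11+ parses the compact "+0000" offset; older versions raise ValueError.
print(datetime.fromisoformat("2024-01-01T01:05:00+0000"))
# 2024-01-01 01:05:00+00:00
```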


@@ -1,8 +1,10 @@
"""Test Ondilo ICO initialization."""
from datetime import datetime, timedelta
from typing import Any
from unittest.mock import MagicMock
from freezegun.api import FrozenDateTimeFactory
from ondilo import OndiloError
import pytest
from syrupy import SnapshotAssertion
@@ -13,7 +15,7 @@ from homeassistant.helpers import device_registry as dr
from . import setup_integration
from tests.common import MockConfigEntry
from tests.common import MockConfigEntry, async_fire_time_changed
async def test_devices(
@@ -63,6 +65,7 @@ async def test_get_pools_error(
async def test_init_with_no_ico_attached(
hass: HomeAssistant,
mock_ondilo_client: MagicMock,
device_registry: dr.DeviceRegistry,
config_entry: MockConfigEntry,
pool1: dict[str, Any],
) -> None:
@@ -73,14 +76,104 @@ async def test_init_with_no_ico_attached(
mock_ondilo_client.get_ICO_details.return_value = None
await setup_integration(hass, config_entry, mock_ondilo_client)
device_entries = dr.async_entries_for_config_entry(
device_registry, config_entry.entry_id
)
# No devices should be created
assert len(device_entries) == 0
# No sensor should be created
assert len(hass.states.async_all()) == 0
# We should not have tried to retrieve pool measures
mock_ondilo_client.get_last_pool_measures.assert_not_called()
assert config_entry.state is ConfigEntryState.SETUP_RETRY
assert config_entry.state is ConfigEntryState.LOADED
@pytest.mark.parametrize("api", ["get_ICO_details", "get_last_pool_measures"])
async def test_adding_pool_after_setup(
hass: HomeAssistant,
freezer: FrozenDateTimeFactory,
mock_ondilo_client: MagicMock,
device_registry: dr.DeviceRegistry,
config_entry: MockConfigEntry,
pool1: dict[str, Any],
two_pools: list[dict[str, Any]],
ico_details1: dict[str, Any],
ico_details2: dict[str, Any],
) -> None:
"""Test adding one pool after integration setup."""
mock_ondilo_client.get_pools.return_value = pool1
mock_ondilo_client.get_ICO_details.return_value = ico_details1
await setup_integration(hass, config_entry, mock_ondilo_client)
device_entries = dr.async_entries_for_config_entry(
device_registry, config_entry.entry_id
)
# One pool is created with 7 entities.
assert len(device_entries) == 1
assert len(hass.states.async_all()) == 7
mock_ondilo_client.get_pools.return_value = two_pools
mock_ondilo_client.get_ICO_details.return_value = ico_details2
# Trigger a refresh of the pools coordinator.
freezer.tick(timedelta(minutes=20))
async_fire_time_changed(hass)
await hass.async_block_till_done(wait_background_tasks=True)
device_entries = dr.async_entries_for_config_entry(
device_registry, config_entry.entry_id
)
# Two pools have been created with 7 entities each.
assert len(device_entries) == 2
assert len(hass.states.async_all()) == 14
async def test_removing_pool_after_setup(
hass: HomeAssistant,
freezer: FrozenDateTimeFactory,
mock_ondilo_client: MagicMock,
device_registry: dr.DeviceRegistry,
config_entry: MockConfigEntry,
pool1: dict[str, Any],
ico_details1: dict[str, Any],
) -> None:
"""Test removing one pool after integration setup."""
await setup_integration(hass, config_entry, mock_ondilo_client)
device_entries = dr.async_entries_for_config_entry(
device_registry, config_entry.entry_id
)
# Two pools are created with 7 entities each.
assert len(device_entries) == 2
assert len(hass.states.async_all()) == 14
mock_ondilo_client.get_pools.return_value = pool1
mock_ondilo_client.get_ICO_details.return_value = ico_details1
# Trigger a refresh of the pools coordinator.
freezer.tick(timedelta(minutes=20))
async_fire_time_changed(hass)
await hass.async_block_till_done(wait_background_tasks=True)
device_entries = dr.async_entries_for_config_entry(
device_registry, config_entry.entry_id
)
# One pool is left with 7 entities.
assert len(device_entries) == 1
assert len(hass.states.async_all()) == 7
@pytest.mark.parametrize(
("api", "devices", "config_entry_state"),
[
("get_ICO_details", 0, ConfigEntryState.SETUP_RETRY),
("get_last_pool_measures", 1, ConfigEntryState.LOADED),
],
)
async def test_details_error_all_pools(
hass: HomeAssistant,
mock_ondilo_client: MagicMock,
@@ -88,6 +181,8 @@ async def test_details_error_all_pools(
config_entry: MockConfigEntry,
pool1: dict[str, Any],
api: str,
devices: int,
config_entry_state: ConfigEntryState,
) -> None:
"""Test details and measures error for all pools."""
mock_ondilo_client.get_pools.return_value = pool1
@@ -100,8 +195,8 @@ async def test_details_error_all_pools(
device_registry, config_entry.entry_id
)
assert not device_entries
assert config_entry.state is ConfigEntryState.SETUP_RETRY
assert len(device_entries) == devices
assert config_entry.state is config_entry_state
async def test_details_error_one_pool(
@@ -131,12 +226,15 @@ async def test_details_error_one_pool(
async def test_measures_error_one_pool(
hass: HomeAssistant,
freezer: FrozenDateTimeFactory,
mock_ondilo_client: MagicMock,
device_registry: dr.DeviceRegistry,
config_entry: MockConfigEntry,
last_measures: list[dict[str, Any]],
) -> None:
"""Test measures error for one pool and success for the other."""
entity_id_1 = "sensor.pool_1_temperature"
entity_id_2 = "sensor.pool_2_temperature"
mock_ondilo_client.get_last_pool_measures.side_effect = [
OndiloError(
404,
@@ -151,4 +249,170 @@ async def test_measures_error_one_pool(
device_registry, config_entry.entry_id
)
assert len(device_entries) == 1
assert len(device_entries) == 2
# One pool returned an error, the other is ok.
# 7 entities are created for the second pool.
assert len(hass.states.async_all()) == 7
assert hass.states.get(entity_id_1) is None
assert hass.states.get(entity_id_2) is not None
# All pools now return measures.
mock_ondilo_client.get_last_pool_measures.side_effect = None
# Move time to next pools coordinator refresh.
freezer.tick(timedelta(minutes=20))
async_fire_time_changed(hass)
await hass.async_block_till_done(wait_background_tasks=True)
device_entries = dr.async_entries_for_config_entry(
device_registry, config_entry.entry_id
)
assert len(device_entries) == 2
# 14 entities in total, 7 entities per pool.
assert len(hass.states.async_all()) == 14
assert hass.states.get(entity_id_1) is not None
assert hass.states.get(entity_id_2) is not None
async def test_measures_scheduling(
hass: HomeAssistant,
freezer: FrozenDateTimeFactory,
mock_ondilo_client: MagicMock,
device_registry: dr.DeviceRegistry,
config_entry: MockConfigEntry,
) -> None:
"""Test refresh scheduling of measures coordinator."""
# Move time to 10 min after pool 1 was updated and 5 min after pool 2 was updated.
freezer.move_to("2024-01-01T01:10:00+00:00")
entity_id_1 = "sensor.pool_1_temperature"
entity_id_2 = "sensor.pool_2_temperature"
await setup_integration(hass, config_entry, mock_ondilo_client)
device_entries = dr.async_entries_for_config_entry(
device_registry, config_entry.entry_id
)
# Two pools are created with 7 entities each.
assert len(device_entries) == 2
assert len(hass.states.async_all()) == 14
state = hass.states.get(entity_id_1)
assert state is not None
assert state.last_reported == datetime.fromisoformat("2024-01-01T01:10:00+00:00")
state = hass.states.get(entity_id_2)
assert state is not None
assert state.last_reported == datetime.fromisoformat("2024-01-01T01:10:00+00:00")
# Tick time by 20 min.
# The measures coordinators for both pools should not have been refreshed again.
freezer.tick(timedelta(minutes=20))
async_fire_time_changed(hass)
await hass.async_block_till_done(wait_background_tasks=True)
state = hass.states.get(entity_id_1)
assert state is not None
assert state.last_reported == datetime.fromisoformat("2024-01-01T01:10:00+00:00")
state = hass.states.get(entity_id_2)
assert state is not None
assert state.last_reported == datetime.fromisoformat("2024-01-01T01:10:00+00:00")
# Move time to 65 min after pool 1 was last updated.
# This is 5 min after we expect pool 1 to be updated again.
# The measures coordinator for pool 1 should refresh at this time.
# The measures coordinator for pool 2 should not have been refreshed again.
# The pools coordinator has updated the last update time
# of the pools to a stale time that has already passed.
freezer.move_to("2024-01-01T02:05:00+00:00")
async_fire_time_changed(hass)
await hass.async_block_till_done(wait_background_tasks=True)
state = hass.states.get(entity_id_1)
assert state is not None
assert state.last_reported == datetime.fromisoformat("2024-01-01T02:05:00+00:00")
state = hass.states.get(entity_id_2)
assert state is not None
assert state.last_reported == datetime.fromisoformat("2024-01-01T01:10:00+00:00")
# Tick time by 5 min.
# The measures coordinator for pool 1 should not have been refreshed again.
# The measures coordinator for pool 2 should refresh at this time.
# The pools coordinator has updated the last update time
# of the pools to a stale time that has already passed.
freezer.tick(timedelta(minutes=5))
async_fire_time_changed(hass)
await hass.async_block_till_done(wait_background_tasks=True)
state = hass.states.get(entity_id_1)
assert state is not None
assert state.last_reported == datetime.fromisoformat("2024-01-01T02:05:00+00:00")
state = hass.states.get(entity_id_2)
assert state is not None
assert state.last_reported == datetime.fromisoformat("2024-01-01T02:10:00+00:00")
# Tick time by 55 min.
# The measures coordinator for pool 1 should refresh at this time.
# This is 1 hour after the last refresh of the measures coordinator for pool 1.
freezer.tick(timedelta(minutes=55))
async_fire_time_changed(hass)
await hass.async_block_till_done(wait_background_tasks=True)
state = hass.states.get(entity_id_1)
assert state is not None
assert state.last_reported == datetime.fromisoformat("2024-01-01T03:05:00+00:00")
state = hass.states.get(entity_id_2)
assert state is not None
assert state.last_reported == datetime.fromisoformat("2024-01-01T02:10:00+00:00")
# Tick time by 5 min.
# The measures coordinator for pool 2 should refresh at this time.
# This is 1 hour after the last refresh of the measures coordinator for pool 2.
freezer.tick(timedelta(minutes=5))
async_fire_time_changed(hass)
await hass.async_block_till_done(wait_background_tasks=True)
state = hass.states.get(entity_id_1)
assert state is not None
assert state.last_reported == datetime.fromisoformat("2024-01-01T03:05:00+00:00")
state = hass.states.get(entity_id_2)
assert state is not None
assert state.last_reported == datetime.fromisoformat("2024-01-01T03:10:00+00:00")
# Set an error on the pools coordinator endpoint.
# This will cause the pools coordinator to not update the next refresh.
# This should cause the measures coordinators to keep the 1 hour cadence.
mock_ondilo_client.get_pools.side_effect = OndiloError(
502,
(
"<html> <head><title>502 Bad Gateway</title></head> "
"<body> <center><h1>502 Bad Gateway</h1></center> </body> </html>"
),
)
# Tick time by 55 min.
# The measures coordinator for pool 1 should refresh at this time.
# This is 1 hour after the last refresh of the measures coordinator for pool 1.
freezer.tick(timedelta(minutes=55))
async_fire_time_changed(hass)
await hass.async_block_till_done(wait_background_tasks=True)
state = hass.states.get(entity_id_1)
assert state is not None
assert state.last_reported == datetime.fromisoformat("2024-01-01T04:05:00+00:00")
state = hass.states.get(entity_id_2)
assert state is not None
assert state.last_reported == datetime.fromisoformat("2024-01-01T03:10:00+00:00")
# Tick time by 5 min.
# The measures coordinator for pool 2 should refresh at this time.
# This is 1 hour after the last refresh of the measures coordinator for pool 2.
freezer.tick(timedelta(minutes=5))
async_fire_time_changed(hass)
await hass.async_block_till_done(wait_background_tasks=True)
state = hass.states.get(entity_id_1)
assert state is not None
assert state.last_reported == datetime.fromisoformat("2024-01-01T04:05:00+00:00")
state = hass.states.get(entity_id_2)
assert state is not None
assert state.last_reported == datetime.fromisoformat("2024-01-01T04:10:00+00:00")