core/tests/components/zha/test_switch.py

"""Test ZHA switch."""
from unittest.mock import call, patch

import pytest
from zhaquirks.const import (
    DEVICE_TYPE,
    ENDPOINTS,
    INPUT_CLUSTERS,
    OUTPUT_CLUSTERS,
    PROFILE_ID,
)
from zigpy.exceptions import ZigbeeException
import zigpy.profiles.zha as zha
from zigpy.quirks import CustomCluster, CustomDevice
import zigpy.types as t
import zigpy.zcl.clusters.general as general
from zigpy.zcl.clusters.manufacturer_specific import ManufacturerSpecificCluster
import zigpy.zcl.foundation as zcl_f

from homeassistant.components.switch import DOMAIN as SWITCH_DOMAIN
from homeassistant.components.zha.core.group import GroupMember
from homeassistant.const import STATE_OFF, STATE_ON, STATE_UNAVAILABLE, Platform
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component

from .common import (
    async_enable_traffic,
    async_find_group_entity_id,
    async_test_rejoin,
    async_wait_for_updates,
    find_entity_id,
    get_zha_gateway,
    send_attributes_report,
)
from .conftest import SIG_EP_INPUT, SIG_EP_OUTPUT, SIG_EP_TYPE

from tests.common import mock_coro

ON = 1
OFF = 0
IEEE_GROUPABLE_DEVICE = "01:2d:6f:00:0a:90:69:e8"
IEEE_GROUPABLE_DEVICE2 = "02:2d:6f:00:0a:90:69:e8"
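# NB: ON and OFF mirror the ZCL OnOff cluster command IDs asserted against in
# the Cluster.request calls below (On = 0x01, Off = 0x00); the IEEE strings are
# arbitrary unique addresses for the two group-capable mock devices.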


@pytest.fixture(autouse=True)
def switch_platform_only():
    """Only set up the switch and required base platforms to speed up tests."""
    with patch(
        "homeassistant.components.zha.PLATFORMS",
        (
            Platform.DEVICE_TRACKER,
            Platform.SENSOR,
            Platform.SELECT,
            Platform.SWITCH,
        ),
    ):
        yield


@pytest.fixture
def zigpy_device(zigpy_device_mock):
    """On/off switch zigpy device."""
    endpoints = {
        1: {
            SIG_EP_INPUT: [general.Basic.cluster_id, general.OnOff.cluster_id],
            SIG_EP_OUTPUT: [],
            SIG_EP_TYPE: zha.DeviceType.ON_OFF_SWITCH,
        }
    }
    return zigpy_device_mock(endpoints)
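

# A note on the endpoint signatures used throughout this file: SIG_EP_INPUT and
# SIG_EP_OUTPUT list the input/output ZCL cluster IDs on the endpoint, and
# SIG_EP_TYPE is the ZHA profile device type; zigpy_device_mock (see conftest)
# builds a fake zigpy device from this mapping.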


@pytest.fixture
async def coordinator(hass, zigpy_device_mock, zha_device_joined):
    """Coordinator ZHA device fixture."""
    zigpy_device = zigpy_device_mock(
        {
            1: {
                SIG_EP_INPUT: [],
                SIG_EP_OUTPUT: [],
                SIG_EP_TYPE: zha.DeviceType.COLOR_DIMMABLE_LIGHT,
            }
        },
        ieee="00:15:8d:00:02:32:4f:32",
        nwk=0x0000,  # 0x0000 is the network address reserved for the coordinator
        node_descriptor=b"\xf8\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff",
    )
    zha_device = await zha_device_joined(zigpy_device)
    zha_device.available = True
    return zha_device
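

# The group entity test below needs a coordinator device: ZHA creates Zigbee
# groups through the coordinator, which the gateway resolves via its
# coordinator_zha_device attribute.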


@pytest.fixture
async def device_switch_1(hass, zigpy_device_mock, zha_device_joined):
    """Test ZHA switch platform."""
    zigpy_device = zigpy_device_mock(
        {
            1: {
                SIG_EP_INPUT: [general.OnOff.cluster_id, general.Groups.cluster_id],
                SIG_EP_OUTPUT: [],
                SIG_EP_TYPE: zha.DeviceType.ON_OFF_SWITCH,
            }
        },
        ieee=IEEE_GROUPABLE_DEVICE,
    )
    zha_device = await zha_device_joined(zigpy_device)
    zha_device.available = True
    await hass.async_block_till_done()
    return zha_device


@pytest.fixture
async def device_switch_2(hass, zigpy_device_mock, zha_device_joined):
    """Test ZHA switch platform."""
    zigpy_device = zigpy_device_mock(
        {
            1: {
                SIG_EP_INPUT: [general.OnOff.cluster_id, general.Groups.cluster_id],
                SIG_EP_OUTPUT: [],
                SIG_EP_TYPE: zha.DeviceType.ON_OFF_SWITCH,
            }
        },
        ieee=IEEE_GROUPABLE_DEVICE2,
    )
    zha_device = await zha_device_joined(zigpy_device)
    zha_device.available = True
    await hass.async_block_till_done()
    return zha_device


async def test_switch(
    hass: HomeAssistant, zha_device_joined_restored, zigpy_device
) -> None:
    """Test ZHA switch platform."""
    zha_device = await zha_device_joined_restored(zigpy_device)
    cluster = zigpy_device.endpoints.get(1).on_off
    entity_id = await find_entity_id(Platform.SWITCH, zha_device, hass)
    assert entity_id is not None

    assert hass.states.get(entity_id).state == STATE_OFF
    await async_enable_traffic(hass, [zha_device], enabled=False)
    # test that the switch was created and that its state is unavailable
    assert hass.states.get(entity_id).state == STATE_UNAVAILABLE

    # allow traffic to flow through the gateway and device
    await async_enable_traffic(hass, [zha_device])

    # test that the state has changed from unavailable to off
    assert hass.states.get(entity_id).state == STATE_OFF

    # turn on at switch (keys are attribute IDs; 0x0000 is on_off, the other
    # entries are unrelated attribute reports that should be ignored)
    await send_attributes_report(hass, cluster, {1: 0, 0: 1, 2: 2})
    assert hass.states.get(entity_id).state == STATE_ON

    # turn off at switch
    await send_attributes_report(hass, cluster, {1: 1, 0: 0, 2: 2})
    assert hass.states.get(entity_id).state == STATE_OFF

    # turn on from HA
    with patch(
        "zigpy.zcl.Cluster.request",
        return_value=mock_coro([0x00, zcl_f.Status.SUCCESS]),
    ):
        # turn on via UI
        await hass.services.async_call(
            SWITCH_DOMAIN, "turn_on", {"entity_id": entity_id}, blocking=True
        )
        assert len(cluster.request.mock_calls) == 1
        assert cluster.request.call_args == call(
            False,
            ON,
            cluster.commands_by_name["on"].schema,
            expect_reply=True,
            manufacturer=None,
            tries=1,
            tsn=None,
        )

    # turn off from HA
    with patch(
        "zigpy.zcl.Cluster.request",
        return_value=mock_coro([0x01, zcl_f.Status.SUCCESS]),
    ):
        # turn off via UI
        await hass.services.async_call(
            SWITCH_DOMAIN, "turn_off", {"entity_id": entity_id}, blocking=True
        )
        assert len(cluster.request.mock_calls) == 1
        assert cluster.request.call_args == call(
            False,
            OFF,
            cluster.commands_by_name["off"].schema,
            expect_reply=True,
            manufacturer=None,
            tries=1,
            tsn=None,
        )

    # test joining a new switch to the network and HA
    await async_test_rejoin(hass, zigpy_device, [cluster], (1,))
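

# The quirk below exercises ZHA's configurable-switch path: the entity is
# backed by a manufacturer-specific Tuya cluster attribute rather than the
# standard OnOff cluster. The `replacement` endpoint map is zigpy's quirk
# mechanism for swapping the device's real clusters for TuyaManufCluster.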


class WindowDetectionFunctionQuirk(CustomDevice):
    """Quirk with window detection function attribute."""

    class TuyaManufCluster(CustomCluster, ManufacturerSpecificCluster):
        """Tuya manufacturer specific cluster."""

        cluster_id = 0xEF00
        ep_attribute = "tuya_manufacturer"
        attributes = {
            0xEF01: ("window_detection_function", t.Bool),
            0xEF02: ("window_detection_function_inverter", t.Bool),
        }

        def __init__(self, *args, **kwargs):
            """Initialize with task."""
            super().__init__(*args, **kwargs)
            self._attr_cache.update(
                {0xEF01: False}
            )  # entity won't be created without this

    replacement = {
        ENDPOINTS: {
            1: {
                PROFILE_ID: zha.PROFILE_ID,
                DEVICE_TYPE: zha.DeviceType.ON_OFF_SWITCH,
                INPUT_CLUSTERS: [general.Basic.cluster_id, TuyaManufCluster],
                OUTPUT_CLUSTERS: [],
            },
        }
    }


@pytest.fixture
async def zigpy_device_tuya(hass, zigpy_device_mock, zha_device_joined):
    """Tuya switch zigpy device."""
    zigpy_device = zigpy_device_mock(
        {
            1: {
                SIG_EP_INPUT: [general.Basic.cluster_id],
                SIG_EP_OUTPUT: [],
                SIG_EP_TYPE: zha.DeviceType.ON_OFF_SWITCH,
            }
        },
        manufacturer="_TZE200_b6wax7g0",
        quirk=WindowDetectionFunctionQuirk,
    )
    zha_device = await zha_device_joined(zigpy_device)
    zha_device.available = True
    await hass.async_block_till_done()
    return zigpy_device
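

# DEFAULT_UPDATE_GROUP_FROM_CHILD_DELAY debounces recalculation of group state
# when member states change; patching it to 0 below makes the group test
# deterministic instead of racing the debouncer.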
@patch(
    "homeassistant.components.zha.entity.DEFAULT_UPDATE_GROUP_FROM_CHILD_DELAY",
    new=0,
)
async def test_zha_group_switch_entity(
    hass: HomeAssistant, device_switch_1, device_switch_2, coordinator
) -> None:
    """Test the switch entity for a ZHA group."""
    zha_gateway = get_zha_gateway(hass)
    assert zha_gateway is not None
    zha_gateway.coordinator_zha_device = coordinator
    coordinator._zha_gateway = zha_gateway
    device_switch_1._zha_gateway = zha_gateway
    device_switch_2._zha_gateway = zha_gateway
    member_ieee_addresses = [device_switch_1.ieee, device_switch_2.ieee]
    members = [
        GroupMember(device_switch_1.ieee, 1),
        GroupMember(device_switch_2.ieee, 1),
    ]

    # test creating a group with 2 members
    zha_group = await zha_gateway.async_create_zigpy_group("Test Group", members)
    await hass.async_block_till_done()

    assert zha_group is not None
    assert len(zha_group.members) == 2
    for member in zha_group.members:
        assert member.device.ieee in member_ieee_addresses
        assert member.group == zha_group
        assert member.endpoint is not None

    entity_id = async_find_group_entity_id(hass, Platform.SWITCH, zha_group)
    assert hass.states.get(entity_id) is not None

    group_cluster_on_off = zha_group.endpoint[general.OnOff.cluster_id]
    dev1_cluster_on_off = device_switch_1.device.endpoints[1].on_off
    dev2_cluster_on_off = device_switch_2.device.endpoints[1].on_off

    await async_enable_traffic(
        hass, [device_switch_1, device_switch_2], enabled=False
    )
    await async_wait_for_updates(hass)

    # test that the switches were created and that they are unavailable
    assert hass.states.get(entity_id).state == STATE_UNAVAILABLE

    # allow traffic to flow through the gateway and device
    await async_enable_traffic(hass, [device_switch_1, device_switch_2])
    await async_wait_for_updates(hass)

    # test that the switches were created and are off
    assert hass.states.get(entity_id).state == STATE_OFF

    # turn on from HA
    with patch(
        "zigpy.zcl.Cluster.request",
        return_value=mock_coro([0x00, zcl_f.Status.SUCCESS]),
    ):
        # turn on via UI
        await hass.services.async_call(
            SWITCH_DOMAIN, "turn_on", {"entity_id": entity_id}, blocking=True
        )
        assert len(group_cluster_on_off.request.mock_calls) == 1
        assert group_cluster_on_off.request.call_args == call(
            False,
            ON,
            group_cluster_on_off.commands_by_name["on"].schema,
            expect_reply=True,
            manufacturer=None,
            tries=1,
            tsn=None,
        )
    assert hass.states.get(entity_id).state == STATE_ON

    # turn off from HA
    with patch(
        "zigpy.zcl.Cluster.request",
        return_value=mock_coro([0x01, zcl_f.Status.SUCCESS]),
    ):
        # turn off via UI
        await hass.services.async_call(
            SWITCH_DOMAIN, "turn_off", {"entity_id": entity_id}, blocking=True
        )
        assert len(group_cluster_on_off.request.mock_calls) == 1
        assert group_cluster_on_off.request.call_args == call(
            False,
            OFF,
            group_cluster_on_off.commands_by_name["off"].schema,
            expect_reply=True,
            manufacturer=None,
            tries=1,
            tsn=None,
        )
    assert hass.states.get(entity_id).state == STATE_OFF

    # test some of the group logic to make sure we key off states correctly
    await send_attributes_report(hass, dev1_cluster_on_off, {0: 1})
    await send_attributes_report(hass, dev2_cluster_on_off, {0: 1})
    await async_wait_for_updates(hass)

    # test that group switch is on
    assert hass.states.get(entity_id).state == STATE_ON

    await send_attributes_report(hass, dev1_cluster_on_off, {0: 0})
    await async_wait_for_updates(hass)

    # test that group switch is still on
    assert hass.states.get(entity_id).state == STATE_ON

    await send_attributes_report(hass, dev2_cluster_on_off, {0: 0})
    await async_wait_for_updates(hass)

    # test that group switch is now off
    assert hass.states.get(entity_id).state == STATE_OFF

    await send_attributes_report(hass, dev1_cluster_on_off, {0: 1})
    await async_wait_for_updates(hass)

    # test that group switch is now back on
    assert hass.states.get(entity_id).state == STATE_ON
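
# The sequence above encodes the group-state rule for ZHA switch groups: the
# group entity reports on while at least one member is on and turns off only
# once every member reports off.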


async def test_switch_configurable(
    hass: HomeAssistant, zha_device_joined_restored, zigpy_device_tuya
) -> None:
    """Test ZHA configurable switch platform."""
    zha_device = await zha_device_joined_restored(zigpy_device_tuya)
    cluster = zigpy_device_tuya.endpoints.get(1).tuya_manufacturer
    entity_id = await find_entity_id(Platform.SWITCH, zha_device, hass)
    assert entity_id is not None

    assert hass.states.get(entity_id).state == STATE_OFF
    await async_enable_traffic(hass, [zha_device], enabled=False)
    # test that the switch was created and that its state is unavailable
    assert hass.states.get(entity_id).state == STATE_UNAVAILABLE

    # allow traffic to flow through the gateway and device
    await async_enable_traffic(hass, [zha_device])

    # test that the state has changed from unavailable to off
    assert hass.states.get(entity_id).state == STATE_OFF

    # turn on at switch
    await send_attributes_report(hass, cluster, {"window_detection_function": True})
    assert hass.states.get(entity_id).state == STATE_ON

    # turn off at switch
    await send_attributes_report(hass, cluster, {"window_detection_function": False})
    assert hass.states.get(entity_id).state == STATE_OFF

    # turn on from HA
    with patch(
        "zigpy.zcl.Cluster.write_attributes",
        return_value=mock_coro([zcl_f.Status.SUCCESS, zcl_f.Status.SUCCESS]),
    ):
        # turn on via UI
        await hass.services.async_call(
            SWITCH_DOMAIN, "turn_on", {"entity_id": entity_id}, blocking=True
        )
        assert len(cluster.write_attributes.mock_calls) == 1
        assert cluster.write_attributes.call_args == call(
            {"window_detection_function": True}
        )

    # turn off from HA
    with patch(
        "zigpy.zcl.Cluster.write_attributes",
        return_value=mock_coro([zcl_f.Status.SUCCESS, zcl_f.Status.SUCCESS]),
    ):
        # turn off via UI
        await hass.services.async_call(
            SWITCH_DOMAIN, "turn_off", {"entity_id": entity_id}, blocking=True
        )
        assert len(cluster.write_attributes.mock_calls) == 2
        assert cluster.write_attributes.call_args == call(
            {"window_detection_function": False}
        )

    cluster.read_attributes.reset_mock()
    await async_setup_component(hass, "homeassistant", {})
    await hass.async_block_till_done()
    await hass.services.async_call(
        "homeassistant", "update_entity", {"entity_id": entity_id}, blocking=True
    )
    # the mocking doesn't update the attr cache so this flips back to initial value
    assert cluster.read_attributes.call_count == 2
    assert [
        call(
            [
                "window_detection_function",
            ],
            allow_cache=False,
            only_cache=False,
            manufacturer=None,
        ),
        call(
            [
                "window_detection_function_inverter",
            ],
            allow_cache=False,
            only_cache=False,
            manufacturer=None,
        ),
    ] == cluster.read_attributes.call_args_list

    cluster.write_attributes.reset_mock()
    cluster.write_attributes.side_effect = ZigbeeException
    await hass.services.async_call(
        SWITCH_DOMAIN, "turn_off", {"entity_id": entity_id}, blocking=True
    )
    assert len(cluster.write_attributes.mock_calls) == 1
    assert cluster.write_attributes.call_args == call(
        {"window_detection_function": False}
    )

    # test inverter
    cluster.write_attributes.reset_mock()
    cluster._attr_cache.update({0xEF02: True})

    await hass.services.async_call(
        SWITCH_DOMAIN, "turn_off", {"entity_id": entity_id}, blocking=True
    )
    assert len(cluster.write_attributes.mock_calls) == 1
    assert cluster.write_attributes.call_args == call(
        {"window_detection_function": True}
    )

    await hass.services.async_call(
        SWITCH_DOMAIN, "turn_on", {"entity_id": entity_id}, blocking=True
    )
    assert len(cluster.write_attributes.mock_calls) == 2
    assert cluster.write_attributes.call_args == call(
        {"window_detection_function": False}
    )

    # test joining a new switch to the network and HA
    await async_test_rejoin(hass, zigpy_device_tuya, [cluster], (0,))
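
# NB (assumption, from how async_test_rejoin is used in these tests): its final
# argument is the expected per-cluster count of attribute-reporting
# configurations after the simulated rejoin, (1,) for the standard OnOff
# cluster in test_switch and (0,) here because the Tuya quirk cluster
# configures no reporting.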