2019-02-03 12:03:31 +00:00
|
|
|
"""Test configuration for the ZHA component."""
|
2023-04-19 19:04:53 +00:00
|
|
|
from collections.abc import Callable
|
2021-09-17 19:17:34 +00:00
|
|
|
import itertools
|
2021-09-06 23:00:06 +00:00
|
|
|
import time
|
2023-04-21 06:24:39 +00:00
|
|
|
from unittest.mock import AsyncMock, MagicMock, patch
|
2021-01-01 21:31:56 +00:00
|
|
|
|
2019-02-03 12:03:31 +00:00
|
|
|
import pytest
|
2019-12-09 19:50:04 +00:00
|
|
|
import zigpy
|
|
|
|
from zigpy.application import ControllerApplication
|
2022-07-28 15:24:31 +00:00
|
|
|
import zigpy.backups
|
2020-05-06 10:23:53 +00:00
|
|
|
import zigpy.config
|
2021-09-06 23:00:06 +00:00
|
|
|
from zigpy.const import SIG_EP_INPUT, SIG_EP_OUTPUT, SIG_EP_PROFILE, SIG_EP_TYPE
|
|
|
|
import zigpy.device
|
2020-02-12 21:12:14 +00:00
|
|
|
import zigpy.group
|
2021-09-06 23:00:06 +00:00
|
|
|
import zigpy.profiles
|
2023-04-21 06:24:39 +00:00
|
|
|
import zigpy.quirks
|
2020-02-12 21:12:14 +00:00
|
|
|
import zigpy.types
|
2021-09-06 23:00:06 +00:00
|
|
|
import zigpy.zdo.types as zdo_t
|
2019-10-21 23:30:56 +00:00
|
|
|
|
2020-02-12 21:12:14 +00:00
|
|
|
import homeassistant.components.zha.core.const as zha_const
|
2020-02-21 23:06:57 +00:00
|
|
|
import homeassistant.components.zha.core.device as zha_core_device
|
2020-02-12 21:12:14 +00:00
|
|
|
from homeassistant.setup import async_setup_component
|
2019-10-21 23:30:56 +00:00
|
|
|
|
2022-09-19 07:51:31 +00:00
|
|
|
from . import common
|
|
|
|
|
2020-02-10 02:45:35 +00:00
|
|
|
from tests.common import MockConfigEntry
|
2021-03-02 08:02:04 +00:00
|
|
|
from tests.components.light.conftest import mock_light_profiles # noqa: F401
|
2020-02-10 02:45:35 +00:00
|
|
|
|
2019-12-09 19:50:04 +00:00
|
|
|
# Group pre-created on the fixture controller (see zigpy_app_controller below).
FIXTURE_GRP_ID = 0x1001
# Human-readable name for that pre-created group.
FIXTURE_GRP_NAME = "fixture group"
|
|
|
|
|
2019-02-03 12:03:31 +00:00
|
|
|
|
2022-03-31 15:26:27 +00:00
|
|
|
@pytest.fixture(scope="session", autouse=True)
def globally_load_quirks():
    """Force quirks to be loaded for every ZHA test session.

    Running only a subset of the ZHA tests can otherwise skip quirk loading,
    hiding bugs that only surface once the full suite (which does load quirks)
    runs. Loading them up front keeps test runs deterministic in isolation.
    """
    # Imported lazily so simply importing this conftest stays cheap.
    import zhaquirks

    zhaquirks.setup()
|
|
|
|
|
|
|
|
|
2023-04-21 06:24:39 +00:00
|
|
|
class _FakeApp(ControllerApplication):
    """Concrete ControllerApplication with every radio operation stubbed out.

    zigpy's ControllerApplication is abstract; this subclass satisfies the
    abstract interface with no-op coroutines so tests can instantiate an
    application object without any real radio hardware.
    """

    async def add_endpoint(self, descriptor: zdo_t.SimpleDescriptor):
        """No-op: pretend to register an endpoint descriptor."""

    async def connect(self):
        """No-op: pretend to open the radio connection."""

    async def disconnect(self):
        """No-op: pretend to close the radio connection."""

    async def force_remove(self, dev: zigpy.device.Device):
        """No-op: pretend to force-remove a device."""

    async def load_network_info(self, *, load_devices: bool = False):
        """No-op: pretend to load network info from the radio."""

    async def permit_ncp(self, time_s: int = 60):
        """No-op: pretend to permit joins on the coordinator."""

    async def permit_with_key(
        self, node: zigpy.types.EUI64, code: bytes, time_s: int = 60
    ):
        """No-op: pretend to permit a join with an install code."""

    async def reset_network_info(self):
        """No-op: pretend to reset stored network info."""

    async def send_packet(self, packet: zigpy.types.ZigbeePacket):
        """No-op: pretend to transmit a packet."""

    async def start_network(self):
        """No-op: pretend to form/start the network."""

    async def write_network_info(self):
        """No-op: pretend to persist network info to the radio."""

    async def request(
        self,
        device: zigpy.device.Device,
        profile: zigpy.types.uint16_t,
        cluster: zigpy.types.uint16_t,
        src_ep: zigpy.types.uint8_t,
        dst_ep: zigpy.types.uint8_t,
        sequence: zigpy.types.uint8_t,
        data: bytes,
        *,
        expect_reply: bool = True,
        use_ieee: bool = False,
        extended_timeout: bool = False,
    ):
        """No-op: pretend to send a unicast request to a device."""
|
|
|
|
|
|
|
|
|
2020-02-12 21:12:14 +00:00
|
|
|
@pytest.fixture
def zigpy_app_controller():
    """Zigpy ApplicationController fixture."""
    app_config = {
        zigpy.config.CONF_DATABASE: None,
        zigpy.config.CONF_DEVICE: {zigpy.config.CONF_DEVICE_PATH: "/dev/null"},
    }
    app = _FakeApp(app_config)

    # Pre-populate a group so group-related tests have one to find.
    app.groups.add_group(FIXTURE_GRP_ID, FIXTURE_GRP_NAME, suppress_event=True)

    node_info = app.state.node_info
    node_info.nwk = 0x0000
    node_info.ieee = zigpy.types.EUI64.convert("00:15:8d:00:02:32:4f:32")

    network_info = app.state.network_info
    network_info.pan_id = 0x1234
    network_info.extended_pan_id = node_info.ieee
    network_info.channel = 15
    network_info.network_key.key = zigpy.types.KeyData(range(16))

    # Prevent real zigpy device requests while the fixture is in use.
    with patch("zigpy.device.Device.request"):
        yield app
|
2020-02-12 21:12:14 +00:00
|
|
|
|
|
|
|
|
2019-07-31 19:25:30 +00:00
|
|
|
@pytest.fixture(name="config_entry")
async def config_entry_fixture(hass):
    """Fixture representing a config entry."""
    entry_data = {
        zigpy.config.CONF_DEVICE: {zigpy.config.CONF_DEVICE_PATH: "/dev/ttyUSB0"},
        zha_const.CONF_RADIO_TYPE: "ezsp",
    }
    entry_options = {
        zha_const.CUSTOM_CONFIGURATION: {
            zha_const.ZHA_OPTIONS: {
                zha_const.CONF_ENABLE_ENHANCED_LIGHT_TRANSITION: True,
                zha_const.CONF_GROUP_MEMBERS_ASSUME_STATE: False,
            },
            zha_const.ZHA_ALARM_OPTIONS: {
                zha_const.CONF_ALARM_ARM_REQUIRES_CODE: False,
                zha_const.CONF_ALARM_MASTER_CODE: "4321",
                zha_const.CONF_ALARM_FAILED_TRIES: 2,
            },
        }
    }

    entry = MockConfigEntry(
        version=2,
        domain=zha_const.DOMAIN,
        data=entry_data,
        options=entry_options,
    )
    entry.add_to_hass(hass)
    return entry
|
2019-02-03 12:03:31 +00:00
|
|
|
|
|
|
|
|
2020-02-10 02:45:35 +00:00
|
|
|
@pytest.fixture
def setup_zha(hass, config_entry, zigpy_app_controller):
    """Set up ZHA component."""
    base_config = {zha_const.CONF_ENABLE_QUIRKS: False}

    # Route radio creation to our fake controller instead of real hardware.
    app_factory_patch = patch(
        "bellows.zigbee.application.ControllerApplication.new",
        return_value=zigpy_app_controller,
    )

    async def _setup(config=None):
        merged = {**base_config, **(config or {})}
        with app_factory_patch:
            status = await async_setup_component(
                hass, zha_const.DOMAIN, {zha_const.DOMAIN: merged}
            )
            assert status is True
            await hass.async_block_till_done()

    return _setup
|
2019-02-03 12:03:31 +00:00
|
|
|
|
|
|
|
|
2020-01-28 00:43:26 +00:00
|
|
|
@pytest.fixture
|
2023-04-19 14:47:07 +00:00
|
|
|
def cluster_handler():
    """ClusterHandler mock factory fixture."""

    def cluster_handler(name: str, cluster_id: int, endpoint_id: int = 1):
        """Build a MagicMock shaped like a ZHA cluster handler."""
        mock_handler = MagicMock()
        mock_handler.name = name
        mock_handler.generic_id = f"cluster_handler_0x{cluster_id:04x}"
        mock_handler.id = f"{endpoint_id}:0x{cluster_id:04x}"
        # Async lifecycle hooks must be awaitable.
        mock_handler.async_configure = AsyncMock()
        mock_handler.async_initialize = AsyncMock()
        return mock_handler

    return cluster_handler
|
2020-01-28 00:43:26 +00:00
|
|
|
|
|
|
|
|
|
|
|
@pytest.fixture
def zigpy_device_mock(zigpy_app_controller):
    """Make a fake device using the specified cluster classes."""

    def _mock_dev(
        endpoints,
        ieee="00:0d:6f:00:0a:90:69:e7",
        manufacturer="FakeManufacturer",
        model="FakeModel",
        node_descriptor=b"\x02@\x807\x10\x7fd\x00\x00*d\x00\x00",
        nwk=0xB79C,
        patch_cluster=True,
        quirk=None,
    ):
        """Make a fake device using the specified cluster classes."""
        device = zigpy.device.Device(
            zigpy_app_controller, zigpy.types.EUI64.convert(ieee), nwk
        )
        device.manufacturer = manufacturer
        device.model = model
        device.node_desc = zdo_t.NodeDescriptor.deserialize(node_descriptor)[0]
        device.last_seen = time.time()

        for endpoint_id, ep_data in endpoints.items():
            endpoint = device.add_endpoint(endpoint_id)
            endpoint.device_type = ep_data[SIG_EP_TYPE]
            # Default to the Home Automation profile when unspecified.
            endpoint.profile_id = ep_data.get(SIG_EP_PROFILE, 0x0104)
            endpoint.request = AsyncMock()

            for input_cluster_id in ep_data.get(SIG_EP_INPUT, []):
                endpoint.add_input_cluster(input_cluster_id)

            for output_cluster_id in ep_data.get(SIG_EP_OUTPUT, []):
                endpoint.add_output_cluster(output_cluster_id)

        device.status = zigpy.device.Status.ENDPOINTS_INIT

        if quirk:
            device = quirk(zigpy_app_controller, device.ieee, device.nwk, device)
        else:
            # Allow zigpy to apply quirks if we don't pass one explicitly
            device = zigpy.quirks.get_device(device)

        if patch_cluster:
            # Skip the ZDO endpoint (id 0); mock out every real cluster.
            for endpoint in (ep for epid, ep in device.endpoints.items() if epid):
                endpoint.request = AsyncMock(return_value=[0])
                for cluster in itertools.chain(
                    endpoint.in_clusters.values(), endpoint.out_clusters.values()
                ):
                    common.patch_cluster(cluster)

        return device

    return _mock_dev
|
2020-02-10 02:45:35 +00:00
|
|
|
|
|
|
|
|
|
|
|
@pytest.fixture
def zha_device_joined(hass, setup_zha):
    """Return a newly joined ZHA device."""

    async def _zha_device(zigpy_dev):
        # Freshly joined devices are, by definition, recently seen.
        zigpy_dev.last_seen = time.time()
        await setup_zha()
        gateway = common.get_zha_gateway(hass)
        await gateway.async_device_initialized(zigpy_dev)
        await hass.async_block_till_done()
        return gateway.get_device(zigpy_dev.ieee)

    return _zha_device
|
|
|
|
|
|
|
|
|
|
|
|
@pytest.fixture
def zha_device_restored(hass, zigpy_app_controller, setup_zha):
    """Return a restored ZHA device."""

    async def _zha_device(zigpy_dev, last_seen=None):
        # Register the device before setup so ZHA restores it from "storage".
        zigpy_app_controller.devices[zigpy_dev.ieee] = zigpy_dev

        if last_seen is not None:
            zigpy_dev.last_seen = last_seen

        await setup_zha()
        gateway = hass.data[zha_const.DATA_ZHA][zha_const.DATA_ZHA_GATEWAY]
        return gateway.get_device(zigpy_dev.ieee)

    return _zha_device
|
2020-02-10 02:45:35 +00:00
|
|
|
|
|
|
|
|
|
|
|
@pytest.fixture(params=["zha_device_joined", "zha_device_restored"])
def zha_device_joined_restored(request):
    """Join or restore ZHA device."""
    # Resolve the parametrized fixture and tag it so tests can tell which
    # path (join vs. restore) produced the device.
    device_factory = request.getfixturevalue(request.param)
    device_factory.name = request.param
    return device_factory
|
2020-02-21 23:06:57 +00:00
|
|
|
|
|
|
|
|
|
|
|
@pytest.fixture
def zha_device_mock(
    hass, zigpy_device_mock
) -> Callable[..., zha_core_device.ZHADevice]:
    """Return a ZHA Device factory."""

    def _zha_device(
        endpoints=None,
        ieee="00:11:22:33:44:55:66:77",
        manufacturer="mock manufacturer",
        model="mock model",
        node_desc=b"\x02@\x807\x10\x7fd\x00\x00*d\x00\x00",
        patch_cluster=True,
    ) -> zha_core_device.ZHADevice:
        """Build a ZHADevice wrapping a freshly mocked zigpy device."""
        if endpoints is None:
            # Two-endpoint default: a color dimmable light plus a controller.
            endpoints = {
                1: {
                    "in_clusters": [0, 1, 8, 768],
                    "out_clusters": [0x19],
                    "device_type": 0x0105,
                },
                2: {
                    "in_clusters": [0],
                    "out_clusters": [6, 8, 0x19, 768],
                    "device_type": 0x0810,
                },
            }

        zigpy_device = zigpy_device_mock(
            endpoints,
            ieee,
            manufacturer,
            model,
            node_desc,
            patch_cluster=patch_cluster,
        )
        return zha_core_device.ZHADevice(hass, zigpy_device, MagicMock())

    return _zha_device
|
2020-07-03 17:57:04 +00:00
|
|
|
|
|
|
|
|
|
|
|
@pytest.fixture
|
|
|
|
def hass_disable_services(hass):
    """Mock service register."""
    # Swallow service registrations and pretend every service exists so
    # components can be set up without touching the real service registry.
    register_patch = patch.object(hass.services, "async_register")
    has_service_patch = patch.object(
        hass.services, "has_service", return_value=True
    )
    with register_patch, has_service_patch:
        yield hass
|