Make dispatcher setup lazy (#74374)

pull/74394/head
J. Nick Koston 2022-07-04 07:58:35 -05:00 committed by GitHub
parent dd57d7d77f
commit 6c3baf03aa
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
2 changed files with 29 additions and 23 deletions

View File

@ -41,26 +41,13 @@ def async_dispatcher_connect(
"""
if DATA_DISPATCHER not in hass.data:
hass.data[DATA_DISPATCHER] = {}
job = HassJob(
catch_log_exception(
target,
lambda *args: "Exception in {} when dispatching '{}': {}".format(
# Functions wrapped in partial do not have a __name__
getattr(target, "__name__", None) or str(target),
signal,
args,
),
)
)
hass.data[DATA_DISPATCHER].setdefault(signal, []).append(job)
hass.data[DATA_DISPATCHER].setdefault(signal, {})[target] = None
@callback
def async_remove_dispatcher() -> None:
"""Remove signal listener."""
try:
hass.data[DATA_DISPATCHER][signal].remove(job)
del hass.data[DATA_DISPATCHER][signal][target]
except (KeyError, ValueError):
# KeyError if the target listener did not exist for the signal
# ValueError if listener did not exist within signal
@ -75,6 +62,21 @@ def dispatcher_send(hass: HomeAssistant, signal: str, *args: Any) -> None:
hass.loop.call_soon_threadsafe(async_dispatcher_send, hass, signal, *args)
def _generate_job(signal: str, target: Callable[..., Any]) -> HassJob:
    """Return a HassJob that runs *target* with exception logging for *signal*."""

    def _log_message(*args: Any) -> str:
        # Functions wrapped in partial do not have a __name__
        target_name = getattr(target, "__name__", None) or str(target)
        return "Exception in {} when dispatching '{}': {}".format(
            target_name, signal, args
        )

    return HassJob(catch_log_exception(target, _log_message))
@callback
@bind_hass
def async_dispatcher_send(hass: HomeAssistant, signal: str, *args: Any) -> None:
@ -82,7 +84,9 @@ def async_dispatcher_send(hass: HomeAssistant, signal: str, *args: Any) -> None:
This method must be run in the event loop.
"""
target_list = hass.data.get(DATA_DISPATCHER, {}).get(signal, [])
for job in target_list:
target_list = hass.data.get(DATA_DISPATCHER, {}).get(signal, {})
for target, job in target_list.items():
if job is None:
job = _generate_job(signal, target)
target_list[target] = job
hass.async_add_hass_job(job, *args)

View File

@ -311,7 +311,7 @@ async def test_internal_discovery_callback_fill_out_group_fail(
await hass.async_block_till_done()
# when called with incomplete info, it should use HTTP to get missing
discover = signal.mock_calls[0][1][0]
discover = signal.mock_calls[-1][1][0]
assert discover == full_info
get_multizone_status_mock.assert_called_once()
@ -352,7 +352,7 @@ async def test_internal_discovery_callback_fill_out_group(
await hass.async_block_till_done()
# when called with incomplete info, it should use HTTP to get missing
discover = signal.mock_calls[0][1][0]
discover = signal.mock_calls[-1][1][0]
assert discover == full_info
get_multizone_status_mock.assert_called_once()
@ -423,23 +423,25 @@ async def test_internal_discovery_callback_fill_out_cast_type_manufacturer(
# when called with incomplete info, it should use HTTP to get missing
get_cast_type_mock.assert_called_once()
assert get_cast_type_mock.call_count == 1
discover = signal.mock_calls[0][1][0]
discover = signal.mock_calls[2][1][0]
assert discover == full_info
assert "Fetched cast details for unknown model 'Chromecast'" in caplog.text
signal.reset_mock()
# Call again, the model name should be fetched from cache
discover_cast(FAKE_MDNS_SERVICE, info)
await hass.async_block_till_done()
assert get_cast_type_mock.call_count == 1 # No additional calls
discover = signal.mock_calls[1][1][0]
discover = signal.mock_calls[0][1][0]
assert discover == full_info
signal.reset_mock()
# Call for another model, need to call HTTP again
get_cast_type_mock.return_value = full_info2.cast_info
discover_cast(FAKE_MDNS_SERVICE, info2)
await hass.async_block_till_done()
assert get_cast_type_mock.call_count == 2
discover = signal.mock_calls[2][1][0]
discover = signal.mock_calls[0][1][0]
assert discover == full_info2