Include channel in response to WS thread/list_datasets (#90493)

pull/90517/head
Erik Montnemery 2023-03-30 15:16:27 +02:00 committed by GitHub
parent 642984a042
commit 976efb437b
4 changed files with 43 additions and 0 deletions

@@ -1,6 +1,7 @@
 """Persistently store thread datasets."""
 from __future__ import annotations
 
+from contextlib import suppress
 import dataclasses
 from datetime import datetime
 from functools import cached_property
@@ -35,6 +36,15 @@ class DatasetEntry:
     created: datetime = dataclasses.field(default_factory=dt_util.utcnow)
     id: str = dataclasses.field(default_factory=ulid_util.ulid)
 
+    @property
+    def channel(self) -> int | None:
+        """Return channel as an integer."""
+        if (channel := self.dataset.get(tlv_parser.MeshcopTLVType.CHANNEL)) is None:
+            return None
+        with suppress(ValueError):
+            return int(channel, 16)
+        return None
+
     @cached_property
     def dataset(self) -> dict[tlv_parser.MeshcopTLVType, str]:
         """Return the dataset in dict format."""

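Note on the new DatasetEntry.channel property: the channel TLV value is kept as a hex string, so int(channel, 16) converts it, and suppress(ValueError) maps an unparsable value to None instead of raising. A minimal standalone sketch of the same conversion (channel_from_hex is a hypothetical helper name; the sample values mirror the test fixtures further down):

from contextlib import suppress


def channel_from_hex(value: str | None) -> int | None:
    """Mirror DatasetEntry.channel: hex string to int, None on bad input."""
    if value is None:
        return None
    with suppress(ValueError):
        return int(value, 16)
    return None


assert channel_from_hex("00000F") == 15  # well-formed channel TLV value
assert channel_from_hex("") is None      # zero-length TLV value: ValueError, so None
assert channel_from_hex(None) is None    # channel TLV absent from the dataset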
@@ -144,6 +144,7 @@ async def ws_list_datasets(
     for dataset in store.datasets.values():
         result.append(
             {
+                "channel": dataset.channel,
                 "created": dataset.created,
                 "dataset_id": dataset.id,
                 "extended_pan_id": dataset.extended_pan_id,

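With this change every entry returned by the thread/list_datasets WebSocket command carries a channel key. A hedged sketch of one entry in the reply, showing only the keys visible in this diff with illustrative values (real replies include further fields):

entry = {
    "channel": 15,  # new field; None when the channel TLV is missing or malformed
    "created": "2023-03-30T13:16:27+00:00",  # illustrative timestamp
    "dataset_id": "0123456789abcdef",  # illustrative id
    "extended_pan_id": "1111111122222222",
    "source": "Google",
}
assert entry["channel"] is None or isinstance(entry["channel"], int)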
@@ -19,6 +19,18 @@ DATASET_1_REORDERED = (
     "10445F2B5CA6F2A93A55CE570A70EFEECB0C0402A0F7F801021234"
 )
+
+DATASET_1_BAD_CHANNEL = (
+    "0E080000000000010000000035060004001FFFE0020811111111222222220708FDAD70BF"
+    "E5AA15DD051000112233445566778899AABBCCDDEEFF030E4F70656E54687265616444656D6F01"
+    "0212340410445F2B5CA6F2A93A55CE570A70EFEECB0C0402A0F7F8"
+)
+
+DATASET_1_NO_CHANNEL = (
+    "0E08000000000001000035060004001FFFE0020811111111222222220708FDAD70BF"
+    "E5AA15DD051000112233445566778899AABBCCDDEEFF030E4F70656E54687265616444656D6F01"
+    "0212340410445F2B5CA6F2A93A55CE570A70EFEECB0C0402A0F7F8"
+)
 
 
 async def test_add_invalid_dataset(hass: HomeAssistant) -> None:
     """Test adding an invalid dataset."""
@@ -109,6 +121,8 @@ async def test_dataset_properties(hass: HomeAssistant) -> None:
         {"source": "Google", "tlv": DATASET_1},
         {"source": "Multipan", "tlv": DATASET_2},
         {"source": "🎅", "tlv": DATASET_3},
+        {"source": "test1", "tlv": DATASET_1_BAD_CHANNEL},
+        {"source": "test2", "tlv": DATASET_1_NO_CHANNEL},
     ]
 
     for dataset in datasets:
@@ -122,25 +136,40 @@ async def test_dataset_properties(hass: HomeAssistant) -> None:
             dataset_2 = dataset
         if dataset.source == "🎅":
             dataset_3 = dataset
+        if dataset.source == "test1":
+            dataset_4 = dataset
+        if dataset.source == "test2":
+            dataset_5 = dataset
 
     dataset = store.async_get(dataset_1.id)
     assert dataset == dataset_1
+    assert dataset.channel == 15
     assert dataset.extended_pan_id == "1111111122222222"
     assert dataset.network_name == "OpenThreadDemo"
     assert dataset.pan_id == "1234"
 
     dataset = store.async_get(dataset_2.id)
     assert dataset == dataset_2
+    assert dataset.channel == 15
     assert dataset.extended_pan_id == "1111111122222222"
     assert dataset.network_name == "HomeAssistant!"
     assert dataset.pan_id == "1234"
 
     dataset = store.async_get(dataset_3.id)
     assert dataset == dataset_3
+    assert dataset.channel == 15
     assert dataset.extended_pan_id == "1111111122222222"
     assert dataset.network_name == "~🐣🐥🐤~"
     assert dataset.pan_id == "1234"
 
+    dataset = store.async_get(dataset_4.id)
+    assert dataset == dataset_4
+    assert dataset.channel is None
+
+    dataset = store.async_get(dataset_5.id)
+    assert dataset == dataset_5
+    assert dataset.channel is None
+
 
 async def test_load_datasets(hass: HomeAssistant) -> None:
     """Make sure that we can load/save data correctly."""

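Why the two new fixtures exercise the None path: a Meshcop TLV stream is a sequence of one type byte, one length byte, then that many value bytes, and the channel TLV has type 0x00. DATASET_1_BAD_CHANNEL contains the bytes 00 00, i.e. a channel TLV with a zero-length value, so int("", 16) raises ValueError and the property returns None; DATASET_1_NO_CHANNEL omits the channel TLV entirely. A simplified decoder sketch under those assumptions (the component's real tlv_parser is more complete, e.g. this ignores extended-length TLVs; the DATASET_* names are the fixtures from the diff above):

def parse_tlvs(payload: str) -> dict[int, str]:
    """Naive Meshcop TLV decode: {type: hex value}; no extended lengths."""
    data = bytes.fromhex(payload)
    tlvs: dict[int, str] = {}
    i = 0
    while i + 2 <= len(data):
        tlv_type, length = data[i], data[i + 1]
        tlvs[tlv_type] = data[i + 2 : i + 2 + length].hex().upper()
        i += 2 + length
    return tlvs


CHANNEL = 0x00  # Meshcop channel TLV type
assert parse_tlvs(DATASET_1_BAD_CHANNEL)[CHANNEL] == ""  # empty value -> channel None
assert CHANNEL not in parse_tlvs(DATASET_1_NO_CHANNEL)   # TLV absent -> channel None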
@@ -153,6 +153,7 @@ async def test_list_get_dataset(
     assert msg["result"] == {
         "datasets": [
             {
+                "channel": 15,
                 "created": dataset_1.created.isoformat(),
                 "dataset_id": dataset_1.id,
                 "extended_pan_id": "1111111122222222",
@@ -162,6 +163,7 @@ async def test_list_get_dataset(
                 "source": "Google",
             },
             {
+                "channel": 15,
                 "created": dataset_2.created.isoformat(),
                 "dataset_id": dataset_2.id,
                 "extended_pan_id": "1111111122222222",
@@ -171,6 +173,7 @@ async def test_list_get_dataset(
                 "source": "Multipan",
             },
             {
+                "channel": 15,
                 "created": dataset_3.created.isoformat(),
                 "dataset_id": dataset_3.id,
                 "extended_pan_id": "1111111122222222",