"""The tests for the Recorder component."""
# pylint: disable=protected-access
import asyncio
from unittest.mock import patch, call

import pytest
from sqlalchemy import create_engine

from homeassistant.bootstrap import async_setup_component
from homeassistant.components.recorder import wait_connection_ready, migration
from homeassistant.components.recorder.models import SCHEMA_VERSION
from homeassistant.components.recorder.const import DATA_INSTANCE
from tests.components.recorder import models_original


def create_engine_test(*args, **kwargs):
    """Test version of create_engine that initializes with the old schema.

    This simulates an existing db with the old schema.
    """
    engine = create_engine(*args, **kwargs)
    models_original.Base.metadata.create_all(engine)
    return engine
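

# Illustrative sketch (not executed as part of the suite): calling the
# factory directly shows what patching 'sqlalchemy.create_engine' with it
# achieves. The engine starts out with the *old* schema, which is what
# forces the recorder's migration path to run:
#
#     engine = create_engine_test('sqlite://')
#     # the database behind `engine` now has the models_original tables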


@asyncio.coroutine
def test_schema_update_calls(hass):
    """Test that schema migrations occur in correct order."""
    with patch('sqlalchemy.create_engine', new=create_engine_test), \
        patch('homeassistant.components.recorder.migration._apply_update') as \
            update:
        yield from async_setup_component(hass, 'recorder', {
            'recorder': {
                'db_url': 'sqlite://'
            }
        })
        yield from wait_connection_ready(hass)
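
    # The migration should apply every schema step in order, ending at the
    # current SCHEMA_VERSION. For example, if SCHEMA_VERSION were 4, the
    # expected calls would be (engine, 1, 0), (engine, 2, 0), (engine, 3, 0)
    # and (engine, 4, 0).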
    update.assert_has_calls([
        call(hass.data[DATA_INSTANCE].engine, version+1, 0) for version
        in range(0, SCHEMA_VERSION)])


@asyncio.coroutine
def test_schema_migrate(hass):
    """Test the full schema migration logic.

    We're just testing that the logic can execute successfully here without
    throwing exceptions. Maintaining a set of assertions based on schema
    inspection could quickly become quite cumbersome.
    """
    with patch('sqlalchemy.create_engine', new=create_engine_test), \
        patch('homeassistant.components.recorder.Recorder._setup_run') as \
            setup_run:
        yield from async_setup_component(hass, 'recorder', {
            'recorder': {
                'db_url': 'sqlite://'
            }
        })
        yield from wait_connection_ready(hass)
        assert setup_run.called
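        # Reaching this assertion means no migration step raised; the
        # recorder is expected to call _setup_run only once the schema is
        # up to date, so the patched mock records that handoff.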


def test_invalid_update():
    """Test that an invalid new version raises an exception."""
    with pytest.raises(ValueError):
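        # -1 precedes the oldest known schema version, so there is no
        # registered update path for it and _apply_update should raise.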
        migration._apply_update(None, -1, 0)