"""
Provide pre-made queries on top of the recorder component.

For more details about this component, please refer to the documentation at
https://home-assistant.io/components/history/
"""
import asyncio
from collections import defaultdict
from datetime import timedelta
from itertools import groupby
import logging
import time

import voluptuous as vol

from homeassistant.const import (
    HTTP_BAD_REQUEST, CONF_DOMAINS, CONF_ENTITIES, CONF_EXCLUDE, CONF_INCLUDE)
import homeassistant.util.dt as dt_util
from homeassistant.components import recorder, script
from homeassistant.components.frontend import register_built_in_panel
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import ATTR_HIDDEN
from homeassistant.components.recorder.util import session_scope, execute

_LOGGER = logging.getLogger(__name__)

DOMAIN = 'history'
DEPENDENCIES = ['recorder', 'http']

CONFIG_SCHEMA = vol.Schema({
    DOMAIN: recorder.FILTER_SCHEMA,
}, extra=vol.ALLOW_EXTRA)
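
# Illustrative configuration sketch (comment only, not executed). The exact
# keys accepted come from recorder.FILTER_SCHEMA, which this component
# reuses; the entity and domain names below are hypothetical examples.
#
# history:
#   include:
#     domains:
#       - sensor
#       - climate
#   exclude:
#     entities:
#       - sensor.last_boot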

SIGNIFICANT_DOMAINS = ('thermostat', 'climate')
IGNORE_DOMAINS = ('zone', 'scene',)


def last_recorder_run(hass):
    """Retrieve the last closed recorder run from the database."""
    from homeassistant.components.recorder.models import RecorderRuns

    with session_scope(hass=hass) as session:
        res = (session.query(RecorderRuns)
               .order_by(RecorderRuns.end.desc()).first())
        if res is None:
            return None
        session.expunge(res)
        return res


def get_significant_states(hass, start_time, end_time=None, entity_id=None,
                           filters=None):
    """
    Return state changes during the UTC period start_time - end_time.

    Significant states are all states where there is a state change,
    as well as all states from certain domains (for instance
    thermostat so that we get current temperature in our graphs).
    """
    timer_start = time.perf_counter()
    from homeassistant.components.recorder.models import States

    entity_ids = (entity_id.lower(), ) if entity_id is not None else None

    with session_scope(hass=hass) as session:
        query = session.query(States).filter(
            (States.domain.in_(SIGNIFICANT_DOMAINS) |
             (States.last_changed == States.last_updated)) &
            (States.last_updated > start_time))

        if filters:
            query = filters.apply(query, entity_ids)

        if end_time is not None:
            query = query.filter(States.last_updated < end_time)

        query = query.order_by(States.last_updated)

        states = (
            state for state in execute(query)
            if (_is_significant(state) and
                not state.attributes.get(ATTR_HIDDEN, False)))

    if _LOGGER.isEnabledFor(logging.DEBUG):
        elapsed = time.perf_counter() - timer_start
        _LOGGER.debug(
            'get_significant_states took %fs', elapsed)

    return states_to_json(hass, states, start_time, entity_id, filters)
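
# Usage sketch (illustrative only, hypothetical time window): fetch the
# significant states for the last day and inspect the per-entity lists.
#
#     start = dt_util.utcnow() - timedelta(days=1)
#     history = get_significant_states(hass, start)
#     for entity_id, states in history.items():
#         _LOGGER.debug('%s has %d significant states',
#                       entity_id, len(states))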


def state_changes_during_period(hass, start_time, end_time=None,
                                entity_id=None):
    """Return state changes during the UTC period start_time - end_time."""
    from homeassistant.components.recorder.models import States

    with session_scope(hass=hass) as session:
        query = session.query(States).filter(
            (States.last_changed == States.last_updated) &
            (States.last_updated > start_time))

        if end_time is not None:
            query = query.filter(States.last_updated < end_time)

        if entity_id is not None:
            query = query.filter_by(entity_id=entity_id.lower())

        states = execute(
            query.order_by(States.last_updated))

    return states_to_json(hass, states, start_time, entity_id)


def get_states(hass, utc_point_in_time, entity_ids=None, run=None,
               filters=None):
    """Return the states at a specific point in time."""
    from homeassistant.components.recorder.models import States

    if run is None:
        run = recorder.run_information(hass, utc_point_in_time)

        # History did not run before utc_point_in_time
        if run is None:
            return []

    from sqlalchemy import and_, func

    with session_scope(hass=hass) as session:
        if entity_ids and len(entity_ids) == 1:
            # Use an entirely different (and extremely fast) query if we only
            # have a single entity id
            most_recent_state_ids = session.query(
                States.state_id.label('max_state_id')
            ).filter(
                (States.last_updated < utc_point_in_time) &
                (States.entity_id.in_(entity_ids))
            ).order_by(
                States.last_updated.desc())

            most_recent_state_ids = most_recent_state_ids.limit(1)

        else:
            # We have more than one entity to look at (most commonly we want
            # all entities), so we need to do a search on all states since
            # the last recorder run started.
            most_recent_states_by_date = session.query(
                States.entity_id.label('max_entity_id'),
                func.max(States.last_updated).label('max_last_updated')
            ).filter(
                (States.last_updated >= run.start) &
                (States.last_updated < utc_point_in_time)
            )

            if entity_ids:
                # Assign the filtered query back so the entity filter
                # actually takes effect.
                most_recent_states_by_date = \
                    most_recent_states_by_date.filter(
                        States.entity_id.in_(entity_ids))

            most_recent_states_by_date = most_recent_states_by_date.group_by(
                States.entity_id)

            most_recent_states_by_date = most_recent_states_by_date.subquery()

            most_recent_state_ids = session.query(
                func.max(States.state_id).label('max_state_id')
            ).join(most_recent_states_by_date, and_(
                States.entity_id == most_recent_states_by_date.c.max_entity_id,
                States.last_updated == most_recent_states_by_date.c.
                max_last_updated))

            most_recent_state_ids = most_recent_state_ids.group_by(
                States.entity_id)

            most_recent_state_ids = most_recent_state_ids.subquery()

        query = session.query(States).join(
            most_recent_state_ids,
            States.state_id == most_recent_state_ids.c.max_state_id
        ).filter(~States.domain.in_(IGNORE_DOMAINS))

        if filters:
            query = filters.apply(query, entity_ids)

        return [state for state in execute(query)
                if not state.attributes.get(ATTR_HIDDEN, False)]
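
# Usage sketch (hypothetical entity id): take a point-in-time snapshot of a
# single entity, which exercises the fast single-entity code path above.
#
#     snapshot = get_states(hass, dt_util.utcnow(), ['light.kitchen'])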


def states_to_json(hass, states, start_time, entity_id, filters=None):
    """Convert SQL results into JSON friendly data structure.

    This takes our state list and turns it into a JSON friendly data
    structure {'entity_id': [list of states], 'entity_id2': [list of states]}

    We also need to go back and create a synthetic zero data point for
    each list of states, otherwise our graphs won't start on the Y
    axis correctly.
    """
    result = defaultdict(list)

    entity_ids = [entity_id] if entity_id is not None else None

    # Get the states at the start time
    timer_start = time.perf_counter()
    for state in get_states(hass, start_time, entity_ids, filters=filters):
        state.last_changed = start_time
        state.last_updated = start_time
        result[state.entity_id].append(state)

    if _LOGGER.isEnabledFor(logging.DEBUG):
        elapsed = time.perf_counter() - timer_start
        _LOGGER.debug(
            'getting %d first datapoints took %fs', len(result), elapsed)

    # Append all changes to it
    for ent_id, group in groupby(states, lambda state: state.entity_id):
        result[ent_id].extend(group)

    return result
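
# Shape of the returned mapping (illustrative, hypothetical entity ids):
#
#     {
#         'light.kitchen': [<State @ start_time>, <State>, ...],
#         'sensor.temperature': [<State @ start_time>, <State>, ...],
#     }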


def get_state(hass, utc_point_in_time, entity_id, run=None):
    """Return a state at a specific point in time."""
    states = list(get_states(hass, utc_point_in_time, (entity_id,), run))
    return states[0] if states else None


# pylint: disable=unused-argument
def setup(hass, config):
    """Set up the history hooks."""
    filters = Filters()
    exclude = config[DOMAIN].get(CONF_EXCLUDE)
    if exclude:
        filters.excluded_entities = exclude[CONF_ENTITIES]
        filters.excluded_domains = exclude[CONF_DOMAINS]
    include = config[DOMAIN].get(CONF_INCLUDE)
    if include:
        filters.included_entities = include[CONF_ENTITIES]
        filters.included_domains = include[CONF_DOMAINS]

    hass.http.register_view(HistoryPeriodView(filters))
    register_built_in_panel(hass, 'history', 'History', 'mdi:poll-box')

    return True


class HistoryPeriodView(HomeAssistantView):
    """Handle history period requests."""

    url = '/api/history/period'
    name = 'api:history:view-period'
    extra_urls = ['/api/history/period/{datetime}']

    def __init__(self, filters):
        """Initialize the history period view."""
        self.filters = filters

    @asyncio.coroutine
    def get(self, request, datetime=None):
        """Return history over a period of time."""
        timer_start = time.perf_counter()
        if datetime:
            datetime = dt_util.parse_datetime(datetime)

            if datetime is None:
                return self.json_message('Invalid datetime', HTTP_BAD_REQUEST)

        now = dt_util.utcnow()

        one_day = timedelta(days=1)
        if datetime:
            start_time = dt_util.as_utc(datetime)
        else:
            start_time = now - one_day

        if start_time > now:
            return self.json([])

        end_time = request.query.get('end_time')
        if end_time:
            # Parse before converting so an invalid value returns a 400
            # instead of raising inside dt_util.as_utc.
            end_time = dt_util.parse_datetime(end_time)
            if end_time is None:
                return self.json_message('Invalid end_time', HTTP_BAD_REQUEST)
            end_time = dt_util.as_utc(end_time)
        else:
            end_time = start_time + one_day
        entity_id = request.query.get('filter_entity_id')

        result = yield from request.app['hass'].async_add_job(
            get_significant_states, request.app['hass'], start_time, end_time,
            entity_id, self.filters)
        result = result.values()
        if _LOGGER.isEnabledFor(logging.DEBUG):
            elapsed = time.perf_counter() - timer_start
            _LOGGER.debug(
                'Extracted %d states in %fs', sum(map(len, result)), elapsed)
        return self.json(result)
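
# Request sketch for the endpoint served above (illustrative path and query
# values; the datetime path segment and both query parameters are optional):
#
#     GET /api/history/period/2017-08-01T00:00:00+00:00
#         ?end_time=2017-08-02T00:00:00+00:00
#         &filter_entity_id=light.kitchen
#
# The response is a JSON list with one array of state objects per entity.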


class Filters(object):
    """Container for the configured include and exclude filters."""

    def __init__(self):
        """Initialise the include and exclude filters."""
        self.excluded_entities = []
        self.excluded_domains = []
        self.included_entities = []
        self.included_domains = []

    def apply(self, query, entity_ids=None):
        """Apply the include/exclude filter on domains and entities to query.

        The following rules apply:
        * only the include section is configured - just query the specified
          entities or domains.
        * only the exclude section is configured - filter the specified
          entities and domains from all the entities in the system.
        * if include and exclude are defined - select the entities specified
          in the include and filter out the ones from the exclude list.
        """
        from homeassistant.components.recorder.models import States

        # specific entities requested - do not in/exclude anything
        if entity_ids is not None:
            return query.filter(States.entity_id.in_(entity_ids))
        query = query.filter(~States.domain.in_(IGNORE_DOMAINS))

        filter_query = None
        # filter if only excluded domain is configured
        if self.excluded_domains and not self.included_domains:
            filter_query = ~States.domain.in_(self.excluded_domains)
            if self.included_entities:
                filter_query &= States.entity_id.in_(self.included_entities)
        # filter if only included domain is configured
        elif not self.excluded_domains and self.included_domains:
            filter_query = States.domain.in_(self.included_domains)
            if self.included_entities:
                filter_query |= States.entity_id.in_(self.included_entities)
        # filter if included and excluded domain is configured
        elif self.excluded_domains and self.included_domains:
            filter_query = ~States.domain.in_(self.excluded_domains)
            if self.included_entities:
                filter_query &= (States.domain.in_(self.included_domains) |
                                 States.entity_id.in_(self.included_entities))
            else:
                filter_query &= (States.domain.in_(self.included_domains) & ~
                                 States.domain.in_(self.excluded_domains))
        # no domain filter just included entities
        elif not self.excluded_domains and not self.included_domains and \
                self.included_entities:
            filter_query = States.entity_id.in_(self.included_entities)
        if filter_query is not None:
            query = query.filter(filter_query)
        # finally apply excluded entities filter if configured
        if self.excluded_entities:
            query = query.filter(
                ~States.entity_id.in_(self.excluded_entities))
        return query
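
# Illustrative examples of how the rules above combine (hypothetical config):
# with included_domains=['sensor'] and excluded_entities=['sensor.noisy'],
# apply() keeps every sensor state except sensor.noisy; with only
# excluded_domains=['zone'], it keeps all states outside the zone domain.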


def _is_significant(state):
    """Test if state is significant for history charts.

    Will only test for things that are not filtered out in SQL.
    """
    # scripts that are not cancellable will never change state
    return (state.domain != 'script' or
            state.attributes.get(script.ATTR_CAN_CANCEL))