/* mbed Microcontroller Library
 * Copyright (c) 2015 ARM Limited
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stdio.h>
#include <stddef.h>
#include "hal/ticker_api.h"
#include "platform/mbed_critical.h"
#include "platform/mbed_assert.h"
static void schedule_interrupt(const ticker_data_t *const ticker);
static void update_present_time(const ticker_data_t *const ticker);

/*
 * Initialize a ticker instance.
 */
static void initialize(const ticker_data_t *ticker)
{
    // Return if the queue has already been initialized; in that case the
    // interface used by the queue is already initialized.
    if (ticker->queue->initialized) {
        return;
    }
    if (ticker->queue->suspended) {
        return;
    }

    ticker->interface->init();

    const ticker_info_t *info = ticker->interface->get_info();
    uint32_t frequency = info->frequency;
    if (info->frequency == 0) {
        MBED_ASSERT(0);
        frequency = 1000000;
    }

    uint8_t frequency_shifts = 0;
    for (uint8_t i = 31; i > 0; --i) {
        if ((1U << i) == frequency) {
            frequency_shifts = i;
            break;
        }
    }

    uint32_t bits = info->bits;
    if ((info->bits > 32) || (info->bits < 4)) {
        MBED_ASSERT(0);
        bits = 32;
    }
    uint32_t max_delta = 0x7 << (bits - 4); // 7/16th of the counter range
    uint64_t max_delta_us =
        ((uint64_t)max_delta * 1000000 + frequency - 1) / frequency;

    ticker->queue->event_handler = NULL;
    ticker->queue->head = NULL;
    ticker->queue->tick_last_read = ticker->interface->read();
    ticker->queue->tick_remainder = 0;
    ticker->queue->frequency = frequency;
    ticker->queue->frequency_shifts = frequency_shifts;
    ticker->queue->bitmask = ((uint64_t)1 << bits) - 1;
    ticker->queue->max_delta = max_delta;
    ticker->queue->max_delta_us = max_delta_us;
    ticker->queue->present_time = 0;
    ticker->queue->dispatching = false;
    ticker->queue->suspended = false;
    ticker->queue->initialized = true;

    update_present_time(ticker);
    schedule_interrupt(ticker);
}
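/*
 * Illustrative worked example for the derived queue parameters in initialize()
 * above (not part of the original source): for a hypothetical 16-bit, 32768 Hz
 * low-power ticker, frequency_shifts = 15 (since 1 << 15 == 32768),
 * bitmask = 0xFFFF, max_delta = 0x7 << (16 - 4) = 0x7000 (28672 ticks, 7/16 of
 * the counter range), and
 * max_delta_us = (28672 * 1000000 + 32767) / 32768 = 875000 us.
 */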

/**
 * Set the event handler function of a ticker instance.
 */
static void set_handler(const ticker_data_t *const ticker, ticker_event_handler handler)
{
    ticker->queue->event_handler = handler;
}

/*
 * Convert a 32 bit timestamp into a 64 bit timestamp.
 *
 * A 64 bit timestamp is used as the point of time of reference while the
 * timestamp to convert is relative to this point of time.
 *
 * The lower 32 bits of the timestamp returned will be equal to the timestamp
 * to convert.
 *
 * If the timestamp to convert is less than the lower 32 bits of the time
 * reference then the timestamp to convert is seen as an overflowed value and
 * the upper 32 bits of the timestamp returned will be equal to the upper
 * 32 bits of the reference point + 1.
 * Otherwise, the upper 32 bits returned will be equal to the upper 32 bits of
 * the reference point.
 *
 * @param ref: The 64 bit timestamp of reference.
 * @param timestamp: The timestamp to convert.
 */
static us_timestamp_t convert_timestamp(us_timestamp_t ref, timestamp_t timestamp)
{
    bool overflow = timestamp < ((timestamp_t) ref) ? true : false;

    us_timestamp_t result = (ref & ~((us_timestamp_t)UINT32_MAX)) | timestamp;
    if (overflow) {
        result += (1ULL << 32);
    }

    return result;
}
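/*
 * Illustrative example (not part of the original source): with
 * ref = 0x00000001 80000000 and timestamp = 0x00000010, the lower 32 bits of
 * ref (0x80000000) are greater than the timestamp, so the 32-bit value is
 * treated as having wrapped and the result is 0x00000002 00000010. With
 * timestamp = 0x90000000 there is no wrap and the result is
 * 0x00000001 90000000.
 */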

/**
 * Update the present timestamp value of a ticker.
 */
static void update_present_time(const ticker_data_t *const ticker)
{
    ticker_event_queue_t *queue = ticker->queue;
    if (queue->suspended) {
        return;
    }
    uint32_t ticker_time = ticker->interface->read();
    if (ticker_time == ticker->queue->tick_last_read) {
        // No work to do
        return;
    }

    uint64_t elapsed_ticks = (ticker_time - queue->tick_last_read) & queue->bitmask;
    queue->tick_last_read = ticker_time;

    uint64_t elapsed_us;
    if (1000000 == queue->frequency) {
        // Optimized for 1MHz
        elapsed_us = elapsed_ticks;
    } else if (0 != queue->frequency_shifts) {
        // Optimized for frequencies that are a power of two
        uint64_t us_x_ticks = elapsed_ticks * 1000000;
        elapsed_us = us_x_ticks >> queue->frequency_shifts;

        // Update remainder
        queue->tick_remainder += us_x_ticks - (elapsed_us << queue->frequency_shifts);
        if (queue->tick_remainder >= queue->frequency) {
            elapsed_us += 1;
            queue->tick_remainder -= queue->frequency;
        }
    } else {
        // General case
        uint64_t us_x_ticks = elapsed_ticks * 1000000;
        elapsed_us = us_x_ticks / queue->frequency;

        // Update remainder
        queue->tick_remainder += us_x_ticks - elapsed_us * queue->frequency;
        if (queue->tick_remainder >= queue->frequency) {
            elapsed_us += 1;
            queue->tick_remainder -= queue->frequency;
        }
    }

    // Update current time
    queue->present_time += elapsed_us;
}
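/*
 * Illustrative example of the remainder tracking above (not part of the
 * original source): with frequency = 32768 Hz (frequency_shifts = 15) and
 * elapsed_ticks = 3, us_x_ticks = 3000000 and
 * elapsed_us = 3000000 >> 15 = 91, so tick_remainder grows by
 * 3000000 - (91 << 15) = 18112. Once the accumulated remainder reaches 32768,
 * one extra microsecond is added, so no time is lost to rounding over
 * repeated reads.
 */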

/**
 * Given an absolute timestamp, compute the HAL tick timestamp rounded up.
 */
static timestamp_t compute_tick_round_up(const ticker_data_t *const ticker, us_timestamp_t timestamp)
{
    ticker_event_queue_t *queue = ticker->queue;
    us_timestamp_t delta_us = timestamp - queue->present_time;

    timestamp_t delta = ticker->queue->max_delta;
    if (delta_us <= ticker->queue->max_delta_us) {
        // Checking max_delta_us ensures the operation will not overflow

        if (1000000 == queue->frequency) {
            // Optimized for 1MHz
            delta = delta_us;
            if (delta > ticker->queue->max_delta) {
                delta = ticker->queue->max_delta;
            }
        } else if (0 != queue->frequency_shifts) {
            // Optimized for frequencies that are a power of two
            delta = ((delta_us << ticker->queue->frequency_shifts) + 1000000 - 1) / 1000000;
            if (delta > ticker->queue->max_delta) {
                delta = ticker->queue->max_delta;
            }
        } else {
            // General case
            delta = (delta_us * queue->frequency + 1000000 - 1) / 1000000;
            if (delta > ticker->queue->max_delta) {
                delta = ticker->queue->max_delta;
            }
        }
    }
    return (queue->tick_last_read + delta) & queue->bitmask;
}
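/*
 * Illustrative example (not part of the original source): with
 * frequency = 32768 Hz (frequency_shifts = 15) and delta_us = 100, the
 * computation gives delta = ((100 << 15) + 999999) / 1000000 = 4 ticks
 * (about 122 us), the smallest tick count whose duration is not shorter than
 * the requested 100 us; rounding down to 3 ticks (about 92 us) would fire the
 * interrupt early.
 */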

/**
 * Return 1 if the tick has incremented to or past match_tick, otherwise 0.
 */
int _ticker_match_interval_passed(timestamp_t prev_tick, timestamp_t cur_tick, timestamp_t match_tick)
{
    if (match_tick > prev_tick) {
        return (cur_tick >= match_tick) || (cur_tick < prev_tick);
    } else {
        return (cur_tick < prev_tick) && (cur_tick >= match_tick);
    }
}
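/*
 * Illustrative example with a 16-bit counter (not part of the original
 * source): prev_tick = 0xFFF0, match_tick = 0xFFF8, cur_tick = 0x0010. Since
 * match_tick > prev_tick, the function returns
 * (cur_tick >= match_tick) || (cur_tick < prev_tick) = 0 || 1 = 1: the counter
 * wrapped past the match point, so the interval has passed.
 */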

/**
 * Compute the time when the interrupt has to be triggered and schedule it.
 *
 * If there is no event in the queue or the next event to execute is more
 * than ticker.queue.max_delta ticks from now then the ticker irq will be
 * scheduled in ticker.queue.max_delta ticks. Otherwise the irq will be
 * scheduled to happen when the running counter reaches the timestamp of the
 * first event in the queue.
 *
 * @note If there is no event in the queue then the interrupt is scheduled
 * ticker.queue.max_delta ticks from now. This is necessary to keep track
 * of the timer overflow.
 */
static void schedule_interrupt(const ticker_data_t *const ticker)
{
    ticker_event_queue_t *queue = ticker->queue;
    if (queue->suspended || ticker->queue->dispatching) {
        // Don't schedule the next interrupt until dispatching is
        // finished. This prevents repeated calls to interface->set_interrupt
        return;
    }

    update_present_time(ticker);

    if (ticker->queue->head) {
        us_timestamp_t present = ticker->queue->present_time;
        us_timestamp_t match_time = ticker->queue->head->timestamp;

        // if the event at the head of the queue is in the past then schedule
        // it immediately.
        if (match_time <= present) {
            ticker->interface->fire_interrupt();
            return;
        }

        timestamp_t match_tick = compute_tick_round_up(ticker, match_time);

        // The same tick should never occur since match_tick is rounded up.
        // If the same tick is returned scheduling will not work correctly.
        MBED_ASSERT(match_tick != queue->tick_last_read);

        ticker->interface->set_interrupt(match_tick);
        timestamp_t cur_tick = ticker->interface->read();

        // The counter may already have moved past match_tick while the
        // interrupt was being programmed; in that case fire it immediately so
        // the event at the head of the queue is not missed.
        if (_ticker_match_interval_passed(queue->tick_last_read, cur_tick, match_tick)) {
            ticker->interface->fire_interrupt();
        }
    } else {
        uint32_t match_tick =
            (queue->tick_last_read + queue->max_delta) & queue->bitmask;
        ticker->interface->set_interrupt(match_tick);
    }
}

void ticker_set_handler(const ticker_data_t *const ticker, ticker_event_handler handler)
{
    initialize(ticker);

    core_util_critical_section_enter();
    set_handler(ticker, handler);
    core_util_critical_section_exit();
}

void ticker_irq_handler(const ticker_data_t *const ticker)
{
    core_util_critical_section_enter();

    ticker->interface->clear_interrupt();
    if (ticker->queue->suspended) {
        core_util_critical_section_exit();
        return;
    }

    /* Go through all the pending TimerEvents */
    ticker->queue->dispatching = true;
    while (1) {
        if (ticker->queue->head == NULL) {
            break;
        }

        // update the current timestamp used by the queue
        update_present_time(ticker);

        if (ticker->queue->head->timestamp <= ticker->queue->present_time) {
            // This event was in the past:
            // point to the following one and execute its handler
            ticker_event_t *p = ticker->queue->head;
            ticker->queue->head = ticker->queue->head->next;
            if (ticker->queue->event_handler != NULL) {
                (*ticker->queue->event_handler)(p->id); // NOTE: the handler can set new events
            }
            /* Note: We continue back to examining the head because calling the
             * event handler may have altered the chain of pending events. */
        } else {
            break;
        }
    }
    ticker->queue->dispatching = false;

    schedule_interrupt(ticker);

    core_util_critical_section_exit();
}

void ticker_insert_event(const ticker_data_t *const ticker, ticker_event_t *obj, timestamp_t timestamp, uint32_t id)
{
    core_util_critical_section_enter();

    // update the current timestamp
    update_present_time(ticker);
    us_timestamp_t absolute_timestamp = convert_timestamp(
        ticker->queue->present_time,
        timestamp
    );

    // defer to ticker_insert_event_us
    ticker_insert_event_us(
        ticker,
        obj, absolute_timestamp, id
    );

    core_util_critical_section_exit();
}

void ticker_insert_event_us(const ticker_data_t *const ticker, ticker_event_t *obj, us_timestamp_t timestamp, uint32_t id)
{
    core_util_critical_section_enter();

    // update the current timestamp
    update_present_time(ticker);

    // initialise our data
    obj->timestamp = timestamp;
    obj->id = id;

    /* Go through the list until we either reach the end, or find
       an element this should come before (which is possibly the
       head). */
    ticker_event_t *prev = NULL, *p = ticker->queue->head;
    while (p != NULL) {
        /* check if we come before p */
        if (timestamp < p->timestamp) {
            break;
        }
        /* go to the next element */
        prev = p;
        p = p->next;
    }

    /* if we're at the end p will be NULL, which is correct */
    obj->next = p;

    /* if prev is NULL we're at the head */
    if (prev == NULL) {
        ticker->queue->head = obj;
    } else {
        prev->next = obj;
    }

    // Reschedule if the new event is at the head of the queue or if it has
    // already expired, so the interrupt is not left waiting on a later event.
    if (prev == NULL || timestamp <= ticker->queue->present_time) {
        schedule_interrupt(ticker);
    }

    core_util_critical_section_exit();
}
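/*
 * Usage sketch (illustrative only, not part of the original source), assuming
 * a ticker_data_t obtained from, e.g., get_us_ticker_data() in us_ticker_api.h:
 *
 *     static ticker_event_t event;
 *
 *     static void on_tick(uint32_t id)
 *     {
 *         // handle the event identified by 'id'
 *     }
 *
 *     const ticker_data_t *data = get_us_ticker_data();
 *     ticker_set_handler(data, on_tick);
 *     // schedule 'event' 1 ms from now, using its address as the id
 *     ticker_insert_event_us(data, &event, ticker_read_us(data) + 1000,
 *                            (uint32_t)&event);
 */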

void ticker_remove_event(const ticker_data_t *const ticker, ticker_event_t *obj)
{
    core_util_critical_section_enter();

    // remove this object from the list
    if (ticker->queue->head == obj) {
        // first in the list, so just drop me
        ticker->queue->head = obj->next;
        schedule_interrupt(ticker);
    } else {
        // find the object before me, then drop me
        ticker_event_t *p = ticker->queue->head;
        while (p != NULL) {
            if (p->next == obj) {
                p->next = obj->next;
                break;
            }
            p = p->next;
        }
    }

    core_util_critical_section_exit();
}

timestamp_t ticker_read(const ticker_data_t *const ticker)
{
    return ticker_read_us(ticker);
}

us_timestamp_t ticker_read_us(const ticker_data_t *const ticker)
{
    us_timestamp_t ret;

    initialize(ticker);

    core_util_critical_section_enter();
    update_present_time(ticker);
    ret = ticker->queue->present_time;
    core_util_critical_section_exit();

    return ret;
}

int ticker_get_next_timestamp(const ticker_data_t *const data, timestamp_t *timestamp)
{
    int ret = 0;

    /* if head is NULL, there are no pending events */
    core_util_critical_section_enter();
    if (data->queue->head != NULL) {
        *timestamp = data->queue->head->timestamp;
        ret = 1;
    }
    core_util_critical_section_exit();

    return ret;
}

void ticker_suspend(const ticker_data_t *const ticker)
{
    core_util_critical_section_enter();

    ticker->queue->suspended = true;

    core_util_critical_section_exit();
}

void ticker_resume(const ticker_data_t *const ticker)
{
    core_util_critical_section_enter();

    ticker->queue->suspended = false;
    if (ticker->queue->initialized) {
        ticker->queue->tick_last_read = ticker->interface->read();

        update_present_time(ticker);
        schedule_interrupt(ticker);
    } else {
        initialize(ticker);
    }

    core_util_critical_section_exit();
}