mirror of https://github.com/ARMmbed/mbed-os.git
Timed sleep rework
parent a1e1ab61a4
commit 89eba7303f
@@ -13,9 +6,6 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MBED_TICKLESS
#error [NOT_SUPPORTED] Tickless mode not supported for this target.
#endif

#include "mbed.h"
#include "greentea-client/test_env.h"

@@ -28,7 +25,8 @@ extern "C" {
}
#include "platform/SysTimer.h"

#define TEST_TICKS 42UL
#define TEST_TICKS 42
#define TEST_TICK_US (TEST_TICKS * 1000)
#define DELAY_DELTA_US 2500ULL

/* Use a specific delta value for deep sleep, as entry/exit adds up extra latency.
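For orientation, the timing constants in the hunk above work out as below. This arithmetic check is an illustration added for this write-up, not part of the patch; the 44 ms figure is the wait budget the tests later pass to sem_try_acquire().

// Illustration only - worked numbers implied by the macros above.
static_assert(42 * 1000 == 42000, "one test tick (TEST_TICK_US) is 42 ms");
static_assert((42000 + 2500) / 1000 == 44, "a tick delay plus DELAY_DELTA_US gives a 44 ms wait budget");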
@@ -40,29 +38,29 @@ extern "C" {
#endif

using namespace utest::v1;
using mbed::internal::SysTimer;

const us_timestamp_t DELAY_US = 1000000ULL * TEST_TICKS / OS_TICK_FREQ;
const us_timestamp_t DELAY_US = TEST_TICK_US;

// Override the handler() -- the SysTick interrupt must not be set as pending by the test code.
class SysTimerTest: public mbed::internal::SysTimer {
// The SysTick interrupt must not be set as pending by the test code.
template <uint32_t US_IN_TICK>
class SysTimerTest: public SysTimer<US_IN_TICK, false> {
private:
    Semaphore _sem;
    virtual void handler()
    {
        core_util_critical_section_enter();
        _increment_tick();
        core_util_critical_section_exit();
        _sem.release();
        SysTimer<US_IN_TICK, false>::handler();
    }

public:
    SysTimerTest() :
        SysTimer(), _sem(0, 1)
        SysTimer<US_IN_TICK, false>(), _sem(0, 1)
    {
    }

    SysTimerTest(const ticker_data_t *data) :
        SysTimer(data), _sem(0, 1)
        SysTimer<US_IN_TICK, false>(data), _sem(0, 1)
    {
    }

@@ -153,7 +151,7 @@ void mock_ticker_reset()
 */
void test_created_with_zero_tick_count(void)
{
    SysTimerTest st;
    SysTimerTest<1000> st;
    TEST_ASSERT_EQUAL_UINT32(0, st.get_tick());
}

@@ -164,26 +162,27 @@ void test_created_with_zero_tick_count(void)
 * Then the tick count is not updated
 * When @a suspend and @a resume methods are called again after a delay
 * Then the tick count is updated
 * and the number of ticks incremented is equal to TEST_TICKS - 1
 * and the number of ticks incremented is equal to TEST_TICKS
 * When @a suspend and @a resume methods are called again without a delay
 * Then the tick count is not updated
 */
void test_update_tick(void)
{
    mock_ticker_reset();
    SysTimerTest st(&mock_ticker_data);
    st.suspend(TEST_TICKS * 2);
    TEST_ASSERT_EQUAL_UINT32(0, st.resume());
    SysTimerTest<1000> st(&mock_ticker_data);
    st.set_wake_time(st.get_tick() + TEST_TICKS * 2);
    st.cancel_wake();
    TEST_ASSERT_EQUAL_UINT32(0, st.get_tick());

    st.suspend(TEST_TICKS * 2);
    st.set_wake_time(st.get_tick() + TEST_TICKS * 2);
    mock_ticker_timestamp = DELAY_US;
    TEST_ASSERT_EQUAL_UINT32(TEST_TICKS - 1, st.resume());
    TEST_ASSERT_EQUAL_UINT32(TEST_TICKS - 1, st.get_tick());
    st.cancel_wake();
    TEST_ASSERT_EQUAL_UINT32(TEST_TICKS, st.update_and_get_tick());
    TEST_ASSERT_EQUAL_UINT32(TEST_TICKS, st.get_tick());

    st.suspend(TEST_TICKS * 2);
    TEST_ASSERT_EQUAL_UINT32(0, st.resume());
    TEST_ASSERT_EQUAL_UINT32(TEST_TICKS - 1, st.get_tick());
    st.set_wake_time(st.get_tick() + TEST_TICKS * 2);
    st.cancel_wake();
    TEST_ASSERT_EQUAL_UINT32(TEST_TICKS, st.get_tick());
}

/** Test get_time returns correct time

@@ -195,7 +194,7 @@ void test_update_tick(void)
void test_get_time(void)
{
    mock_ticker_reset();
    SysTimerTest st(&mock_ticker_data);
    SysTimerTest<1000> st(&mock_ticker_data);
    us_timestamp_t t1 = st.get_time();

    mock_ticker_timestamp = DELAY_US;
@@ -212,9 +211,9 @@ void test_get_time(void)
 */
void test_cancel_tick(void)
{
    SysTimerTest st;
    SysTimerTest<TEST_TICK_US> st;
    st.cancel_tick();
    st.schedule_tick(TEST_TICKS);
    st.start_tick();

    st.cancel_tick();
    bool acquired = st.sem_try_acquire((DELAY_US + DELAY_DELTA_US) / 1000ULL);

@@ -222,50 +221,41 @@ void test_cancel_tick(void)
    TEST_ASSERT_EQUAL_UINT32(0, st.get_tick());
}

/** Test schedule zero
 *
 * Given a SysTimer
 * When a tick is scheduled with delta = 0 ticks
 * Then the handler is called instantly
 */
void test_schedule_zero(void)
{
    SysTimerTest st;

    st.schedule_tick(0UL);
    bool acquired = st.sem_try_acquire(0);
    TEST_ASSERT_TRUE(acquired);
}

/** Test handler called once
/** Test handler called twice
 *
 * Given a SysTimer with a tick scheduled with delta = TEST_TICKS
 * When the handler is called
 * Then the tick count is incremented by 1
 * and elapsed time is equal to 1000000ULL * TEST_TICKS / OS_TICK_FREQ;
 * When more time elapses
 * Then the handler is not called again
 * Repeat a second time.
 */
void test_handler_called_once(void)
void test_handler_called_twice(void)
{
    SysTimerTest st;
    st.schedule_tick(TEST_TICKS);
    SysTimerTest<TEST_TICK_US> st;
    us_timestamp_t t1 = st.get_time();
    bool acquired = st.sem_try_acquire(0);
    TEST_ASSERT_FALSE(acquired);

    st.start_tick();
    // Wait in a busy loop to prevent entering sleep or deepsleep modes.
    while (!acquired) {
    do {
        acquired = st.sem_try_acquire(0);
    }
    } while (!acquired);
    us_timestamp_t t2 = st.get_time();
    TEST_ASSERT_TRUE(acquired);
    TEST_ASSERT_EQUAL_UINT32(1, st.get_tick());
    TEST_ASSERT_UINT64_WITHIN(DELAY_DELTA_US, DELAY_US, t2 - t1);

    acquired = st.sem_try_acquire((DELAY_US + DELAY_DELTA_US) / 1000ULL);
    TEST_ASSERT_FALSE(acquired);
    TEST_ASSERT_EQUAL_UINT32(1, st.get_tick());
    // Wait in a busy loop to prevent entering sleep or deepsleep modes.
    do {
        acquired = st.sem_try_acquire(0);
    } while (!acquired);
    t2 = st.get_time();
    TEST_ASSERT_TRUE(acquired);
    TEST_ASSERT_EQUAL_UINT32(2, st.get_tick());
    TEST_ASSERT_UINT64_WITHIN(DELAY_DELTA_US, DELAY_US * 2, t2 - t1);
    st.cancel_tick();
}

#if DEVICE_SLEEP

@@ -281,16 +271,17 @@ void test_handler_called_once(void)
void test_sleep(void)
{
    Timer timer;
    SysTimerTest st;
    SysTimerTest<TEST_TICK_US> st;

    sleep_manager_lock_deep_sleep();
    timer.start();
    st.schedule_tick(TEST_TICKS);
    st.start_tick();

    TEST_ASSERT_FALSE_MESSAGE(sleep_manager_can_deep_sleep(), "Deep sleep should be disallowed");
    st.sem_acquire();

    timer.stop();
    st.cancel_tick();
    sleep_manager_unlock_deep_sleep();

    TEST_ASSERT_UINT64_WITHIN(DELAY_DELTA_US, DELAY_US, timer.read_high_resolution_us());

@@ -319,13 +310,14 @@ void test_deepsleep(void)
    wait_ms(10);
    // Regular Timer might be disabled during deepsleep.
    LowPowerTimer lptimer;
    SysTimerTest st;
    SysTimerTest<TEST_TICK_US> st;

    lptimer.start();
    st.schedule_tick(TEST_TICKS);
    st.start_tick();
    TEST_ASSERT_TRUE_MESSAGE(sleep_manager_can_deep_sleep_test_check(), "Deep sleep should be allowed");
    st.sem_acquire();
    lptimer.stop();
    st.cancel_tick();

    TEST_ASSERT_UINT64_WITHIN(DEEP_SLEEP_DELAY_DELTA_US, DELAY_US, lptimer.read_high_resolution_us());
}

@@ -334,7 +326,7 @@ void test_deepsleep(void)

utest::v1::status_t test_setup(const size_t number_of_cases)
{
    GREENTEA_SETUP(5, "default_auto");
    GREENTEA_SETUP(15, "default_auto");
    return verbose_test_setup_handler(number_of_cases);
}

@@ -343,8 +335,7 @@ Case cases[] = {
    Case("Tick count is updated correctly", test_update_tick),
    Case("Time is updated correctly", test_get_time),
    Case("Tick can be cancelled", test_cancel_tick),
    Case("Schedule zero ticks", test_schedule_zero),
    Case("Handler called once", test_handler_called_once),
    Case("Handler called twice", test_handler_called_twice),
#if DEVICE_SLEEP
    Case("Wake up from sleep", test_sleep),
#if DEVICE_LPTICKER && !MBED_CONF_TARGET_TICKLESS_FROM_US_TICKER
@@ -14,29 +14,21 @@
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "platform/SysTimer.h"

#if MBED_TICKLESS

#include "hal/us_ticker_api.h"
#include "hal/lp_ticker_api.h"
#include "mbed_atomic.h"
#include "mbed_critical.h"
#include "mbed_assert.h"
#if defined(TARGET_CORTEX_A)
#include "rtx_core_ca.h"
#else // Cortex-M
#include "rtx_core_cm.h"
#endif
#include "platform/mbed_power_mgmt.h"
#include "platform/CriticalSectionLock.h"
#include "platform/SysTimer.h"
extern "C" {
#if MBED_CONF_RTOS_PRESENT
#include "rtx_lib.h"
#if defined(TARGET_CORTEX_A)
#include "irq_ctrl.h"
#endif
}

#define US_IN_TICK (1000000 / OS_TICK_FREQ)
MBED_STATIC_ASSERT(1000000 % OS_TICK_FREQ == 0, "OS_TICK_FREQ must be a divisor of 1000000 for correct tick calculations");

#if (defined(NO_SYSTICK))
/**
 * Return an IRQ number that can be used in the absence of SysTick
@@ -53,125 +45,263 @@ extern "C" IRQn_ID_t mbed_get_a9_tick_irqn(void);
namespace mbed {
namespace internal {

SysTimer::SysTimer() :
template<uint32_t US_IN_TICK, bool IRQ>
SysTimer<US_IN_TICK, IRQ>::SysTimer() :
#if DEVICE_LPTICKER
    TimerEvent(get_lp_ticker_data()),
#else
    TimerEvent(get_us_ticker_data()),
#endif
    _time_us(0), _tick(0)
    _time_us(ticker_read_us(_ticker_data)),
    _tick(0),
    _unacknowledged_ticks(0),
    _wake_time_set(false),
    _wake_time_passed(false),
    _ticking(false),
    _deep_sleep_locked(false)
{
    _time_us = ticker_read_us(_ticker_data);
    _suspend_time_passed = true;
    _suspended = false;
}

SysTimer::SysTimer(const ticker_data_t *data) :
    TimerEvent(data), _time_us(0), _tick(0)
{
    _time_us = ticker_read_us(_ticker_data);
    _suspend_time_passed = true;
    _suspended = false;
}

void SysTimer::setup_irq()
{
#if (defined(NO_SYSTICK) && !defined (TARGET_CORTEX_A))
    NVIC_SetVector(mbed_get_m0_tick_irqn(), (uint32_t)SysTick_Handler);
    NVIC_SetPriority(mbed_get_m0_tick_irqn(), 0xFF); /* RTOS requires lowest priority */
    NVIC_EnableIRQ(mbed_get_m0_tick_irqn());
#else
    // Ensure SysTick has the correct priority as it is still used
    // to trigger software interrupts on each tick. The period does
    // not matter since it will never start counting.
    OS_Tick_Setup(osRtxConfig.tick_freq, OS_TICK_HANDLER);
#endif
}

void SysTimer::suspend(uint32_t ticks)
{
    // Remove ensures serialized access to SysTimer by stopping timer interrupt
    remove();

    _suspend_time_passed = false;
    _suspended = true;

    schedule_tick(ticks);
}

bool SysTimer::suspend_time_passed()
{
    return _suspend_time_passed;
}

uint32_t SysTimer::resume()
{
    // Remove ensures serialized access to SysTimer by stopping timer interrupt
    remove();

    _suspended = false;
    _suspend_time_passed = true;

    uint64_t elapsed_ticks = (ticker_read_us(_ticker_data) - _time_us) / US_IN_TICK;
    if (elapsed_ticks > 0) {
        // Don't update to the current tick. Instead, update to the
        // previous tick and let the SysTick handler increment it
        // to the current value. This allows scheduling restart
        // successfully after the OS is resumed.
        elapsed_ticks--;
    if (!_ticker_data->interface->runs_in_deep_sleep) {
        sleep_manager_lock_deep_sleep();
    }
    _time_us += elapsed_ticks * US_IN_TICK;
    _tick += elapsed_ticks;

    return elapsed_ticks;
}

void SysTimer::schedule_tick(uint32_t delta)
template<uint32_t US_IN_TICK, bool IRQ>
SysTimer<US_IN_TICK, IRQ>::SysTimer(const ticker_data_t *data) :
    TimerEvent(data),
    _time_us(ticker_read_us(_ticker_data)),
    _tick(0),
    _unacknowledged_ticks(0),
    _wake_time_set(false),
    _wake_time_passed(false),
    _ticking(false),
    _deep_sleep_locked(false)
{
    core_util_critical_section_enter();

    insert_absolute(_time_us + delta * US_IN_TICK);

    core_util_critical_section_exit();
    if (!_ticker_data->interface->runs_in_deep_sleep) {
        sleep_manager_lock_deep_sleep();
    }
}

void SysTimer::cancel_tick()
template<uint32_t US_IN_TICK, bool IRQ>
SysTimer<US_IN_TICK, IRQ>::~SysTimer()
{
    cancel_tick();
    cancel_wake();
    if (!_ticker_data->interface->runs_in_deep_sleep) {
        sleep_manager_unlock_deep_sleep();
    }
}

template<uint32_t US_IN_TICK, bool IRQ>
void SysTimer<US_IN_TICK, IRQ>::set_wake_time(uint64_t at)
{
    // SysTimer must not be active - we must be in suspend state
    MBED_ASSERT(!_ticking);

    // There is a potential race here, when called from outside
    // a critical section. See function documentation for notes on
    // handling it.
    if (core_util_atomic_load_bool(&_wake_time_set)) {
        return;
    }

    // Analyse the timers
    if (update_and_get_tick() >= at) {
        _wake_time_passed = true;
        return;
    }

    uint64_t ticks_to_sleep = at - _tick;
    uint64_t wake_time = at * US_IN_TICK;

    /* Set this first, before attaching the interrupt that can unset it */
    _wake_time_set = true;
    _wake_time_passed = false;

    /* If deep sleep is unlocked, and we have enough time, let's go for it */
    if (MBED_CONF_TARGET_DEEP_SLEEP_LATENCY > 0 &&
            ticks_to_sleep > MBED_CONF_TARGET_DEEP_SLEEP_LATENCY &&
            sleep_manager_can_deep_sleep()) {
        /* Schedule the wake up interrupt early, allowing for the deep sleep latency */
        _wake_early = true;
        insert_absolute(wake_time - MBED_CONF_TARGET_DEEP_SLEEP_LATENCY * US_IN_TICK);
    } else {
        /* Otherwise, we'll set up for shallow sleep at the precise time.
         * To make absolutely sure it's shallow so we don't incur the latency,
         * take our own lock, to avoid a race on a thread unlocking it.
         */
        _wake_early = false;
        if (MBED_CONF_TARGET_DEEP_SLEEP_LATENCY > 0 && !_deep_sleep_locked) {
            _deep_sleep_locked = true;
            sleep_manager_lock_deep_sleep();
        }
        insert_absolute(wake_time);
    }
}

template<uint32_t US_IN_TICK, bool IRQ>
void SysTimer<US_IN_TICK, IRQ>::cancel_wake()
{
    MBED_ASSERT(!_ticking);
    // Remove ensures serialized access to SysTimer by stopping timer interrupt
    remove();

    _wake_time_set = false;
    _wake_time_passed = false;

    if (_deep_sleep_locked) {
        _deep_sleep_locked = false;
        sleep_manager_unlock_deep_sleep();
    }
}

template<uint32_t US_IN_TICK, bool IRQ>
uint64_t SysTimer<US_IN_TICK, IRQ>::_elapsed_ticks() const
{
    uint64_t elapsed_us = ticker_read_us(_ticker_data) - _time_us;
    if (elapsed_us < US_IN_TICK) {
        return 0;
    } else if (elapsed_us < 2 * US_IN_TICK) {
        return 1;
    } else if (elapsed_us <= 0xFFFFFFFF) {
        // Fast common case avoiding 64-bit division
        return (uint32_t) elapsed_us / US_IN_TICK;
    } else {
        return elapsed_us / US_IN_TICK;
    }
}

template<uint32_t US_IN_TICK, bool IRQ>
void SysTimer<US_IN_TICK, IRQ>::start_tick()
{
    _ticking = true;
    if (_unacknowledged_ticks > 0) {
        _set_irq_pending();
    }
    _schedule_tick();
}

template<uint32_t US_IN_TICK, bool IRQ>
void SysTimer<US_IN_TICK, IRQ>::_schedule_tick()
{
    insert_absolute(_time_us + US_IN_TICK);
}

template<uint32_t US_IN_TICK, bool IRQ>
void SysTimer<US_IN_TICK, IRQ>::acknowledge_tick()
{
    // Try to avoid missed ticks if OS's IRQ level is not keeping
    // up with our handler.
    // 8-bit counter to save space, and also make sure we don't
    // try TOO hard to resync if something goes really awry -
    // resync will reset if the count hits 256.
    if (core_util_atomic_decr_u8(&_unacknowledged_ticks, 1) > 0) {
        _set_irq_pending();
    }
}

template<uint32_t US_IN_TICK, bool IRQ>
void SysTimer<US_IN_TICK, IRQ>::cancel_tick()
{
    // Underlying call is interrupt safe

    remove();
    _ticking = false;

    _clear_irq_pending();
}

uint32_t SysTimer::get_tick()
template<uint32_t US_IN_TICK, bool IRQ>
uint64_t SysTimer<US_IN_TICK, IRQ>::get_tick() const
{
    return _tick & 0xFFFFFFFF;
    // Atomic is necessary as this can be called from any foreground context,
    // while IRQ can update it.
    return core_util_atomic_load_u64(&_tick);
}

us_timestamp_t SysTimer::get_time()
template<uint32_t US_IN_TICK, bool IRQ>
uint64_t SysTimer<US_IN_TICK, IRQ>::update_and_get_tick()
{
    MBED_ASSERT(!_ticking && !_wake_time_set);
    // Can only be used when no interrupts are scheduled
    // Update counters to reflect elapsed time
    uint64_t elapsed_ticks = _elapsed_ticks();
    _unacknowledged_ticks = 0;
    _time_us += elapsed_ticks * US_IN_TICK;
    _tick += elapsed_ticks;

    return _tick;
}

template<uint32_t US_IN_TICK, bool IRQ>
us_timestamp_t SysTimer<US_IN_TICK, IRQ>::get_time() const
{
    // Underlying call is interrupt safe

    return ticker_read_us(_ticker_data);
}

SysTimer::~SysTimer()
template<uint32_t US_IN_TICK, bool IRQ>
us_timestamp_t SysTimer<US_IN_TICK, IRQ>::get_time_since_tick() const
{
    // Underlying call is interrupt safe, and _time_us is not updated by IRQ

    return get_time() - _time_us;
}

void SysTimer::_set_irq_pending()
#if (defined(NO_SYSTICK))
template<uint32_t US_IN_TICK, bool IRQ>
IRQn_Type SysTimer<US_IN_TICK, IRQ>::get_irq_number()
{
    return mbed_get_m0_tick_irqn();
}
#elif (TARGET_CORTEX_M)
template<uint32_t US_IN_TICK, bool IRQ>
IRQn_Type SysTimer<US_IN_TICK, IRQ>::get_irq_number()
{
    return SysTick_IRQn;
}
#elif (TARGET_CORTEX_A)
template<uint32_t US_IN_TICK, bool IRQ>
IRQn_ID_t SysTimer<US_IN_TICK, IRQ>::get_irq_number()
{
    return mbed_get_a9_tick_irqn();
}
#endif

template<uint32_t US_IN_TICK, bool IRQ>
void SysTimer<US_IN_TICK, IRQ>::_set_irq_pending()
{
    // Protected function synchronized externally

    if (!IRQ) {
        return;
    }
#if (defined(NO_SYSTICK))
    NVIC_SetPendingIRQ(mbed_get_m0_tick_irqn());
#elif (TARGET_CORTEX_A)
    IRQ_SetPending(mbed_get_a9_tick_irqn());
#else
#elif (TARGET_CORTEX_M)
    SCB->ICSR = SCB_ICSR_PENDSTSET_Msk;
#else
    IRQ_SetPending(mbed_get_a9_tick_irqn());
#endif
}

void SysTimer::_increment_tick()
template<uint32_t US_IN_TICK, bool IRQ>
void SysTimer<US_IN_TICK, IRQ>::_clear_irq_pending()
{
    // Protected function synchronized externally
    if (!IRQ) {
        return;
    }
#if (defined(NO_SYSTICK))
    NVIC_ClearPendingIRQ(mbed_get_m0_tick_irqn());
#elif (TARGET_CORTEX_M)
    SCB->ICSR = SCB_ICSR_PENDSTCLR_Msk;
#else
    IRQ_ClearPending(mbed_get_a9_tick_irqn());
#endif
}

template<uint32_t US_IN_TICK, bool IRQ>
void SysTimer<US_IN_TICK, IRQ>::_increment_tick()
{
    // Protected function synchronized externally

@@ -179,22 +309,49 @@ void SysTimer::_increment_tick()
    _time_us += US_IN_TICK;
}

void SysTimer::handler()
template<uint32_t US_IN_TICK, bool IRQ>
void SysTimer<US_IN_TICK, IRQ>::handler()
{
    core_util_critical_section_enter();

    if (_suspended) {
        _suspend_time_passed = true;
    } else {
    /* To reduce IRQ latency problems, we do not re-arm in the interrupt handler */
    if (_wake_time_set) {
        _wake_time_set = false;
        if (!_wake_early) {
            _wake_time_passed = true;
        }
        /* If this was an early interrupt, user has the responsibility to check and
         * note the combination of (!set, !passed), and re-arm the wake timer if
         * necessary.
         */
    } else if (_ticking) {
        _unacknowledged_ticks++;
        _set_irq_pending();
        _increment_tick();
        schedule_tick();
        // We do this now, rather than in acknowledgement, as we get it "for free"
        // here - because we're in the ticker handler, the programming gets deferred
        // until end of dispatch, and the ticker would likely be rescheduling
        // anyway after dispatch.

        _schedule_tick();
    }

    core_util_critical_section_exit();
}

}
}
#if MBED_CONF_RTOS_PRESENT
/* Whatever the OS wants (in case it isn't 1ms) */
MBED_STATIC_ASSERT(1000000 % OS_TICK_FREQ == 0, "OS_TICK_FREQ must be a divisor of 1000000 for correct tick calculations");
#define OS_TICK_US (1000000 / OS_TICK_FREQ)
#if OS_TICK_US != 1000
template class SysTimer<OS_TICK_US>;
#endif
#endif

#endif // MBED_TICKLESS
/* Standard 1ms SysTimer */
template class SysTimer<1000>;

/* Standard 1ms SysTimer that doesn't set interrupts, used for Greentea tests */
template class SysTimer<1000, false>;

/* Slowed-down SysTimer that doesn't set interrupts, used for Greentea tests */
template class SysTimer<42000, false>;

} // namespace internal
} // namespace mbed
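To make the deep-sleep branch of set_wake_time() above concrete, here is a small sketch of the scheduling arithmetic. It is not part of the patch; the 1 ms tick and the latency of 3 ticks are assumed values chosen purely for illustration.

// Hypothetical helper for illustration only - mirrors the branch in set_wake_time().
// Assumes a 1 ms tick (US_IN_TICK == 1000) and MBED_CONF_TARGET_DEEP_SLEEP_LATENCY == 3.
uint64_t example_programmed_wake_us(uint64_t wake_tick, uint64_t current_tick,
                                    bool deep_sleep_allowed)
{
    const uint64_t us_in_tick = 1000;
    const uint64_t latency_ticks = 3;
    uint64_t ticks_to_sleep = wake_tick - current_tick;
    if (latency_ticks > 0 && ticks_to_sleep > latency_ticks && deep_sleep_allowed) {
        // Deep sleep: program the wake interrupt early to absorb the exit latency.
        return wake_tick * us_in_tick - latency_ticks * us_in_tick;
    }
    // Shallow sleep: program the exact wake time (deep sleep is locked out instead).
    return wake_tick * us_in_tick;
}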
@@ -17,105 +17,223 @@
#ifndef MBED_SYS_TIMER_H
#define MBED_SYS_TIMER_H

#if MBED_TICKLESS || defined(DOXYGEN_ONLY)

#include "platform/NonCopyable.h"
#include "platform/mbed_atomic.h"
#include "drivers/TimerEvent.h"
#include "cmsis.h"
#if defined(TARGET_CORTEX_A)
#include "irq_ctrl.h"
#endif

namespace mbed {
namespace internal {

/**
 * @cond RTOS_INTERNAL
 * @cond MBED_INTERNAL
 *
 * @addtogroup rtos
 * @addtogroup mbed
 * @{
 *
 * @defgroup rtos_SysTimer SysTimer class
 * @defgroup mbed_SysTimer SysTimer class
 * @{
 */

/**
 * The SysTimer class is used exclusively by the RTX idle loop in TICKLESS mode.
 * The SysTimer class is used to provide timing for system suspension, and
 * the idle loop in TICKLESS mode.
 *
 * @note SysTimer is not part of the Mbed RTOS API.
 * Template for speed for testing - only one instance will be used normally.
 *
 * @note SysTimer is not part of the Mbed API.
 */
class SysTimer: private mbed::TimerEvent, private mbed::NonCopyable<SysTimer> {
template <uint32_t US_IN_TICK, bool IRQ = true>
class SysTimer: private mbed::TimerEvent, private mbed::NonCopyable<SysTimer<US_IN_TICK, IRQ> > {
public:

    /**
     * Default constructor uses LPTICKER if available (so the timer will
     * continue to run in deep sleep), else USTICKER.
     */
    SysTimer();

    SysTimer(const ticker_data_t *data);
    virtual ~SysTimer();

    ~SysTimer();

    /**
     * Enable an IRQ/SysTick with the correct priority.
     * Get the interrupt number for the tick
     *
     * @return interrupt number
     */
    static void setup_irq();
#if TARGET_CORTEX_A
    static IRQn_ID_t get_irq_number();
#elif TARGET_CORTEX_M
    static IRQn_Type get_irq_number();
#endif

    /**
     * Set wakeup time and schedule a wakeup event after delta ticks
     * Set the wake time
     *
     * After suspend has been called the function suspend_time_passed
     * can be used to determine if the suspend time has passed.
     * Schedules an interrupt to cause wake-up in time for the event. Interrupt
     * may be arranged early to account for latency. If the time has already
     * passed, no interrupt will be scheduled.
     *
     * @param delta Ticks to remain suspended
     * This is called from outside a critical section, as it is known to be
     * a slow operation.
     *
     * If the wake time is already set, this is a no-op. But that check is racy,
     * which means wake_time_set() should be rechecked after taking a critical
     * section.
     *
     * As a side-effect, this clears the unacknowledged tick count - the caller
     * is expected to use update_and_get_tick() after the suspend operation.
     *
     * @param at Wake up tick
     * @warning If the ticker tick is already scheduled it needs to be cancelled first!
     */
    void suspend(uint32_t delta);
    void set_wake_time(uint64_t at);

    /**
     * Check if the suspend time has passed
     * Check whether the wake time has passed
     *
     * @return true if the specified number of ticks has passed otherwise false
     * This is a fast operation, based on checking whether the wake interrupt
     * has run.
     *
     * @return true if the specified wake tick has passed
     */
    bool suspend_time_passed();
    bool wake_time_passed() const
    {
        return core_util_atomic_load_bool(&_wake_time_passed);
    }

    /**
     * Exit suspend mode and return elapsed ticks
     * Check whether wake timer is active
     *
     * Due to a scheduling issue, the number of ticks returned is decremented
     * by 1 so that a handler can be called and update to the current value.
     * This allows scheduling restart successfully after the OS is resumed.
     *
     * @return the number of elapsed ticks minus 1
     * @return true if the wake timer is active.
     */
    uint32_t resume();
    bool wake_time_set() const
    {
        return core_util_atomic_load_bool(&_wake_time_set);
    }

    /**
     * Cancel any pending wake
     */
    void cancel_wake();

    /**
     * Schedule an os tick to fire
     *
     * @param delta Tick to fire at relative to current tick
     * Ticks will be rescheduled automatically every tick until cancel_tick is called.
     *
     * A tick will be fired immediately if there are any unacknowledged ticks.
     *
     * @warning If a tick is already scheduled it needs to be cancelled first!
     */
    void schedule_tick(uint32_t delta = 1);
    void start_tick();

    /**
     * Prevent any scheduled ticks from triggering
     * Acknowledge an os tick
     *
     * This will queue another os tick immediately if the os is running slow
     */
    void acknowledge_tick();

    /**
     * Prevent any more scheduled ticks from triggering
     *
     * If called from OS tick context, there may be remaining unacknowledged ticks.
     */
    void cancel_tick();

    /**
     * Check whether ticker is active
     *
     * Each time the tick interrupt fires, it is automatically rescheduled,
     * so this will remain true once the tick is started, except during
     * processing.
     *
     * @return true if the ticker is active.
     */
    bool ticking() const
    {
        return core_util_atomic_load_bool(&_ticking);
    }

    /**
     * Check unacknowledged ticks
     *
     * Returns the count of how many times the OS timer has been queued minus
     * the number of times it has been acknowledged.
     *
     * get_tick() - unacknowledged_ticks() should equal the OS's tick count,
     * although such a calculation is not atomic if the ticker is currently running.
     *
     * @return number of unacknowledged ticks
     */
    int unacknowledged_ticks() const
    {
        return core_util_atomic_load_u8(&_unacknowledged_ticks);
    }

    /** Get the current tick count
     *
     * @return The number of ticks since timer creation. For the os_timer this
     * should match RTX's tick count (the number of ticks since boot).
     * This count is updated by the ticker interrupt, if the ticker interrupt
     * is running. If the ticker interrupt is not running, update_and_get_tick()
     * should be used instead.
     *
     * This indicates how many ticks have been generated by the tick interrupt.
     * The os_timer should equal this number minus the number of unacknowledged ticks.
     *
     * @return The number of ticks since timer creation.
     */
    uint32_t get_tick();
    uint64_t get_tick() const;

    /** Update and get the current tick count
     *
     * This is a slow operation that reads the timer and adjusts for elapsed time.
     * Can only be used when the ticker is not running, as there is no IRQ
     * synchronization.
     *
     * This clears the unacknowledged tick counter - the caller is assumed to update
     * their timer based on this return.
     *
     * @return The number of ticks since timer creation.
     */
    uint64_t update_and_get_tick();

    /**
     * Returns time since last tick
     *
     * @return Relative time in microseconds
     */
    us_timestamp_t get_time_since_tick() const;

    /**
     * Get the time
     *
     * Returns the instantaneous precision time from underlying timer.
     * This is a slow operation so should not be called from critical sections.
     *
     * @return Current time in microseconds
     */
    us_timestamp_t get_time();
    us_timestamp_t get_time() const;

protected:
    virtual void handler();
    void _increment_tick();
    void _schedule_tick();
    uint64_t _elapsed_ticks() const;
    static void _set_irq_pending();
    static void _clear_irq_pending();
    us_timestamp_t _time_us;
    uint64_t _tick;
    bool _suspend_time_passed;
    bool _suspended;
    uint8_t _unacknowledged_ticks;
    bool _wake_time_set;
    bool _wake_time_passed;
    bool _wake_early;
    bool _ticking;
    bool _deep_sleep_locked;
};

/**

@@ -128,5 +246,3 @@ protected:
}

#endif

#endif
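As orientation for the reworked interface above, a minimal usage sketch follows. It is not part of the patch; the 1 ms tick period, the 50-tick wake delay and the surrounding function are assumptions chosen for illustration.

#include "platform/SysTimer.h"

using mbed::internal::SysTimer;

// Hypothetical example, assuming MBED_TICKLESS and a 1 ms tick period.
void example_timer_session()
{
    SysTimer<1000> timer;                 // 1000 us per tick, IRQ pending enabled

    timer.start_tick();                   // periodic tick interrupts begin
    uint64_t now = timer.get_tick();      // updated by the tick interrupt
    timer.cancel_tick();                  // stop ticking before arming a wake

    // Timed suspend: request a wake-up 50 ticks from now, then wait until
    // wake_time_passed() reports true, and always clear the wake afterwards.
    timer.set_wake_time(timer.update_and_get_tick() + 50);
    while (!timer.wake_time_passed()) {
        // sleep() would go here in real code
    }
    timer.cancel_wake();
    (void) now;
}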
@@ -0,0 +1,252 @@
/*
 * Copyright (c) 2006-2019, ARM Limited, All Rights Reserved
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "platform/mbed_power_mgmt.h"
#include "platform/mbed_os_timer.h"
#include "platform/CriticalSectionLock.h"
#include "platform/SysTimer.h"
#include "us_ticker_api.h"
#include "lp_ticker_api.h"
#include "mbed_critical.h"
#include "mbed_assert.h"
#include <new>

/* This provides the marshalling point for a system global SysTimer, which
 * is used to provide:
 * - timed sleeps (for default idle hook in RTOS tickless mode, or non-RTOS sleeps)
 * - regular ticks for RTOS
 * - absolute system timing (directly for non-RTOS, or indirectly via RTOS tick count)
 */

namespace mbed {
namespace internal {

OsTimer *os_timer;

namespace {
uint64_t os_timer_data[(sizeof(OsTimer) + 7) / 8];
}

OsTimer *init_os_timer()
{
    // Do not use SingletonPtr since this relies on the RTOS.
    // Locking not required as it will be first called during
    // OS init, or else we're a non-RTOS single-threaded setup.
    if (!os_timer) {
#if MBED_CONF_TARGET_TICKLESS_FROM_US_TICKER && DEVICE_USTICKER
        os_timer = new (os_timer_data) OsTimer(get_us_ticker_data());
#elif !MBED_CONF_TARGET_TICKLESS_FROM_US_TICKER && DEVICE_LPTICKER
        os_timer = new (os_timer_data) OsTimer(get_lp_ticker_data());
#else
        MBED_ASSERT("OS timer not available - check MBED_CONF_TARGET_TICKLESS_FROM_US_TICKER" && false);
        return NULL;
#endif
        //os_timer->setup_irq();
    }

    return os_timer;
}

/* These traits classes are designed to permit chunks of code to be
 * omitted - in particular eliminating timers. However, we don't want
 * to cause template bloat, so don't have too many traits variants.
 */

/* Optionally timed operation, with optional predicate */
struct timed_predicate_op {
    timed_predicate_op(uint64_t t) : wake_time(t), orig_predicate(NULL), orig_handle(NULL)
    {
        init_os_timer();
    }

    timed_predicate_op(uint64_t t, bool (*wake_predicate)(void *), void *wake_predicate_handle) : wake_time(t), orig_predicate(wake_predicate), orig_handle(wake_predicate_handle)
    {
        init_os_timer();
    }

    ~timed_predicate_op()
    {
        // Make sure wake timer is cancelled. (It may or may not be, depending on
        // why we woke).
        os_timer->cancel_wake();
    }

    bool wake_condition() const
    {
        return (orig_predicate && orig_predicate(orig_handle)) || os_timer->wake_time_passed();
    }

    void sleep_prepare()
    {
        if (wake_time != (uint64_t) -1) {
            os_timer->set_wake_time(wake_time);
        }
    }

    bool sleep_prepared()
    {
        return wake_time == (uint64_t) -1 || os_timer->wake_time_set();
    }

private:
    uint64_t wake_time;
    bool (*orig_predicate)(void *);
    void *orig_handle;
};

/* Untimed operation with predicate */
struct untimed_op {
    untimed_op(bool (*wake_predicate)(void *), void *wake_predicate_handle) : orig_predicate(wake_predicate), orig_handle(wake_predicate_handle)
    {
    }

    bool wake_condition() const
    {
        return orig_predicate(orig_handle);
    }

    void sleep_prepare()
    {
    }

    bool sleep_prepared()
    {
        return true;
    }

private:
    bool (*orig_predicate)(void *);
    void *orig_handle;
};

/* We require that this is called from thread context, outside a critical section,
 * and the kernel already suspended if an RTOS, meaning we don't have to worry
 * about any potential threading issues.
 *
 * The wake predicate will be called from both outside and inside a critical
 * section, so appropriate atomic care must be taken.
 */
template <class OpT>
void do_sleep_operation(OpT &op)
{
    // We assume the ticker is not already in use - without RTOS, it
    // is never used, with RTOS, it will have been disabled with OS_Tick_Disable
    while (!op.wake_condition()) {
        // Set (or re-set) the wake time - outside a critical section, as
        // it could take long enough to cause UART data loss on some platforms.
        op.sleep_prepare();

        // If no target sleep function, nothing else to do - just keep
        // rechecking the wake condition.
#if DEVICE_SLEEP
        // Now we need to enter the critical section for the race-free sleep
        {
            CriticalSectionLock lock;

            // Recheck wake conditions before starting sleep, avoiding race
            if (op.wake_condition()) {
                break;
            }

            // It's possible that an intermediate wake interrupt occurred
            // between "set_wake_time" and the critical lock - only sleep
            // if we see that the timer is armed or we don't need it. Otherwise,
            // we go round to set the timer again.
            if (op.sleep_prepared()) {
                // Enter HAL sleep (normal or deep)
                sleep();
            }
        }

        // Ensure interrupts get a chance to fire, which allows new result from
        // wake_predicate() and wake_time_passed()
        __ISB();
#endif
    }
}

/* We require that this is called from thread context, outside a critical section,
 * and the kernel already suspended if an RTOS, meaning we don't have to worry
 * about any potential threading issues.
 *
 * The wake predicate will be called from both outside and inside a critical
 * section, so appropriate atomic care must be taken.
 */
uint64_t do_timed_sleep_absolute(uint64_t wake_time, bool (*wake_predicate)(void *), void *wake_predicate_handle)
{
    {
        timed_predicate_op op(wake_time, wake_predicate, wake_predicate_handle);
        do_sleep_operation(op);
    }

    return os_timer->update_and_get_tick();
}

#if MBED_CONF_RTOS_PRESENT
/* The 32-bit limit is part of the API - we will always wake within 2^32 ticks */
/* This version is tuned for RTOS use, where the RTOS needs to know the time spent sleeping */
uint32_t do_timed_sleep_relative(uint32_t wake_delay, bool (*wake_predicate)(void *), void *wake_predicate_handle)
{
    uint64_t sleep_start = init_os_timer()->get_tick();
    // When running with RTOS, the requested delay will be based on the kernel's tick count.
    // If it missed a tick as entering idle, we should reflect that by moving the
    // start time back to reflect its current idea of time.
    // Example: OS tick count = 100, our tick count = 101, requested delay = 50
    // We need to schedule wake for tick 150, report 50 ticks back to our caller, and
    // clear the unacknowledged tick count.
    sleep_start -= os_timer->unacknowledged_ticks();

    uint64_t sleep_finish = do_timed_sleep_absolute(sleep_start + wake_delay, wake_predicate, wake_predicate_handle);

    return static_cast<uint32_t>(sleep_finish - sleep_start);
}

#else

void do_untimed_sleep(bool (*wake_predicate)(void *), void *wake_predicate_handle)
{
    untimed_op op(wake_predicate, wake_predicate_handle);

    do_sleep_operation(op);
}

/* (uint32_t)-1 delay is treated as "wait forever" */
/* This version is tuned for non-RTOS use, where we don't need to return sleep time, and waiting forever is possible */
void do_timed_sleep_relative_or_forever(uint32_t wake_delay, bool (*wake_predicate)(void *), void *wake_predicate_handle)
{
    // Special-case 0 delay, to save multiple callers having to do it. Just call the predicate once.
    if (wake_delay == 0) {
        wake_predicate(wake_predicate_handle);
        return;
    }

    uint64_t wake_time;
    if (wake_delay == (uint32_t) -1) {
        wake_time = (uint64_t) -1;
    } else {
        wake_time = init_os_timer()->update_and_get_tick() + wake_delay;
    }
    /* Always use timed_predicate_op here to save pulling in two templates */
    timed_predicate_op op(wake_time, wake_predicate, wake_predicate_handle);
    do_sleep_operation(op);
}

#endif

} // namespace internal
} // namespace mbed
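The sleep operations above follow a small implicit traits interface (wake_condition, sleep_prepare, sleep_prepared). A hypothetical extra operation, sketched here purely for illustration and not present in the patch, would look like this:

// Hypothetical example only - follows the same implicit interface that
// do_sleep_operation() expects from timed_predicate_op / untimed_op.
struct example_flag_op {
    explicit example_flag_op(const volatile bool *flag) : _flag(flag) {}

    bool wake_condition() const
    {
        return *_flag;          // wake as soon as the flag becomes true
    }
    void sleep_prepare()
    {
        // nothing to arm - this operation has no wake time
    }
    bool sleep_prepared()
    {
        return true;            // always safe to enter sleep
    }

private:
    const volatile bool *_flag;
};

// Usage (inside mbed::internal, where do_sleep_operation is visible):
//   example_flag_op op(&some_flag);
//   do_sleep_operation(op);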
@@ -0,0 +1,64 @@
/*
 * Copyright (c) 2006-2019, ARM Limited, All Rights Reserved
 * SPDX-License-Identifier: Apache-2.0
 *
 * Licensed under the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
 * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#ifndef MBED_MBED_SLEEP_TIMER_H
#define MBED_MBED_SLEEP_TIMER_H

#include "platform/SysTimer.h"

#if MBED_CONF_RTOS_PRESENT
extern "C" {
#include "rtx_lib.h"
}
#endif

namespace mbed {
namespace internal {

#if MBED_CONF_RTOS_PRESENT
#define OS_TICK_US (1000000 / OS_TICK_FREQ)
#else
#define OS_TICK_US 1000
#endif
typedef SysTimer<OS_TICK_US> OsTimer;

/* A SysTimer is used to provide the timed sleep - this provides access to share it for
 * other use, such as ticks. If accessed this way, it must not be in use when a sleep function below is called.
 */
extern OsTimer *os_timer;
OsTimer *init_os_timer();

/* -1 is effectively "sleep forever" */
uint64_t do_timed_sleep_absolute(uint64_t wake_time, bool (*wake_predicate)(void *) = NULL, void *wake_predicate_handle = NULL);

#if MBED_CONF_RTOS_PRESENT
/* Maximum sleep time is 2^32-1 ticks; timer is always set to achieve this */
/* Assumes that ticker has been in use prior to call, so restricted to RTOS use */
uint32_t do_timed_sleep_relative(uint32_t wake_delay, bool (*wake_predicate)(void *) = NULL, void *wake_predicate_handle = NULL);
#else

void do_untimed_sleep(bool (*wake_predicate)(void *), void *wake_predicate_handle = NULL);

/* (uint32_t)-1 delay is sleep forever */

void do_timed_sleep_relative_or_forever(uint32_t wake_delay, bool (*wake_predicate)(void *) = NULL, void *wake_predicate_handle = NULL);

#endif

}
}

#endif
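For orientation, a sketch of how a non-RTOS caller might use this header; the flag, predicate and 100-tick delay are hypothetical and not taken from the patch.

// Hypothetical non-RTOS usage sketch (MBED_CONF_RTOS_PRESENT == 0 is assumed).
#include "platform/mbed_os_timer.h"

static volatile bool example_data_ready;

static bool example_wake_predicate(void *)
{
    return example_data_ready;   // wake early if the flag is set from an interrupt
}

void example_wait_for_data()
{
    // Sleep up to 100 ticks (100 ms with the default 1 ms tick), or until the
    // predicate returns true, whichever comes first.
    mbed::internal::do_timed_sleep_relative_or_forever(100, example_wake_predicate, NULL);
}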
@@ -22,9 +22,8 @@

#include "rtos/rtos_idle.h"
#include "platform/mbed_power_mgmt.h"
#include "platform/mbed_os_timer.h"
#include "TimerEvent.h"
#include "lp_ticker_api.h"
#include "us_ticker_api.h"
#include "mbed_critical.h"
#include "mbed_assert.h"
#include <new>

@@ -34,7 +33,8 @@
extern "C" {
#include "rtx_lib.h"

using namespace mbed;
using mbed::internal::os_timer;
using mbed::internal::OsTimer;

#ifdef MBED_TICKLESS

@@ -46,26 +46,39 @@ extern "C" {
#error Low power ticker required when MBED_CONF_TARGET_TICKLESS_FROM_US_TICKER is false
#endif

#include "platform/SysTimer.h"
// Setup OS Tick timer to generate periodic RTOS Kernel Ticks
int32_t OS_Tick_Setup(uint32_t freq, IRQHandler_t handler)
{
    MBED_ASSERT(freq == 1000);

static mbed::internal::SysTimer *os_timer;
static uint64_t os_timer_data[sizeof(mbed::internal::SysTimer) / 8];
#ifdef TARGET_CORTEX_A
    IRQn_ID_t irq = OsTimer::get_irq_number();

    IRQ_SetPriority(irq, 0xFF);
    IRQ_SetHandler(irq, handler);
    IRQ_EnableIRQ(irq);
#else
    IRQn_Type irq = OsTimer::get_irq_number();

    NVIC_SetPriority(irq, 0xFF);
#ifdef NVIC_RAM_VECTOR_ADDRESS
    NVIC_SetVector(irq, (uint32_t)handler);
#else
    MBED_ASSERT(handler == (IRQHandler_t)NVIC_GetVector(irq));
#endif
    if (irq >= 0) {
        NVIC_EnableIRQ(irq);
    }
#endif

    return 0;
}

// Enable System Timer.
void OS_Tick_Enable(void)
{
    // Do not use SingletonPtr since this relies on the RTOS
    if (NULL == os_timer) {
#if MBED_CONF_TARGET_TICKLESS_FROM_US_TICKER
        os_timer = new (os_timer_data) mbed::internal::SysTimer(get_us_ticker_data());
#else
        os_timer = new (os_timer_data) mbed::internal::SysTimer(get_lp_ticker_data());
#endif
        os_timer->setup_irq();
    }

    // set to fire interrupt on next tick
    os_timer->schedule_tick();
    mbed::internal::init_os_timer()->start_tick();
}

// Disable System Timer.

@@ -77,66 +90,54 @@ extern "C" {
// Acknowledge System Timer IRQ.
void OS_Tick_AcknowledgeIRQ(void)
{

    os_timer->acknowledge_tick();
}

// Get System Timer count.
uint32_t OS_Tick_GetCount(void)
{
    return os_timer->get_time() & 0xFFFFFFFF;
    return (uint32_t) os_timer->get_time_since_tick();
}

// Get OS Tick IRQ number.
int32_t OS_Tick_GetIRQn(void)
{
    return -1;
    return os_timer->get_irq_number();
}

// Get OS Tick overflow status.
uint32_t OS_Tick_GetOverflow(void)
{
    // No need to indicate overflow - we let OS_Tick_GetCount overflow above
    // OS_Tick_GetInterval.
    return 0;
}

// Get OS Tick timer clock frequency
uint32_t OS_Tick_GetClock(void)
{
    return 1000000;
}

// Get OS Tick interval.
uint32_t OS_Tick_GetInterval(void)
{
    return 1000;
}

static bool rtos_event_pending(void *)
{
    return core_util_atomic_load_u8(&osRtxInfo.kernel.pendSV);
}

static void default_idle_hook(void)
{
    uint32_t ticks_to_sleep = osKernelSuspend();
    const bool block_deep_sleep = MBED_CONF_TARGET_TICKLESS_FROM_US_TICKER ||
        (ticks_to_sleep <= MBED_CONF_TARGET_DEEP_SLEEP_LATENCY);

    if (block_deep_sleep) {
        sleep_manager_lock_deep_sleep();
    } else {
        ticks_to_sleep -= MBED_CONF_TARGET_DEEP_SLEEP_LATENCY;
    }
    os_timer->suspend(ticks_to_sleep);

    bool event_pending = false;
    while (!os_timer->suspend_time_passed() && !event_pending) {

        core_util_critical_section_enter();
        if (osRtxInfo.kernel.pendSV) {
            event_pending = true;
        } else {
            sleep();
        }
        core_util_critical_section_exit();

        // Ensure interrupts get a chance to fire
        __ISB();
    }

    if (block_deep_sleep) {
        sleep_manager_unlock_deep_sleep();
    }

    osKernelResume(os_timer->resume());
    // osKernelSuspend will call OS_Tick_Disable, cancelling the tick, which frees
    // up the os timer for the timed sleep
    uint64_t ticks_slept = mbed::internal::do_timed_sleep_relative(ticks_to_sleep, rtos_event_pending);
    MBED_ASSERT(ticks_slept < osWaitForever);
    osKernelResume((uint32_t) ticks_slept);
}