equeue: add user allocated event support

Allow posting events allocated outside queue memory
pull/11342/head
Maciej Bocianski 2019-08-12 15:56:17 +02:00
parent 95fc8cfa2c
commit 66da694625
2 changed files with 121 additions and 24 deletions

equeue.h

@@ -176,6 +176,17 @@ void equeue_event_dtor(void *event, void (*dtor)(void *));
// be passed to equeue_cancel.
int equeue_post(equeue_t *queue, void (*cb)(void *), void *event);
// Post a user allocated event onto the event queue
//
// The equeue_post_user_allocated function takes a callback and a pointer
// to an event allocated by the user. The specified callback will be executed
// in the context of the event queue's dispatch loop with the allocated
// event as its argument.
//
// The equeue_post_user_allocated function is irq safe and can act as
// a mechanism for moving events out of irq contexts.
void equeue_post_user_allocated(equeue_t *queue, void (*cb)(void *), void *event);
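// Example (illustrative sketch, not part of this patch): posting a user
// allocated event. The sketch assumes the user's block starts with a
// struct equeue_event header followed by the callback data, and that the
// header is zero-initialized apart from a delay written to .target and a
// negative .period for a one-shot event; the struct user_event type and the
// handler/post_example names are made up for illustration.
//
//     #include <stdio.h>
//     #include <string.h>
//     #include "equeue.h"
//
//     struct user_event {
//         struct equeue_event e;   // event header, must come first
//         int payload;             // user data following the header
//     };
//
//     static struct user_event ue; // lives outside the queue's buffer
//
//     static void handler(void *p)
//     {
//         (void)p;                 // argument follows equeue_post conventions
//         printf("payload = %d\n", ue.payload);
//     }
//
//     void post_example(equeue_t *q)
//     {
//         memset(&ue, 0, sizeof(ue));
//         ue.e.target = 10;        // delay in equeue ticks before dispatch
//         ue.e.period = -1;        // assumed: negative period means one-shot
//         ue.payload = 42;
//         equeue_post_user_allocated(q, handler, &ue.e);
//     }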
// Cancel an in-flight event
//
// Attempts to cancel an event referenced by the unique id returned from
@@ -191,6 +202,20 @@ int equeue_post(equeue_t *queue, void (*cb)(void *), void *event);
// Returning false if invalid id or already started executing.
bool equeue_cancel(equeue_t *queue, int id);
// Cancel an in-flight user allocated event
//
// Attempts to cancel an event referenced by its address.
// It is safe to call equeue_cancel_user_allocated after an event
// has already been dispatched.
//
// The equeue_cancel_user_allocated function is irq safe.
//
// If called while the event queue's dispatch loop is active,
// equeue_cancel_user_allocated does not guarantee that the event
// will not execute after it returns, as the event may have
// already begun executing.
bool equeue_cancel_user_allocated(equeue_t *queue, void *event);
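// Example (sketch, continuing the posting example above): cancelling the same
// user allocated event by its address rather than by an id. As with
// equeue_cancel, a false return is taken here to mean the event could not be
// withdrawn, for instance because it has already started executing.
//
//     void cancel_example(equeue_t *q)
//     {
//         if (!equeue_cancel_user_allocated(q, &ue.e)) {
//             // too late: the event already ran or is running right now
//         }
//     }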
// Query how much time is left for delayed event
//
// If event is delayed, this function can be used to query how much time
@@ -200,6 +225,15 @@ bool equeue_cancel(equeue_t *queue, int id);
//
int equeue_timeleft(equeue_t *q, int id);
// Query how much time is left for a delayed user allocated event
//
// If event is delayed, this function can be used to query how much time
// is left until the event is due to be dispatched.
//
// This function is irq safe.
//
int equeue_timeleft_user_allocated(equeue_t *q, void *event);
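// Example (sketch, continuing the posting example above): querying how long
// until the same user allocated event is due. The non-negative return value
// is read as ticks remaining, mirroring equeue_timeleft.
//
//     void timeleft_example(equeue_t *q)
//     {
//         int left = equeue_timeleft_user_allocated(q, &ue.e);
//         if (left >= 0) {
//             printf("event due in %d ticks\n", left);
//         }
//     }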
// Background an event queue onto a single-shot timer
//
// The provided update function will be called to indicate when the queue

equeue.c

@@ -21,6 +21,9 @@
#include <stdint.h>
#include <string.h>
// check if the event was allocated by the user - the event address is outside the queue's internal buffer address range
#define EQUEUE_IS_USER_ALLOCATED_EVENT(e) (((uintptr_t)(e) < (uintptr_t)q->buffer) || ((uintptr_t)(e) > ((uintptr_t)q->slab.data)))
// calculate the relative-difference between absolute times while
// correctly handling overflow conditions
static inline int equeue_tickdiff(unsigned a, unsigned b)
@@ -64,9 +67,15 @@ int equeue_create_inplace(equeue_t *q, size_t size, void *buffer)
{
// setup queue around provided buffer
// ensure buffer and size are aligned
q->buffer = (void *)(((uintptr_t) buffer + sizeof(void *) -1) & ~(sizeof(void *) -1));
size -= (char *) q->buffer - (char *) buffer;
size &= ~(sizeof(void *) -1);
if (size >= sizeof(void *)) {
q->buffer = (void *)(((uintptr_t) buffer + sizeof(void *) -1) & ~(sizeof(void *) -1));
size -= (char *) q->buffer - (char *) buffer;
size &= ~(sizeof(void *) -1);
} else {
// don't align when size is less than the pointer size
// e.g. static queue (size == 1)
q->buffer = buffer;
}
q->allocated = 0;
@@ -220,15 +229,13 @@ void equeue_dealloc(equeue_t *q, void *p)
e->dtor(e + 1);
}
equeue_mem_dealloc(q, e);
if (!EQUEUE_IS_USER_ALLOCATED_EVENT(e)) {
equeue_mem_dealloc(q, e);
}
}
// equeue scheduling functions
static int equeue_enqueue(equeue_t *q, struct equeue_event *e, unsigned tick)
void equeue_enqueue(equeue_t *q, struct equeue_event *e, unsigned tick)
{
// setup event and hash local id with buffer offset for unique id
int id = (e->id << q->npw2) | ((unsigned char *)e - q->buffer);
e->target = tick + equeue_clampdiff(e->target, tick);
e->generation = q->generation;
@@ -254,7 +261,6 @@ static int equeue_enqueue(equeue_t *q, struct equeue_event *e, unsigned tick)
if (e->next) {
e->next->ref = &e->next;
}
e->sibling = 0;
}
@@ -267,24 +273,19 @@ static int equeue_enqueue(equeue_t *q, struct equeue_event *e, unsigned tick)
q->background.update(q->background.timer,
equeue_clampdiff(e->target, tick));
}
equeue_mutex_unlock(&q->queuelock);
return id;
}
static struct equeue_event *equeue_unqueue(equeue_t *q, int id)
// equeue scheduling functions
static int equeue_event_id(equeue_t *q, struct equeue_event *e)
{
// decode event from unique id and check that the local id matches
struct equeue_event *e = (struct equeue_event *)
&q->buffer[id & ((1 << q->npw2) - 1)];
// setup event and hash local id with buffer offset for unique id
return ((e->id << q->npw2) | ((unsigned char *)e - q->buffer));
}
static struct equeue_event *equeue_unqueue_by_address(equeue_t *q, struct equeue_event *e)
{
equeue_mutex_lock(&q->queuelock);
if (e->id != id >> q->npw2) {
equeue_mutex_unlock(&q->queuelock);
return 0;
}
// clear the event and check if already in-flight
e->cb = 0;
e->period = -1;
@@ -310,6 +311,26 @@ static struct equeue_event *equeue_unqueue(equeue_t *q, int id)
e->next->ref = e->ref;
}
}
equeue_mutex_unlock(&q->queuelock);
return e;
}
static struct equeue_event *equeue_unqueue_by_id(equeue_t *q, int id)
{
// decode event from unique id and check that the local id matches
struct equeue_event *e = (struct equeue_event *)
&q->buffer[id & ((1 << q->npw2) - 1)];
equeue_mutex_lock(&q->queuelock);
if (e->id != id >> q->npw2) {
equeue_mutex_unlock(&q->queuelock);
return 0;
}
if (0 == equeue_unqueue_by_address(q, e)) {
equeue_mutex_unlock(&q->queuelock);
return 0;
}
equeue_incid(q, e);
equeue_mutex_unlock(&q->queuelock);
@@ -369,18 +390,30 @@ int equeue_post(equeue_t *q, void (*cb)(void *), void *p)
e->cb = cb;
e->target = tick + e->target;
int id = equeue_enqueue(q, e, tick);
equeue_enqueue(q, e, tick);
int id = equeue_event_id(q, e);
equeue_sema_signal(&q->eventsema);
return id;
}
void equeue_post_user_allocated(equeue_t *q, void (*cb)(void *), void *p)
{
struct equeue_event *e = (struct equeue_event *)p;
unsigned tick = equeue_tick();
e->cb = cb;
e->target = tick + e->target;
equeue_enqueue(q, e, tick);
equeue_sema_signal(&q->eventsema);
}
bool equeue_cancel(equeue_t *q, int id)
{
if (!id) {
return false;
}
struct equeue_event *e = equeue_unqueue(q, id);
struct equeue_event *e = equeue_unqueue_by_id(q, id);
if (e) {
equeue_dealloc(q, e + 1);
return true;
@@ -389,6 +422,21 @@ bool equeue_cancel(equeue_t *q, int id)
}
}
bool equeue_cancel_user_allocated(equeue_t *q, void *e)
{
if (!e) {
return false;
}
struct equeue_event *_e = equeue_unqueue_by_address(q, e);
if (_e) {
equeue_dealloc(q, _e + 1);
return true;
} else {
return false;
}
}
int equeue_timeleft(equeue_t *q, int id)
{
int ret = -1;
@@ -409,6 +457,21 @@ int equeue_timeleft(equeue_t *q, int id)
return ret;
}
int equeue_timeleft_user_allocated(equeue_t *q, void *e)
{
int ret = -1;
if (!e) {
return -1;
}
struct equeue_event *_e = (struct equeue_event *)e;
equeue_mutex_lock(&q->queuelock);
ret = equeue_clampdiff(_e->target, equeue_tick());
equeue_mutex_unlock(&q->queuelock);
return ret;
}
void equeue_break(equeue_t *q)
{
equeue_mutex_lock(&q->queuelock);