mirror of https://github.com/ARMmbed/mbed-os.git

events: fix astyle

parent 62148b973e
commit 08b94aa193
@@ -23,19 +23,22 @@
 // calculate the relative-difference between absolute times while
 // correctly handling overflow conditions
-static inline int equeue_tickdiff(unsigned a, unsigned b) {
+static inline int equeue_tickdiff(unsigned a, unsigned b)
+{
     return (int)(unsigned)(a - b);
 }

 // calculate the relative-difference between absolute times, but
 // also clamp to zero, resulting in only non-zero values.
-static inline int equeue_clampdiff(unsigned a, unsigned b) {
+static inline int equeue_clampdiff(unsigned a, unsigned b)
+{
     int diff = equeue_tickdiff(a, b);
-    return ~(diff >> (8*sizeof(int)-1)) & diff;
+    return ~(diff >> (8 * sizeof(int) -1)) & diff;
 }

 // Increment the unique id in an event, hiding the event from cancel
-static inline void equeue_incid(equeue_t *q, struct equeue_event *e) {
+static inline void equeue_incid(equeue_t *q, struct equeue_event *e)
+{
     e->id += 1;
     if ((e->id << q->npw2) == 0) {
         e->id = 1;
@@ -44,7 +47,8 @@ static inline void equeue_incid(equeue_t *q, struct equeue_event *e) {

 // equeue lifetime management
-int equeue_create(equeue_t *q, size_t size) {
+int equeue_create(equeue_t *q, size_t size)
+{
     // dynamically allocate the specified buffer
     void *buffer = malloc(size);
     if (!buffer) {
@@ -56,7 +60,8 @@ int equeue_create(equeue_t *q, size_t size) {
     return err;
 }

-int equeue_create_inplace(equeue_t *q, size_t size, void *buffer) {
+int equeue_create_inplace(equeue_t *q, size_t size, void *buffer)
+{
     // setup queue around provided buffer
     q->buffer = buffer;
     q->allocated = 0;
@@ -99,7 +104,8 @@ int equeue_create_inplace(equeue_t *q, size_t size, void *buffer) {
     return 0;
 }

-void equeue_destroy(equeue_t *q) {
+void equeue_destroy(equeue_t *q)
+{
     // call destructors on pending events
     for (struct equeue_event *es = q->queue; es; es = es->next) {
         for (struct equeue_event *e = q->queue; e; e = e->sibling) {
@@ -123,10 +129,11 @@ void equeue_destroy(equeue_t *q) {

 // equeue chunk allocation functions
-static struct equeue_event *equeue_mem_alloc(equeue_t *q, size_t size) {
+static struct equeue_event *equeue_mem_alloc(equeue_t *q, size_t size)
+{
     // add event overhead
     size += sizeof(struct equeue_event);
-    size = (size + sizeof(void*)-1) & ~(sizeof(void*)-1);
+    size = (size + sizeof(void *) -1) & ~(sizeof(void *) -1);

     equeue_mutex_lock(&q->memlock);

@@ -162,7 +169,8 @@ static struct equeue_event *equeue_mem_alloc(equeue_t *q, size_t size) {
     return 0;
 }

-static void equeue_mem_dealloc(equeue_t *q, struct equeue_event *e) {
+static void equeue_mem_dealloc(equeue_t *q, struct equeue_event *e)
+{
     equeue_mutex_lock(&q->memlock);

     // stick chunk into list of chunks
@@ -183,7 +191,8 @@ static void equeue_mem_dealloc(equeue_t *q, struct equeue_event *e) {
     equeue_mutex_unlock(&q->memlock);
 }

-void *equeue_alloc(equeue_t *q, size_t size) {
+void *equeue_alloc(equeue_t *q, size_t size)
+{
     struct equeue_event *e = equeue_mem_alloc(q, size);
     if (!e) {
         return 0;
@@ -196,11 +205,12 @@ void *equeue_alloc(equeue_t *q, size_t size) {
     return e + 1;
 }

-void equeue_dealloc(equeue_t *q, void *p) {
-    struct equeue_event *e = (struct equeue_event*)p - 1;
+void equeue_dealloc(equeue_t *q, void *p)
+{
+    struct equeue_event *e = (struct equeue_event *)p - 1;

     if (e->dtor) {
-        e->dtor(e+1);
+        e->dtor(e + 1);
     }

     equeue_mem_dealloc(q, e);
@@ -208,7 +218,8 @@ void equeue_dealloc(equeue_t *q, void *p) {

 // equeue scheduling functions
-static int equeue_enqueue(equeue_t *q, struct equeue_event *e, unsigned tick) {
+static int equeue_enqueue(equeue_t *q, struct equeue_event *e, unsigned tick)
+{
     // setup event and hash local id with buffer offset for unique id
     int id = (e->id << q->npw2) | ((unsigned char *)e - q->buffer);
     e->target = tick + equeue_clampdiff(e->target, tick);
@@ -245,9 +256,9 @@ static int equeue_enqueue(equeue_t *q, struct equeue_event *e, unsigned tick) {

     // notify background timer
     if ((q->background.update && q->background.active) &&
-            (q->queue == e && !e->sibling)) {
+            (q->queue == e && !e->sibling)) {
         q->background.update(q->background.timer,
-                equeue_clampdiff(e->target, tick));
+                equeue_clampdiff(e->target, tick));
     }

     equeue_mutex_unlock(&q->queuelock);
@@ -255,10 +266,11 @@ static int equeue_enqueue(equeue_t *q, struct equeue_event *e, unsigned tick) {
     return id;
 }

-static struct equeue_event *equeue_unqueue(equeue_t *q, int id) {
+static struct equeue_event *equeue_unqueue(equeue_t *q, int id)
+{
     // decode event from unique id and check that the local id matches
     struct equeue_event *e = (struct equeue_event *)
-            &q->buffer[id & ((1 << q->npw2)-1)];
+            &q->buffer[id & ((1 << q->npw2) - 1)];

     equeue_mutex_lock(&q->queuelock);
     if (e->id != id >> q->npw2) {
@@ -298,7 +310,8 @@ static struct equeue_event *equeue_unqueue(equeue_t *q, int id) {
     return e;
 }

-static struct equeue_event *equeue_dequeue(equeue_t *q, unsigned target) {
+static struct equeue_event *equeue_dequeue(equeue_t *q, unsigned target)
+{
     equeue_mutex_lock(&q->queuelock);

     // find all expired events and mark a new generation
@@ -342,8 +355,9 @@ static struct equeue_event *equeue_dequeue(equeue_t *q, unsigned target) {
     return head;
 }

-int equeue_post(equeue_t *q, void (*cb)(void*), void *p) {
-    struct equeue_event *e = (struct equeue_event*)p - 1;
+int equeue_post(equeue_t *q, void (*cb)(void *), void *p)
+{
+    struct equeue_event *e = (struct equeue_event *)p - 1;
     unsigned tick = equeue_tick();
     e->cb = cb;
     e->target = tick + e->target;
@@ -353,7 +367,8 @@ int equeue_post(equeue_t *q, void (*cb)(void*), void *p) {
     return id;
 }

-void equeue_cancel(equeue_t *q, int id) {
+void equeue_cancel(equeue_t *q, int id)
+{
     if (!id) {
         return;
     }
@@ -364,7 +379,8 @@ void equeue_cancel(equeue_t *q, int id) {
     }
 }

-int equeue_timeleft(equeue_t *q, int id) {
+int equeue_timeleft(equeue_t *q, int id)
+{
     int ret = -1;

     if (!id) {
@@ -373,7 +389,7 @@ int equeue_timeleft(equeue_t *q, int id) {

     // decode event from unique id and check that the local id matches
     struct equeue_event *e = (struct equeue_event *)
-            &q->buffer[id & ((1 << q->npw2)-1)];
+            &q->buffer[id & ((1 << q->npw2) - 1)];

     equeue_mutex_lock(&q->queuelock);
     if (e->id == id >> q->npw2) {
@@ -383,14 +399,16 @@ int equeue_timeleft(equeue_t *q, int id) {
     return ret;
 }

-void equeue_break(equeue_t *q) {
+void equeue_break(equeue_t *q)
+{
     equeue_mutex_lock(&q->queuelock);
     q->break_requested = true;
     equeue_mutex_unlock(&q->queuelock);
     equeue_sema_signal(&q->eventsema);
 }

-void equeue_dispatch(equeue_t *q, int ms) {
+void equeue_dispatch(equeue_t *q, int ms)
+{
     unsigned tick = equeue_tick();
     unsigned timeout = tick + ms;
     q->background.active = false;
@@ -416,7 +434,7 @@ void equeue_dispatch(equeue_t *q, int ms) {
             equeue_enqueue(q, e, equeue_tick());
         } else {
             equeue_incid(q, e);
-            equeue_dealloc(q, e+1);
+            equeue_dealloc(q, e + 1);
         }
     }

@@ -432,7 +450,7 @@ void equeue_dispatch(equeue_t *q, int ms) {
     equeue_mutex_lock(&q->queuelock);
     if (q->background.update && q->queue) {
         q->background.update(q->background.timer,
-                equeue_clampdiff(q->queue->target, tick));
+                equeue_clampdiff(q->queue->target, tick));
     }
     q->background.active = true;
     equeue_mutex_unlock(&q->queuelock);
@@ -473,34 +491,39 @@ void equeue_dispatch(equeue_t *q, int ms) {


 // event functions
-void equeue_event_delay(void *p, int ms) {
-    struct equeue_event *e = (struct equeue_event*)p - 1;
+void equeue_event_delay(void *p, int ms)
+{
+    struct equeue_event *e = (struct equeue_event *)p - 1;
     e->target = ms;
 }

-void equeue_event_period(void *p, int ms) {
-    struct equeue_event *e = (struct equeue_event*)p - 1;
+void equeue_event_period(void *p, int ms)
+{
+    struct equeue_event *e = (struct equeue_event *)p - 1;
     e->period = ms;
 }

-void equeue_event_dtor(void *p, void (*dtor)(void *)) {
-    struct equeue_event *e = (struct equeue_event*)p - 1;
+void equeue_event_dtor(void *p, void (*dtor)(void *))
+{
+    struct equeue_event *e = (struct equeue_event *)p - 1;
     e->dtor = dtor;
 }


 // simple callbacks
 struct ecallback {
-    void (*cb)(void*);
+    void (*cb)(void *);
     void *data;
 };

-static void ecallback_dispatch(void *p) {
-    struct ecallback *e = (struct ecallback*)p;
+static void ecallback_dispatch(void *p)
+{
+    struct ecallback *e = (struct ecallback *)p;
     e->cb(e->data);
 }

-int equeue_call(equeue_t *q, void (*cb)(void*), void *data) {
+int equeue_call(equeue_t *q, void (*cb)(void *), void *data)
+{
     struct ecallback *e = equeue_alloc(q, sizeof(struct ecallback));
     if (!e) {
         return 0;
@@ -511,7 +534,8 @@ int equeue_call(equeue_t *q, void (*cb)(void*), void *data) {
     return equeue_post(q, ecallback_dispatch, e);
 }

-int equeue_call_in(equeue_t *q, int ms, void (*cb)(void*), void *data) {
+int equeue_call_in(equeue_t *q, int ms, void (*cb)(void *), void *data)
+{
     struct ecallback *e = equeue_alloc(q, sizeof(struct ecallback));
     if (!e) {
         return 0;
@@ -523,7 +547,8 @@ int equeue_call_in(equeue_t *q, int ms, void (*cb)(void*), void *data) {
     return equeue_post(q, ecallback_dispatch, e);
 }

-int equeue_call_every(equeue_t *q, int ms, void (*cb)(void*), void *data) {
+int equeue_call_every(equeue_t *q, int ms, void (*cb)(void *), void *data)
+{
     struct ecallback *e = equeue_alloc(q, sizeof(struct ecallback));
     if (!e) {
         return 0;
@@ -539,7 +564,8 @@ int equeue_call_every(equeue_t *q, int ms, void (*cb)(void*), void *data) {

 // backgrounding
 void equeue_background(equeue_t *q,
-        void (*update)(void *timer, int ms), void *timer) {
+        void (*update)(void *timer, int ms), void *timer)
+{
     equeue_mutex_lock(&q->queuelock);
     if (q->background.update) {
         q->background.update(q->background.timer, -1);
@@ -550,7 +576,7 @@ void equeue_background(equeue_t *q,

     if (q->background.update && q->queue) {
         q->background.update(q->background.timer,
-                equeue_clampdiff(q->queue->target, equeue_tick()));
+                equeue_clampdiff(q->queue->target, equeue_tick()));
     }
     q->background.active = true;
     equeue_mutex_unlock(&q->queuelock);
@@ -562,11 +588,13 @@ struct equeue_chain_context {
     int id;
 };

-static void equeue_chain_dispatch(void *p) {
+static void equeue_chain_dispatch(void *p)
+{
     equeue_dispatch((equeue_t *)p, 0);
 }

-static void equeue_chain_update(void *p, int ms) {
+static void equeue_chain_update(void *p, int ms)
+{
     struct equeue_chain_context *c = (struct equeue_chain_context *)p;
     equeue_cancel(c->target, c->id);

@@ -577,14 +605,15 @@ static void equeue_chain_update(void *p, int ms) {
     }
 }

-void equeue_chain(equeue_t *q, equeue_t *target) {
+void equeue_chain(equeue_t *q, equeue_t *target)
+{
     if (!target) {
         equeue_background(q, 0, 0);
         return;
     }

     struct equeue_chain_context *c = equeue_alloc(q,
-            sizeof(struct equeue_chain_context));
+            sizeof(struct equeue_chain_context));

     c->q = q;
     c->target = target;
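Aside: the first hunk in this file touches equeue_tickdiff and equeue_clampdiff, which do wraparound-safe tick arithmetic. A minimal standalone sketch of the same two idioms follows; it assumes a 32-bit unsigned tick counter and a two's-complement target where right-shifting a negative int is an arithmetic shift (the same assumptions the original code makes), and the main driver with its sample values is purely illustrative.

#include <assert.h>
#include <stdio.h>

// Difference of two absolute tick values; unsigned subtraction wraps
// modulo 2^32, so the result stays correct across counter overflow.
static inline int tickdiff(unsigned a, unsigned b)
{
    return (int)(unsigned)(a - b);
}

// Clamp negative differences to zero without branching: an arithmetic
// shift of the sign bit gives an all-ones mask for negative values
// (result 0) and an all-zeros mask otherwise (result diff).
static inline int clampdiff(unsigned a, unsigned b)
{
    int diff = tickdiff(a, b);
    return ~(diff >> (8 * sizeof(int) - 1)) & diff;
}

int main(void)
{
    // the counter wrapped from 0xfffffffe to 0x00000002: difference is 4
    assert(tickdiff(0x00000002u, 0xfffffffeu) == 4);
    // b is ahead of a, so the raw difference is negative...
    assert(tickdiff(100u, 130u) == -30);
    // ...and the clamped difference is zero
    assert(clampdiff(100u, 130u) == 0);
    assert(clampdiff(130u, 100u) == 30);
    printf("tick arithmetic ok\n");
    return 0;
}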
@@ -25,33 +25,39 @@

 // Tick operations
-unsigned equeue_tick(void) {
+unsigned equeue_tick(void)
+{
     struct timeval tv;
     gettimeofday(&tv, 0);
-    return (unsigned)(tv.tv_sec*1000 + tv.tv_usec/1000);
+    return (unsigned)(tv.tv_sec * 1000 + tv.tv_usec / 1000);
 }


 // Mutex operations
-int equeue_mutex_create(equeue_mutex_t *m) {
+int equeue_mutex_create(equeue_mutex_t *m)
+{
     return pthread_mutex_init(m, 0);
 }

-void equeue_mutex_destroy(equeue_mutex_t *m) {
+void equeue_mutex_destroy(equeue_mutex_t *m)
+{
     pthread_mutex_destroy(m);
 }

-void equeue_mutex_lock(equeue_mutex_t *m) {
+void equeue_mutex_lock(equeue_mutex_t *m)
+{
     pthread_mutex_lock(m);
 }

-void equeue_mutex_unlock(equeue_mutex_t *m) {
+void equeue_mutex_unlock(equeue_mutex_t *m)
+{
     pthread_mutex_unlock(m);
 }


 // Semaphore operations
-int equeue_sema_create(equeue_sema_t *s) {
+int equeue_sema_create(equeue_sema_t *s)
+{
     int err = pthread_mutex_init(&s->mutex, 0);
     if (err) {
         return err;
@@ -66,19 +72,22 @@ int equeue_sema_create(equeue_sema_t *s) {
     return 0;
 }

-void equeue_sema_destroy(equeue_sema_t *s) {
+void equeue_sema_destroy(equeue_sema_t *s)
+{
     pthread_cond_destroy(&s->cond);
     pthread_mutex_destroy(&s->mutex);
 }

-void equeue_sema_signal(equeue_sema_t *s) {
+void equeue_sema_signal(equeue_sema_t *s)
+{
     pthread_mutex_lock(&s->mutex);
     s->signal = true;
     pthread_cond_signal(&s->cond);
     pthread_mutex_unlock(&s->mutex);
 }

-bool equeue_sema_wait(equeue_sema_t *s, int ms) {
+bool equeue_sema_wait(equeue_sema_t *s, int ms)
+{
     pthread_mutex_lock(&s->mutex);
     if (!s->signal) {
         if (ms < 0) {
@@ -88,8 +97,8 @@ bool equeue_sema_wait(equeue_sema_t *s, int ms) {
             gettimeofday(&tv, 0);

             struct timespec ts = {
-                .tv_sec = ms/1000 + tv.tv_sec,
-                .tv_nsec = ms*1000000 + tv.tv_usec*1000,
+                .tv_sec = ms / 1000 + tv.tv_sec,
+                .tv_nsec = ms * 1000000 + tv.tv_usec * 1000,
             };

             pthread_cond_timedwait(&s->cond, &s->mutex, &ts);
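The POSIX port above only changes brace placement and operator padding; the equeue API itself is untouched. For orientation, a minimal usage sketch of that API on the POSIX build follows; the queue size, delays, callback, and the equeue.h include name are illustrative assumptions, not taken from the commit.

#include <stdio.h>
#include "equeue.h"   // assumed include name for the API shown above

static void hello(void *ctx)
{
    printf("hello from the %s event\n", (const char *)ctx);
}

int main(void)
{
    equeue_t q;
    if (equeue_create(&q, 2048) != 0) {   // 2048-byte buffer, as in the tests
        return 1;
    }

    // one immediate event plus one delayed by 100 ms
    equeue_call(&q, hello, "immediate");
    equeue_call_in(&q, 100, hello, "delayed");

    // dispatch for 200 ms, then tear the queue down
    equeue_dispatch(&q, 200);
    equeue_destroy(&q);
    return 0;
}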
@@ -113,20 +113,23 @@ static const char *prof_units;

 // Various test functions
-void no_func(void *eh) {
+void no_func(void *eh)
+{
 }


 // Actual performance tests
-void baseline_prof(void) {
+void baseline_prof(void)
+{
     prof_loop() {
         prof_start();
-        __asm__ volatile ("");
+        __asm__ volatile("");
         prof_stop();
     }
 }

-void equeue_tick_prof(void) {
+void equeue_tick_prof(void)
+{
     prof_volatile(unsigned) res;
     prof_loop() {
         prof_start();
@@ -135,9 +138,10 @@ void equeue_tick_prof(void) {
     }
 }

-void equeue_alloc_prof(void) {
+void equeue_alloc_prof(void)
+{
     struct equeue q;
-    equeue_create(&q, 32*EQUEUE_EVENT_SIZE);
+    equeue_create(&q, 32 * EQUEUE_EVENT_SIZE);

     prof_loop() {
         prof_start();
@@ -150,9 +154,10 @@ void equeue_alloc_prof(void) {
     equeue_destroy(&q);
 }

-void equeue_alloc_many_prof(int count) {
+void equeue_alloc_many_prof(int count)
+{
     struct equeue q;
-    equeue_create(&q, count*EQUEUE_EVENT_SIZE);
+    equeue_create(&q, count * EQUEUE_EVENT_SIZE);

     void *es[count];

@@ -175,7 +180,8 @@ void equeue_alloc_many_prof(int count) {
     equeue_destroy(&q);
 }

-void equeue_post_prof(void) {
+void equeue_post_prof(void)
+{
     struct equeue q;
     equeue_create(&q, EQUEUE_EVENT_SIZE);

@@ -192,11 +198,12 @@ void equeue_post_prof(void) {
     equeue_destroy(&q);
 }

-void equeue_post_many_prof(int count) {
+void equeue_post_many_prof(int count)
+{
     struct equeue q;
-    equeue_create(&q, count*EQUEUE_EVENT_SIZE);
+    equeue_create(&q, count * EQUEUE_EVENT_SIZE);

-    for (int i = 0; i < count-1; i++) {
+    for (int i = 0; i < count - 1; i++) {
         equeue_call(&q, no_func, 0);
     }

@@ -213,7 +220,8 @@ void equeue_post_many_prof(int count) {
     equeue_destroy(&q);
 }

-void equeue_post_future_prof(void) {
+void equeue_post_future_prof(void)
+{
     struct equeue q;
     equeue_create(&q, EQUEUE_EVENT_SIZE);

@@ -231,11 +239,12 @@ void equeue_post_future_prof(void) {
     equeue_destroy(&q);
 }

-void equeue_post_future_many_prof(int count) {
+void equeue_post_future_many_prof(int count)
+{
     struct equeue q;
-    equeue_create(&q, count*EQUEUE_EVENT_SIZE);
+    equeue_create(&q, count * EQUEUE_EVENT_SIZE);

-    for (int i = 0; i < count-1; i++) {
+    for (int i = 0; i < count - 1; i++) {
         equeue_call(&q, no_func, 0);
     }

@@ -253,7 +262,8 @@ void equeue_post_future_many_prof(int count) {
     equeue_destroy(&q);
 }

-void equeue_dispatch_prof(void) {
+void equeue_dispatch_prof(void)
+{
     struct equeue q;
     equeue_create(&q, EQUEUE_EVENT_SIZE);

@@ -268,9 +278,10 @@ void equeue_dispatch_prof(void) {
     equeue_destroy(&q);
 }

-void equeue_dispatch_many_prof(int count) {
+void equeue_dispatch_many_prof(int count)
+{
     struct equeue q;
-    equeue_create(&q, count*EQUEUE_EVENT_SIZE);
+    equeue_create(&q, count * EQUEUE_EVENT_SIZE);

     prof_loop() {
         for (int i = 0; i < count; i++) {
@@ -285,7 +296,8 @@ void equeue_dispatch_many_prof(int count) {
     equeue_destroy(&q);
 }

-void equeue_cancel_prof(void) {
+void equeue_cancel_prof(void)
+{
     struct equeue q;
     equeue_create(&q, EQUEUE_EVENT_SIZE);

@@ -300,11 +312,12 @@ void equeue_cancel_prof(void) {
     equeue_destroy(&q);
 }

-void equeue_cancel_many_prof(int count) {
+void equeue_cancel_many_prof(int count)
+{
     struct equeue q;
-    equeue_create(&q, count*EQUEUE_EVENT_SIZE);
+    equeue_create(&q, count * EQUEUE_EVENT_SIZE);

-    for (int i = 0; i < count-1; i++) {
+    for (int i = 0; i < count - 1; i++) {
         equeue_call(&q, no_func, 0);
     }

@@ -319,8 +332,9 @@ void equeue_cancel_many_prof(int count) {
     equeue_destroy(&q);
 }

-void equeue_alloc_size_prof(void) {
-    size_t size = 32*EQUEUE_EVENT_SIZE;
+void equeue_alloc_size_prof(void)
+{
+    size_t size = 32 * EQUEUE_EVENT_SIZE;

     struct equeue q;
     equeue_create(&q, size);
@@ -331,8 +345,9 @@ void equeue_alloc_size_prof(void) {
     equeue_destroy(&q);
 }

-void equeue_alloc_many_size_prof(int count) {
-    size_t size = count*EQUEUE_EVENT_SIZE;
+void equeue_alloc_many_size_prof(int count)
+{
+    size_t size = count * EQUEUE_EVENT_SIZE;

     struct equeue q;
     equeue_create(&q, size);
@@ -346,8 +361,9 @@ void equeue_alloc_many_size_prof(int count) {
     equeue_destroy(&q);
 }

-void equeue_alloc_fragmented_size_prof(int count) {
-    size_t size = count*EQUEUE_EVENT_SIZE;
+void equeue_alloc_fragmented_size_prof(int count)
+{
+    size_t size = count * EQUEUE_EVENT_SIZE;

     struct equeue q;
     equeue_create(&q, size);
@@ -362,11 +378,11 @@ void equeue_alloc_fragmented_size_prof(int count) {
         equeue_dealloc(&q, es[i]);
     }

-    for (int i = count-1; i >= 0; i--) {
+    for (int i = count - 1; i >= 0; i--) {
         es[i] = equeue_alloc(&q, (i % 4) * sizeof(int));
     }

-    for (int i = count-1; i >= 0; i--) {
+    for (int i = count - 1; i >= 0; i--) {
         equeue_dealloc(&q, es[i]);
     }

@@ -381,7 +397,8 @@ void equeue_alloc_fragmented_size_prof(int count) {

 // Entry point
-int main() {
+int main()
+{
     printf("beginning profiling...\n");

     prof_baseline(baseline_prof);
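Each profiling case above follows the same template: create a queue sized in multiples of EQUEUE_EVENT_SIZE, wrap the operation under test in prof_start()/prof_stop() inside prof_loop(), and destroy the queue. A hypothetical extra case in that template is sketched below; the name equeue_call_in_prof and the 10 ms delay are invented for illustration, and the prof_* macros are assumed to come from the same harness used by the functions above.

// hypothetical addition, mirroring the shape of equeue_post_prof above
void equeue_call_in_prof(void)
{
    struct equeue q;
    equeue_create(&q, EQUEUE_EVENT_SIZE);

    prof_loop() {
        prof_start();
        // time the allocation + delayed post of a single event
        int id = equeue_call_in(&q, 10, no_func, 0);
        prof_stop();

        // undo the post so the single-event queue never fills up
        equeue_cancel(&q, id);
    }

    equeue_destroy(&q);
}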
@@ -51,14 +51,17 @@ static int test_failure;

 // Test functions
-void pass_func(void *eh) {
+void pass_func(void *eh)
+{
 }

-void simple_func(void *p) {
+void simple_func(void *p)
+{
     (*(int *)p)++;
 }

-void sloth_func(void *p) {
+void sloth_func(void *p)
+{
     usleep(10000);
     (*(int *)p)++;
 }
@@ -68,8 +71,9 @@ struct indirect {
     uint8_t buffer[7];
 };

-void indirect_func(void *p) {
-    struct indirect *i = (struct indirect*)p;
+void indirect_func(void *p)
+{
+    struct indirect *i = (struct indirect *)p;
     (*i->touched)++;
 }

@@ -78,8 +82,9 @@ struct timing {
     unsigned delay;
 };

-void timing_func(void *p) {
-    struct timing *timing = (struct timing*)p;
+void timing_func(void *p)
+{
+    struct timing *timing = (struct timing *)p;
     unsigned tick = equeue_tick();

     unsigned t1 = timing->delay;
@@ -95,8 +100,9 @@ struct fragment {
     struct timing timing;
 };

-void fragment_func(void *p) {
-    struct fragment *fragment = (struct fragment*)p;
+void fragment_func(void *p)
+{
+    struct fragment *fragment = (struct fragment *)p;
     timing_func(&fragment->timing);

     struct fragment *nfragment = equeue_alloc(fragment->q, fragment->size);
@@ -114,7 +120,8 @@ struct cancel {
     int id;
 };

-void cancel_func(void *p) {
+void cancel_func(void *p)
+{
     struct cancel *cancel = (struct cancel *)p;
     equeue_cancel(cancel->q, cancel->id);
 }
@@ -125,7 +132,8 @@ struct nest {
     void *data;
 };

-void nest_func(void *p) {
+void nest_func(void *p)
+{
     struct nest *nest = (struct nest *)p;
     equeue_call(nest->q, nest->cb, nest->data);

@@ -134,7 +142,8 @@ void nest_func(void *p) {

 // Simple call tests
-void simple_call_test(void) {
+void simple_call_test(void)
+{
     equeue_t q;
     int err = equeue_create(&q, 2048);
     test_assert(!err);
@@ -147,7 +156,8 @@ void simple_call_test(void) {
     equeue_destroy(&q);
 }

-void simple_call_in_test(void) {
+void simple_call_in_test(void)
+{
     equeue_t q;
     int err = equeue_create(&q, 2048);
     test_assert(!err);
@@ -162,7 +172,8 @@ void simple_call_in_test(void) {
     equeue_destroy(&q);
 }

-void simple_call_every_test(void) {
+void simple_call_every_test(void)
+{
     equeue_t q;
     int err = equeue_create(&q, 2048);
     test_assert(!err);
@@ -177,7 +188,8 @@ void simple_call_every_test(void) {
     equeue_destroy(&q);
 }

-void simple_post_test(void) {
+void simple_post_test(void)
+{
     equeue_t q;
     int err = equeue_create(&q, 2048);
     test_assert(!err);
@@ -197,7 +209,8 @@ void simple_post_test(void) {
 }

 // Misc tests
-void destructor_test(void) {
+void destructor_test(void)
+{
     equeue_t q;
     int err = equeue_create(&q, 2048);
     test_assert(!err);
@@ -253,7 +266,8 @@ void destructor_test(void) {
     test_assert(touched == 3);
 }

-void allocation_failure_test(void) {
+void allocation_failure_test(void)
+{
     equeue_t q;
     int err = equeue_create(&q, 2048);
     test_assert(!err);
@@ -269,19 +283,20 @@ void allocation_failure_test(void) {
     equeue_destroy(&q);
 }

-void cancel_test(int N) {
+void cancel_test(int N)
+{
     equeue_t q;
     int err = equeue_create(&q, 2048);
     test_assert(!err);

     bool touched = false;
-    int *ids = malloc(N*sizeof(int));
+    int *ids = malloc(N * sizeof(int));

     for (int i = 0; i < N; i++) {
         ids[i] = equeue_call(&q, simple_func, &touched);
     }

-    for (int i = N-1; i >= 0; i--) {
+    for (int i = N - 1; i >= 0; i--) {
         equeue_cancel(&q, ids[i]);
     }

@@ -293,7 +308,8 @@ void cancel_test(int N) {
     equeue_destroy(&q);
 }

-void cancel_inflight_test(void) {
+void cancel_inflight_test(void)
+{
     equeue_t q;
     int err = equeue_create(&q, 2048);
     test_assert(!err);
@@ -328,7 +344,8 @@ void cancel_inflight_test(void) {
     equeue_destroy(&q);
 }

-void cancel_unnecessarily_test(void) {
+void cancel_unnecessarily_test(void)
+{
     equeue_t q;
     int err = equeue_create(&q, 2048);
     test_assert(!err);
@@ -356,7 +373,8 @@ void cancel_unnecessarily_test(void) {
     equeue_destroy(&q);
 }

-void loop_protect_test(void) {
+void loop_protect_test(void)
+{
     equeue_t q;
     int err = equeue_create(&q, 2048);
     test_assert(!err);
@@ -376,7 +394,8 @@ void loop_protect_test(void) {
     equeue_destroy(&q);
 }

-void break_test(void) {
+void break_test(void)
+{
     equeue_t q;
     int err = equeue_create(&q, 2048);
     test_assert(!err);
@@ -391,7 +410,8 @@ void break_test(void) {
     equeue_destroy(&q);
 }

-void break_no_windup_test(void) {
+void break_no_windup_test(void)
+{
     equeue_t q;
     int err = equeue_create(&q, 2048);
     test_assert(!err);
@@ -411,7 +431,8 @@ void break_no_windup_test(void) {
     equeue_destroy(&q);
 }

-void period_test(void) {
+void period_test(void)
+{
     equeue_t q;
     int err = equeue_create(&q, 2048);
     test_assert(!err);
@@ -425,7 +446,8 @@ void period_test(void) {
     equeue_destroy(&q);
 }

-void nested_test(void) {
+void nested_test(void)
+{
     equeue_t q;
     int err = equeue_create(&q, 2048);
     test_assert(!err);
@@ -462,7 +484,8 @@ void nested_test(void) {
     equeue_destroy(&q);
 }

-void sloth_test(void) {
+void sloth_test(void)
+{
     equeue_t q;
     int err = equeue_create(&q, 2048);
     test_assert(!err);
@@ -483,13 +506,15 @@ void sloth_test(void) {
     equeue_destroy(&q);
 }

-void *multithread_thread(void *p) {
+void *multithread_thread(void *p)
+{
     equeue_t *q = (equeue_t *)p;
     equeue_dispatch(q, -1);
     return 0;
 }

-void multithread_test(void) {
+void multithread_test(void)
+{
     equeue_t q;
     int err = equeue_create(&q, 2048);
     test_assert(!err);
@@ -511,11 +536,13 @@ void multithread_test(void) {
     equeue_destroy(&q);
 }

-void background_func(void *p, int ms) {
+void background_func(void *p, int ms)
+{
     *(unsigned *)p = ms;
 }

-void background_test(void) {
+void background_test(void)
+{
     equeue_t q;
     int err = equeue_create(&q, 2048);
     test_assert(!err);
@@ -542,7 +569,8 @@ void background_test(void) {
     test_assert(ms == -1);
 }

-void chain_test(void) {
+void chain_test(void)
+{
     equeue_t q1;
     int err = equeue_create(&q1, 2048);
     test_assert(!err);
@@ -582,7 +610,8 @@ void chain_test(void) {
     equeue_destroy(&q2);
 }

-void unchain_test(void) {
+void unchain_test(void)
+{
     equeue_t q1;
     int err = equeue_create(&q1, 2048);
     test_assert(!err);
@@ -616,9 +645,10 @@ void unchain_test(void) {
 }

 // Barrage tests
-void simple_barrage_test(int N) {
+void simple_barrage_test(int N)
+{
     equeue_t q;
-    int err = equeue_create(&q, N*(EQUEUE_EVENT_SIZE+sizeof(struct timing)));
+    int err = equeue_create(&q, N * (EQUEUE_EVENT_SIZE + sizeof(struct timing)));
     test_assert(!err);

     for (int i = 0; i < N; i++) {
@@ -626,7 +656,7 @@ void simple_barrage_test(int N) {
         test_assert(timing);

         timing->tick = equeue_tick();
-        timing->delay = (i+1)*100;
+        timing->delay = (i + 1) * 100;
         equeue_event_delay(timing, timing->delay);
         equeue_event_period(timing, timing->delay);

@@ -634,33 +664,34 @@ void simple_barrage_test(int N) {
         test_assert(id);
     }

-    equeue_dispatch(&q, N*100);
+    equeue_dispatch(&q, N * 100);

     equeue_destroy(&q);
 }

-void fragmenting_barrage_test(int N) {
+void fragmenting_barrage_test(int N)
+{
     equeue_t q;
     int err = equeue_create(&q,
-            2*N*(EQUEUE_EVENT_SIZE+sizeof(struct fragment)+N*sizeof(int)));
+            2 * N * (EQUEUE_EVENT_SIZE + sizeof(struct fragment) + N * sizeof(int)));
     test_assert(!err);

     for (int i = 0; i < N; i++) {
-        size_t size = sizeof(struct fragment) + i*sizeof(int);
+        size_t size = sizeof(struct fragment) + i * sizeof(int);
         struct fragment *fragment = equeue_alloc(&q, size);
         test_assert(fragment);

         fragment->q = &q;
         fragment->size = size;
         fragment->timing.tick = equeue_tick();
-        fragment->timing.delay = (i+1)*100;
+        fragment->timing.delay = (i + 1) * 100;
         equeue_event_delay(fragment, fragment->timing.delay);

         int id = equeue_post(&q, fragment_func, fragment);
         test_assert(id);
     }

-    equeue_dispatch(&q, N*100);
+    equeue_dispatch(&q, N * 100);

     equeue_destroy(&q);
 }
@@ -671,20 +702,22 @@ struct ethread {
     int ms;
 };

-static void *ethread_dispatch(void *p) {
-    struct ethread *t = (struct ethread*)p;
+static void *ethread_dispatch(void *p)
+{
+    struct ethread *t = (struct ethread *)p;
     equeue_dispatch(t->q, t->ms);
     return 0;
 }

-void multithreaded_barrage_test(int N) {
+void multithreaded_barrage_test(int N)
+{
     equeue_t q;
-    int err = equeue_create(&q, N*(EQUEUE_EVENT_SIZE+sizeof(struct timing)));
+    int err = equeue_create(&q, N * (EQUEUE_EVENT_SIZE + sizeof(struct timing)));
     test_assert(!err);

     struct ethread t;
     t.q = &q;
-    t.ms = N*100;
+    t.ms = N * 100;
     err = pthread_create(&t.thread, 0, ethread_dispatch, &t);
     test_assert(!err);

@@ -693,7 +726,7 @@ void multithreaded_barrage_test(int N) {
         test_assert(timing);

         timing->tick = equeue_tick();
-        timing->delay = (i+1)*100;
+        timing->delay = (i + 1) * 100;
         equeue_event_delay(timing, timing->delay);
         equeue_event_period(timing, timing->delay);

@@ -707,20 +740,21 @@ void multithreaded_barrage_test(int N) {
     equeue_destroy(&q);
 }

-struct count_and_queue
-{
+struct count_and_queue {
     int p;
-    equeue_t* q;
+    equeue_t *q;
 };

-void simple_breaker(void *p) {
-    struct count_and_queue* caq = (struct count_and_queue*)p;
+void simple_breaker(void *p)
+{
+    struct count_and_queue *caq = (struct count_and_queue *)p;
     equeue_break(caq->q);
     usleep(10000);
     caq->p++;
 }

-void break_request_cleared_on_timeout(void) {
+void break_request_cleared_on_timeout(void)
+{
     equeue_t q;
     int err = equeue_create(&q, 2048);
     test_assert(!err);
@@ -745,7 +779,8 @@ void break_request_cleared_on_timeout(void) {
     equeue_destroy(&q);
 }

-int main() {
+int main()
+{
     printf("beginning tests...\n");

     test_run(simple_call_test);
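The background and chain tests above drive equeue_background with the small background_func helper. A stripped-down check in the same style is sketched below; the name background_smoke_test and the 10 ms delay are illustrative and not taken from the test file.

// hypothetical minimal check, in the style of background_test above
void background_smoke_test(void)
{
    equeue_t q;
    int err = equeue_create(&q, 2048);
    test_assert(!err);

    unsigned ms = 0;
    // background_func (defined above) records the requested timeout in ms
    equeue_background(&q, background_func, &ms);

    // posting a delayed event re-arms the background timer
    equeue_call_in(&q, 10, pass_func, 0);

    // unregistering the background timer reports -1 to the old callback
    equeue_background(&q, 0, 0);
    test_assert(ms == -1);

    equeue_destroy(&q);
}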