-rw-r--r--  core/Makefile.inc        |   1
-rw-r--r--  core/interrupts.c        |   6
-rw-r--r--  core/opal.c              |   4
-rw-r--r--  core/test/Makefile.check |   2
-rw-r--r--  core/test/run-timer.c    |  56
-rw-r--r--  core/timer.c             | 134
-rw-r--r--  include/timebase.h       |   2
-rw-r--r--  include/timer.h          |  70
8 files changed, 274 insertions(+), 1 deletion(-)
diff --git a/core/Makefile.inc b/core/Makefile.inc
index c848c8e..5098d2d 100644
--- a/core/Makefile.inc
+++ b/core/Makefile.inc
@@ -7,6 +7,7 @@ CORE_OBJS += timebase.o opal-msg.o pci.o pci-opal.o fast-reboot.o
 CORE_OBJS += device.o exceptions.o trace.o affinity.o vpd.o
 CORE_OBJS += hostservices.o platform.o nvram.o flash-nvram.o hmi.o
 CORE_OBJS += console-log.o ipmi.o time-utils.o pel.o pool.o errorlog.o
+CORE_OBJS += timer.o
 CORE=core/built-in.o

 $(CORE): $(CORE_OBJS:%=core/%)
diff --git a/core/interrupts.c b/core/interrupts.c
index d33e787..dfccb32 100644
--- a/core/interrupts.c
+++ b/core/interrupts.c
@@ -23,6 +23,7 @@
 #include <cec.h>
 #include <device.h>
 #include <ccan/str/str.h>
+#include <timer.h>

 /* ICP registers */
 #define ICP_XIRR	0x4	/* 32-bit access */
@@ -291,11 +292,16 @@ static int64_t opal_handle_interrupt(uint32_t isn, uint64_t *outstanding_event_m
 	struct irq_source *is = irq_find_source(isn);
 	int64_t rc = OPAL_SUCCESS;

+	/* We run the timers first */
+	check_timers();
+
+	/* No source ? return */
 	if (!is || !is->ops->interrupt) {
 		rc = OPAL_PARAMETER;
 		goto bail;
 	}

+	/* Run it */
 	is->ops->interrupt(is->data, isn);

 	/* Update output events */
diff --git a/core/opal.c b/core/opal.c
index ef19927..0dab3f7 100644
--- a/core/opal.c
+++ b/core/opal.c
@@ -28,6 +28,7 @@
 #include <timebase.h>
 #include <affinity.h>
 #include <opal-msg.h>
+#include <timer.h>

 /* Pending events to signal via opal_poll_events */
 uint64_t opal_pending_events;
@@ -278,6 +279,9 @@ void opal_run_pollers(void)
 {
 	struct opal_poll_entry *poll_ent;

+	/* We run the timers first */
+	check_timers();
+
 	/* The pollers are run lokelessly, see comment in opal_del_poller */
 	list_for_each(&opal_pollers, poll_ent, link)
 		poll_ent->poller(poll_ent->data);
diff --git a/core/test/Makefile.check b/core/test/Makefile.check
index 1e5d797..840ce25 100644
--- a/core/test/Makefile.check
+++ b/core/test/Makefile.check
@@ -1,5 +1,5 @@
 # -*-Makefile-*-
-CORE_TEST := core/test/run-device core/test/run-mem_region core/test/run-malloc core/test/run-malloc-speed core/test/run-mem_region_init core/test/run-mem_region_release_unused core/test/run-mem_region_release_unused_noalloc core/test/run-trace core/test/run-msg core/test/run-pel core/test/run-pool
+CORE_TEST := core/test/run-device core/test/run-mem_region core/test/run-malloc core/test/run-malloc-speed core/test/run-mem_region_init core/test/run-mem_region_release_unused core/test/run-mem_region_release_unused_noalloc core/test/run-trace core/test/run-msg core/test/run-pel core/test/run-pool core/test/run-timer

 check: $(CORE_TEST:%=%-check) $(CORE_TEST:%=%-gcov-run)
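The two hunks above are the only places check_timers() is invoked, so timers only fire when a CPU handles an OPAL interrupt or runs the pollers; a timer's target is therefore a lower bound, not an exact deadline. As a usage illustration only (not part of this patch: the heartbeat_* names are made up, and the msecs_to_tb() conversion is assumed to be available from timebase.h), a self-rescheduling timer built on this API would look roughly like this:

#include <timer.h>
#include <timebase.h>

static struct timer heartbeat;

static void heartbeat_expiry(struct timer *t, void *data)
{
	(void)data;

	/* ... periodic work goes here ... */

	/* Rescheduling from the expiry callback is allowed;
	 * schedule_timer() takes a delay in timebase ticks relative
	 * to "now" (msecs_to_tb() is assumed here for the conversion).
	 */
	schedule_timer(t, msecs_to_tb(100));
}

void heartbeat_start(void)
{
	init_timer(&heartbeat, heartbeat_expiry, NULL);
	schedule_timer(&heartbeat, msecs_to_tb(100));
}

If nothing runs the pollers and no interrupt arrives, the callback is simply deferred until the next check_timers() call, which is the intended behaviour for a polled firmware environment.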
diff --git a/core/test/run-timer.c b/core/test/run-timer.c
new file mode 100644
index 0000000..36d9a65
--- /dev/null
+++ b/core/test/run-timer.c
@@ -0,0 +1,56 @@
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#define __TEST__
+#include <timer.h>
+
+#define mftb()	(stamp)
+#define sync()
+
+static uint64_t stamp, last;
+struct lock;
+static inline void lock(struct lock *l) { (void)l; }
+static inline void unlock(struct lock *l) { (void)l; }
+
+#include "../timer.c"
+
+#define NUM_TIMERS	100
+
+static struct timer timers[NUM_TIMERS];
+static unsigned int rand_shift, count;
+
+static void init_rand(void)
+{
+	unsigned long max = RAND_MAX;
+
+	/* Get something reasonably small */
+	while(max > 0x10000) {
+		rand_shift++;
+		max >>= 1;
+	}
+}
+
+static void expiry(struct timer *t, void *data)
+{
+	(void)data;
+	assert(t->target >= last);
+	count--;
+}
+
+int main(void)
+{
+	unsigned int i;
+
+	init_rand();
+	for (i = 0; i < NUM_TIMERS; i++) {
+		init_timer(&timers[i], expiry, NULL);
+		schedule_timer(&timers[i], random() >> rand_shift);
+	}
+	count = NUM_TIMERS;
+	while(count) {
+		check_timers();
+		stamp++;
+	}
+	return 0;
+}
diff --git a/core/timer.c b/core/timer.c
new file mode 100644
index 0000000..12ec6f5
--- /dev/null
+++ b/core/timer.c
@@ -0,0 +1,134 @@
+#include <timer.h>
+#include <timebase.h>
+#include <lock.h>
+
+#ifdef __TEST__
+#define this_cpu()	((void *)-1)
+#define cpu_relax()
+#else
+#include <cpu.h>
+#endif
+
+static struct lock timer_lock = LOCK_UNLOCKED;
+static LIST_HEAD(timer_list);
+
+void init_timer(struct timer *t, timer_func_t expiry, void *data)
+{
+	t->link.next = t->link.prev = NULL;
+	t->target = 0;
+	t->expiry = expiry;
+	t->user_data = data;
+	t->running = NULL;
+}
+
+static void __remove_timer(struct timer *t)
+{
+	list_del(&t->link);
+	t->link.next = t->link.prev = NULL;
+}
+
+static void __sync_timer(struct timer *t)
+{
+	sync();
+
+	/* Guard against re-entrancy */
+	assert(t->running != this_cpu());
+
+	while (t->running) {
+		unlock(&timer_lock);
+		cpu_relax();
+		/* Should we call the pollers here ? */
+		lock(&timer_lock);
+	}
+}
+
+void sync_timer(struct timer *t)
+{
+	lock(&timer_lock);
+	__sync_timer(t);
+	unlock(&timer_lock);
+}
+
+void cancel_timer(struct timer *t)
+{
+	lock(&timer_lock);
+	__sync_timer(t);
+	if (t->link.next)
+		__remove_timer(t);
+	unlock(&timer_lock);
+}
+
+void cancel_timer_async(struct timer *t)
+{
+	lock(&timer_lock);
+	if (t->link.next)
+		__remove_timer(t);
+	unlock(&timer_lock);
+}
+
+void schedule_timer_at(struct timer *t, uint64_t when)
+{
+	struct timer *lt;
+
+	lock(&timer_lock);
+	if (t->link.next)
+		__remove_timer(t);
+	t->target = when;
+	list_for_each(&timer_list, lt, link) {
+		if (when < lt->target) {
+			list_add_before(&timer_list, &t->link, &lt->link);
+			unlock(&timer_lock);
+			return;
+		}
+	}
+	list_add_tail(&timer_list, &t->link);
+	unlock(&timer_lock);
+}
+
+void schedule_timer(struct timer *t, uint64_t how_long)
+{
+	schedule_timer_at(t, mftb() + how_long);
+}
+
+void check_timers(void)
+{
+	struct timer *t;
+	uint64_t now = mftb();
+
+	/* Lockless "peek", a bit racy but shouldn't be a problem */
+	t = list_top(&timer_list, struct timer, link);
+	if (!t || t->target > now)
+		return;
+
+	/* Take lock and try again */
+	lock(&timer_lock);
+	for (;;) {
+		t = list_top(&timer_list, struct timer, link);
+		now = mftb();
+
+		/* Top of list not expired ? that's it ... */
+		if (!t || t->target > now)
+			break;
+
+		/* Top of list still running, we have to delay handling
+		 * it. For now just skip until the next poll, when we have
+		 * SLW interrupts, we'll probably want to trip another one
+		 * ASAP
+		 */
+		if (t->running)
+			break;
+
+		/* Alright, first remove it and mark it running */
+		__remove_timer(t);
+		t->running = this_cpu();
+
+		/* Now we can unlock and call its expiry */
+		unlock(&timer_lock);
+		t->expiry(t, t->user_data);
+
+		/* Re-lock and mark not running */
+		lock(&timer_lock);
+		t->running = NULL;
+	}
+	unlock(&timer_lock);
+}
diff --git a/include/timebase.h b/include/timebase.h
index 4537256..156c296 100644
--- a/include/timebase.h
+++ b/include/timebase.h
@@ -24,6 +24,7 @@

 #include <time.h>

+#ifndef __TEST__
 static inline unsigned long mftb(void)
 {
 	unsigned long tb;
@@ -34,6 +35,7 @@ static inline unsigned long mftb(void)
 	asm volatile("mftb %0" : "=r"(tb) : : "memory");
 	return tb;
 }
+#endif

 enum tb_cmpval {
 	TB_ABEFOREB = -1,
diff --git a/include/timer.h b/include/timer.h
new file mode 100644
index 0000000..8770a95
--- /dev/null
+++ b/include/timer.h
@@ -0,0 +1,70 @@
+#ifndef __TIMER_H
+#define __TIMER_H
+
+#include <stdint.h>
+#include <ccan/list/list.h>
+
+struct timer;
+
+typedef void (*timer_func_t)(struct timer *t, void *data);
+
+/* Structure exposed in order to be able to allocate it
+ * statically but otherwise, use accessors, don't access
+ * the fields directly.
+ *
+ * WARNING: Do not free a timer object unless you have cancelled
+ * it first or you know it won't reschedule itself and have done
+ * a sync_timer() on it. The timer core *will* access the object
+ * again after you return from the expiry callback so it must not
+ * be freed from the callback itself.
+ */
+struct timer {
+	struct list_node	link;
+	uint64_t		target;
+	timer_func_t		expiry;
+	void			*user_data;
+	void			*running;
+};
+
+extern void init_timer(struct timer *t, timer_func_t expiry, void *data);
+
+/* (re)schedule a timer. If already scheduled, its expiry will be updated.
+ *
+ * This doesn't synchronize, so if the timer also reschedules itself there
+ * is no telling which one "wins". The advantage is that this can be called
+ * with any lock held or from the timer expiry itself.
+ */
+extern void schedule_timer(struct timer *t, uint64_t how_long);
+extern void schedule_timer_at(struct timer *t, uint64_t when);
+
+/* Synchronization point with the timer. If the callback has started before
+ * this function is called, it will have completed when this function returns.
+ *
+ * It might start *again* but at least anything before this will be visible
+ * to any subsequent occurrence.
+ *
+ * The usual issues with such sync functions apply: don't call it while
+ * holding a lock that the timer callback might take, or from the timer
+ * expiry itself.
+ */
+extern void sync_timer(struct timer *t);
+
+/* cancel_timer() will ensure the timer isn't concurrently running so
+ * the cancellation is guaranteed even if the timer reschedules itself.
+ *
+ * This uses sync_timer() internally so don't call this while holding a
+ * lock the timer might use.
+ */
+extern void cancel_timer(struct timer *t);
+
+/* cancel_timer_async() removes the timer from the schedule list without
+ * trying to synchronize. This is useful if the cancellation must happen
+ * while holding locks that would make the synchronization impossible.
+ * The user is responsible for ensuring they deal with potentially
+ * spurious occurrences.
+ */
+extern void cancel_timer_async(struct timer *t);
+
+/* Run the timers */
+extern void check_timers(void);
+
+#endif /* __TIMER_H */
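The header's WARNING and the cancel_timer()/cancel_timer_async() comments describe a locking constraint that is easy to get wrong, so here is a teardown illustration (again not part of the patch; my_timer, my_lock, my_expiry and the teardown functions are made-up names, and my_timer is assumed to have been initialized and scheduled elsewhere). When nothing relevant is locked, the synchronizing cancel_timer() is the safe default; when the cancelling path holds a lock the callback also takes, remove the timer first and synchronize only after dropping the lock.

#include <timer.h>
#include <lock.h>

static struct timer my_timer;
static struct lock my_lock = LOCK_UNLOCKED;	/* also taken by the expiry callback */

static void my_expiry(struct timer *t, void *data)
{
	(void)t;
	(void)data;
	lock(&my_lock);
	/* ... consume state protected by my_lock ... */
	unlock(&my_lock);
}

/* Simple case: no relevant lock held; cancel_timer() may spin until a
 * concurrently running my_expiry() has finished.
 */
static void simple_teardown(void)
{
	cancel_timer(&my_timer);
	/* my_timer can now be reused or its containing object freed */
}

/* Constrained case: this path holds my_lock, so calling cancel_timer()
 * here could deadlock against my_expiry(). Unhook the timer now and
 * synchronize once the lock is dropped. This assumes my_expiry() never
 * reschedules the timer; if it can, the caller must also cope with the
 * spurious occurrences the header comment warns about.
 */
static void locked_teardown(void)
{
	lock(&my_lock);
	/* ... tear down the state my_expiry() looks at ... */
	cancel_timer_async(&my_timer);
	unlock(&my_lock);

	sync_timer(&my_timer);	/* a callback that was running has now finished */
}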