author     Benjamin Herrenschmidt <benh@kernel.crashing.org>   2014-11-07 10:02:32 +1100
committer  Benjamin Herrenschmidt <benh@kernel.crashing.org>   2014-11-07 15:54:22 +1100
commit     ffecdd630b4ff48c8d5b94da1c8044e0c927c6f2 (patch)
tree       975b88e2f6b960416895356901975b3caa02db30 /core
parent     9e29e3f29faad28e43b6e1b69f57d83935229f44 (diff)
timer: Add scheduled timer facility

For now this runs off the event pollers; that will improve once we get
delayed interrupts from the SLW.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
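A minimal usage sketch of the new API, based on what core/timer.c below
exports. The my_timer/my_expiry/my_init names are hypothetical consumer
code, and secs_to_tb() is assumed to be available from skiboot's
timebase.h:

    #include <timer.h>
    #include <timebase.h>

    static struct timer my_timer;   /* hypothetical consumer timer */

    static void my_expiry(struct timer *t, void *data)
    {
        (void)t;
        (void)data;
        /* Runs from check_timers(), i.e. off the event pollers or
         * the OPAL interrupt-handling path for now */
    }

    void my_init(void)
    {
        init_timer(&my_timer, my_expiry, NULL);
        /* Fire roughly one second from now, in timebase ticks */
        schedule_timer(&my_timer, secs_to_tb(1));
    }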
Diffstat (limited to 'core')
-rw-r--r--  core/Makefile.inc        |   1
-rw-r--r--  core/interrupts.c        |   6
-rw-r--r--  core/opal.c              |   4
-rw-r--r--  core/test/Makefile.check |   2
-rw-r--r--  core/test/run-timer.c    |  56
-rw-r--r--  core/timer.c             | 134
6 files changed, 202 insertions, 1 deletion
diff --git a/core/Makefile.inc b/core/Makefile.inc
index c848c8e..5098d2d 100644
--- a/core/Makefile.inc
+++ b/core/Makefile.inc
@@ -7,6 +7,7 @@ CORE_OBJS += timebase.o opal-msg.o pci.o pci-opal.o fast-reboot.o
CORE_OBJS += device.o exceptions.o trace.o affinity.o vpd.o
CORE_OBJS += hostservices.o platform.o nvram.o flash-nvram.o hmi.o
CORE_OBJS += console-log.o ipmi.o time-utils.o pel.o pool.o errorlog.o
+CORE_OBJS += timer.o
CORE=core/built-in.o
$(CORE): $(CORE_OBJS:%=core/%)
diff --git a/core/interrupts.c b/core/interrupts.c
index d33e787..dfccb32 100644
--- a/core/interrupts.c
+++ b/core/interrupts.c
@@ -23,6 +23,7 @@
#include <cec.h>
#include <device.h>
#include <ccan/str/str.h>
+#include <timer.h>
/* ICP registers */
#define ICP_XIRR 0x4 /* 32-bit access */
@@ -291,11 +292,16 @@ static int64_t opal_handle_interrupt(uint32_t isn, uint64_t *outstanding_event_mask)
struct irq_source *is = irq_find_source(isn);
int64_t rc = OPAL_SUCCESS;
+ /* We run the timers first */
+ check_timers();
+
+ /* No source? Return */
if (!is || !is->ops->interrupt) {
rc = OPAL_PARAMETER;
goto bail;
}
+ /* Run it */
is->ops->interrupt(is->data, isn);
/* Update output events */
diff --git a/core/opal.c b/core/opal.c
index ef19927..0dab3f7 100644
--- a/core/opal.c
+++ b/core/opal.c
@@ -28,6 +28,7 @@
#include <timebase.h>
#include <affinity.h>
#include <opal-msg.h>
+#include <timer.h>
/* Pending events to signal via opal_poll_events */
uint64_t opal_pending_events;
@@ -278,6 +279,9 @@ void opal_run_pollers(void)
{
struct opal_poll_entry *poll_ent;
+ /* We run the timers first */
+ check_timers();
+
/* The pollers are run locklessly, see comment in opal_del_poller */
list_for_each(&opal_pollers, poll_ent, link)
poll_ent->poller(poll_ent->data);
diff --git a/core/test/Makefile.check b/core/test/Makefile.check
index 1e5d797..840ce25 100644
--- a/core/test/Makefile.check
+++ b/core/test/Makefile.check
@@ -1,5 +1,5 @@
# -*-Makefile-*-
-CORE_TEST := core/test/run-device core/test/run-mem_region core/test/run-malloc core/test/run-malloc-speed core/test/run-mem_region_init core/test/run-mem_region_release_unused core/test/run-mem_region_release_unused_noalloc core/test/run-trace core/test/run-msg core/test/run-pel core/test/run-pool
+CORE_TEST := core/test/run-device core/test/run-mem_region core/test/run-malloc core/test/run-malloc-speed core/test/run-mem_region_init core/test/run-mem_region_release_unused core/test/run-mem_region_release_unused_noalloc core/test/run-trace core/test/run-msg core/test/run-pel core/test/run-pool core/test/run-timer
check: $(CORE_TEST:%=%-check) $(CORE_TEST:%=%-gcov-run)
diff --git a/core/test/run-timer.c b/core/test/run-timer.c
new file mode 100644
index 0000000..36d9a65
--- /dev/null
+++ b/core/test/run-timer.c
@@ -0,0 +1,56 @@
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <assert.h>
+
+#define __TEST__
+#include <timer.h>
+
+#define mftb() (stamp)
+#define sync()
+
+static uint64_t stamp, last;
+struct lock;
+static inline void lock(struct lock *l) { (void)l; }
+static inline void unlock(struct lock *l) { (void)l; }
+
+#include "../timer.c"
+
+#define NUM_TIMERS 100
+
+static struct timer timers[NUM_TIMERS];
+static unsigned int rand_shift, count;
+
+static void init_rand(void)
+{
+ unsigned long max = RAND_MAX;
+
+ /* Get something reasonably small */
+ while(max > 0x10000) {
+ rand_shift++;
+ max >>= 1;
+ }
+}
+
+static void expiry(struct timer *t, void *data)
+{
+ (void)data;
+ /* Timers must pop in non-decreasing target order */
+ assert(t->target >= last);
+ last = t->target;
+ count--;
+}
+
+int main(void)
+{
+ unsigned int i;
+
+ init_rand();
+ for (i = 0; i < NUM_TIMERS; i++) {
+ init_timer(&timers[i], expiry, NULL);
+ schedule_timer(&timers[i], random() >> rand_shift);
+ }
+ count = NUM_TIMERS;
+ while(count) {
+ check_timers();
+ stamp++;
+ }
+ return 0;
+}
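A quick sanity check of the init_rand() trick above, assuming a host
where RAND_MAX is 0x7fffffff (glibc's value; an assumption about the
build host):

    0x7fffffff > 0x10000, halved 15 times -> max = 0xffff, rand_shift = 15

so random() >> rand_shift yields deltas in [0, 0xffff], and the main
loop, which advances the fake timebase by one tick per check_timers()
call, terminates after at most ~65536 iterations.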
diff --git a/core/timer.c b/core/timer.c
new file mode 100644
index 0000000..12ec6f5
--- /dev/null
+++ b/core/timer.c
@@ -0,0 +1,134 @@
+#include <timer.h>
+#include <timebase.h>
+#include <lock.h>
+
+#ifdef __TEST__
+#define this_cpu() ((void *)-1)
+#define cpu_relax()
+#else
+#include <cpu.h>
+#endif
+
+static struct lock timer_lock = LOCK_UNLOCKED;
+static LIST_HEAD(timer_list);
+
+void init_timer(struct timer *t, timer_func_t expiry, void *data)
+{
+ t->link.next = t->link.prev = NULL;
+ t->target = 0;
+ t->expiry = expiry;
+ t->user_data = data;
+ t->running = NULL;
+}
+
+static void __remove_timer(struct timer *t)
+{
+ list_del(&t->link);
+ t->link.next = t->link.prev = NULL;
+}
+
+static void __sync_timer(struct timer *t)
+{
+ sync();
+
+ /* Guard against re-entrancy */
+ assert(t->running != this_cpu());
+
+ while (t->running) {
+ unlock(&timer_lock);
+ cpu_relax();
+ /* Should we call the pollers here? */
+ lock(&timer_lock);
+ }
+}
+
+void sync_timer(struct timer *t)
+{
+ lock(&timer_lock);
+ __sync_timer(t);
+ unlock(&timer_lock);
+}
+
+void cancel_timer(struct timer *t)
+{
+ lock(&timer_lock);
+ __sync_timer(t);
+ if (t->link.next)
+ __remove_timer(t);
+ unlock(&timer_lock);
+}
+
+void cancel_timer_async(struct timer *t)
+{
+ lock(&timer_lock);
+ if (t->link.next)
+ __remove_timer(t);
+ unlock(&timer_lock);
+}
+
+void schedule_timer_at(struct timer *t, uint64_t when)
+{
+ struct timer *lt;
+
+ lock(&timer_lock);
+ if (t->link.next)
+ __remove_timer(t);
+ t->target = when;
+ list_for_each(&timer_list, lt, link) {
+ if (when < lt->target) {
+ list_add_before(&timer_list, &t->link, &lt->link);
+ unlock(&timer_lock);
+ return;
+ }
+ }
+ list_add_tail(&timer_list, &t->link);
+ unlock(&timer_lock);
+}
+
+void schedule_timer(struct timer *t, uint64_t how_long)
+{
+ schedule_timer_at(t, mftb() + how_long);
+}
+
+void check_timers(void)
+{
+ struct timer *t;
+ uint64_t now = mftb();
+
+ /* Lockless "peek", a bit racy but shouldn't be a problem */
+ t = list_top(&timer_list, struct timer, link);
+ if (!t || t->target > now)
+ return;
+
+ /* Take lock and try again */
+ lock(&timer_lock);
+ for (;;) {
+ t = list_top(&timer_list, struct timer, link);
+ now = mftb();
+
+ /* Top of list not expired? That's it... */
+ if (!t || t->target > now)
+ break;
+
+ /* Top of list still running, we have to delay handling it.
+ * For now just skip until the next poll; once we have SLW
+ * interrupts, we'll probably want to trip another one ASAP.
+ */
+ if (t->running)
+ break;
+
+ /* Alright, first remove it and mark it running */
+ __remove_timer(t);
+ t->running = this_cpu();
+
+ /* Now we can unlock and call its expiry */
+ unlock(&timer_lock);
+ t->expiry(t, t->user_data);
+
+ /* Re-lock and mark not running */
+ lock(&timer_lock);
+ t->running = NULL;
+ }
+ unlock(&timer_lock);
+}
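The running field is what makes cancellation safe against an expiry
executing concurrently on another CPU: cancel_timer() spins in
__sync_timer() until any in-flight callback completes, then unlinks the
timer. A hypothetical teardown sketch relying on that guarantee (the
my_dev structure and its field names are illustrative):

    #include <timer.h>
    #include <stdlib.h>

    struct my_dev {
        struct timer poll_timer;
        char *buf;
    };

    static void my_dev_free(struct my_dev *dev)
    {
        /* Waits for a concurrently running expiry to finish and
         * unlinks the timer. cancel_timer_async() would only unlink,
         * leaving a possible in-flight callback still using dev. */
        cancel_timer(&dev->poll_timer);
        free(dev->buf);
        free(dev);
    }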