author    Benjamin Herrenschmidt <benh@kernel.crashing.org>  2014-11-12 15:17:03 +1100
committer Benjamin Herrenschmidt <benh@kernel.crashing.org>  2014-11-12 16:41:25 +1100
commit    eec6e53ff88c0cb82f3624642390793cc0f8e3ce (patch)
tree      b149be31c96ef55103116c5530da640bc689d2c6 /core/timer.c
parent    2e47b392d69e17f5614ee7796710a69aa273a8c1 (diff)
timer: Add "polling" timers
These have no expiry and get called whenever the OPAL pollers run. They are intended to replace most OPAL pollers, and let the same driver code choose between a poller and a timer based on, for example, interrupt availability. The other advantage over the existing pollers (which I hope to deprecate) is that they are protected against re-entrancy, while still running without locks held.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
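For illustration, a driver using the new interface might look roughly like this. This is a sketch, not part of the patch: struct my_dev and its fields are hypothetical, msecs_to_tb() is assumed from skiboot's timebase.h, and only the timer calls follow the API shown in the diff below.

#include <timer.h>
#include <timebase.h>
#include <stdbool.h>

struct my_dev {                        /* hypothetical driver state */
	struct timer	poll_timer;
	bool		done;
};

static void my_dev_expiry(struct timer *t, void *data)
{
	struct my_dev *dev = data;

	/* Do one unit of work, then re-arm until the device is done. */
	if (!dev->done)
		schedule_timer(t, msecs_to_tb(10));
}

static void my_dev_start(struct my_dev *dev, bool have_irq)
{
	init_timer(&dev->poll_timer, my_dev_expiry, dev);

	if (have_irq)
		/* Interrupt available: a slow backup timer is enough. */
		schedule_timer(&dev->poll_timer, msecs_to_tb(100));
	else
		/* No usable interrupt: run from the OPAL pollers instead. */
		schedule_timer(&dev->poll_timer, TIMER_POLL);
}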
Diffstat (limited to 'core/timer.c')
-rw-r--r--  core/timer.c  142
1 file changed, 126 insertions(+), 16 deletions(-)
diff --git a/core/timer.c b/core/timer.c
index 12ec6f5..a4fa8b8 100644
--- a/core/timer.c
+++ b/core/timer.c
@@ -1,6 +1,9 @@
#include <timer.h>
#include <timebase.h>
#include <lock.h>
+#include <fsp.h>
+#include <device.h>
+#include <opal.h>
#ifdef __TEST__
#define this_cpu() ((void *)-1)
@@ -11,6 +14,9 @@
static struct lock timer_lock = LOCK_UNLOCKED;
static LIST_HEAD(timer_list);
+static LIST_HEAD(timer_poll_list);
+static bool timer_in_poll;
+static uint64_t timer_poll_gen;
void init_timer(struct timer *t, timer_func_t expiry, void *data)
{
@@ -74,37 +80,94 @@ void schedule_timer_at(struct timer *t, uint64_t when)
if (t->link.next)
__remove_timer(t);
t->target = when;
- list_for_each(&timer_list, lt, link) {
- if (when < lt->target) {
- list_add_before(&timer_list, &t->link, &lt->link);
- unlock(&timer_lock);
- return;
+ if (when == TIMER_POLL) {
+ t->gen = timer_poll_gen;
+ list_add_tail(&timer_poll_list, &t->link);
+ } else {
+ list_for_each(&timer_list, lt, link) {
+ if (when < lt->target) {
+ list_add_before(&timer_list, &t->link,
+ &lt->link);
+ unlock(&timer_lock);
+ return;
+ }
}
+ list_add_tail(&timer_list, &t->link);
}
- list_add_tail(&timer_list, &t->link);
unlock(&timer_lock);
}
-void schedule_timer(struct timer *t, uint64_t how_long)
+uint64_t schedule_timer(struct timer *t, uint64_t how_long)
{
- schedule_timer_at(t, mftb() + how_long);
+ uint64_t now = mftb();
+
+ if (how_long == TIMER_POLL)
+ schedule_timer_at(t, TIMER_POLL);
+ else
+ schedule_timer_at(t, now + how_long);
+
+ return now;
}
-void check_timers(void)
+static void __check_poll_timers(void)
{
struct timer *t;
- uint64_t now = mftb();
- /* Lockless "peek", a bit racy but shouldn't be a problem */
- t = list_top(&timer_list, struct timer, link);
- if (!t || t->target > now)
+ /* Don't call this from multiple CPUs at once */
+ if (timer_in_poll)
return;
+ timer_in_poll = true;
+
+ /*
+ * Poll timers might re-enqueue themselves and don't have an
+ * expiry time, so we can't do what we do for normal timers and
+ * just run until we hit a wall. Instead, each timer has a
+ * generation count, which we set to the current global gen count
+ * when we schedule it and update when we run it. A timer is only
+ * considered if its generation count differs from the current
+ * one. We don't try to compare generations as larger or smaller,
+ * because at boot this can be called quite quickly and I want
+ * to be safe vs. wraps.
+ */
+ timer_poll_gen++;
+ for (;;) {
+ t = list_top(&timer_poll_list, struct timer, link);
+
+ /* Top timer has a different generation from the current one?
+ * Then it must be older.
+ */
+ if (!t || t->gen == timer_poll_gen)
+ break;
+
+ /* Top of the list is still running; we have to delay handling
+ * it. For now just skip it until the next poll. Once we have
+ * SLW interrupts, we'll probably want to trip another one
+ * ASAP.
+ */
+ if (t->running)
+ break;
+
+ /* Alright, first remove it and mark it running */
+ __remove_timer(t);
+ t->running = this_cpu();
+
+ /* Now we can unlock and call its expiry */
+ unlock(&timer_lock);
+ t->expiry(t, t->user_data);
+
+ /* Re-lock and mark not running */
+ lock(&timer_lock);
+ t->running = NULL;
+ }
+ timer_in_poll = false;
+}
+
+static void __check_timers(uint64_t now)
+{
+ struct timer *t;
- /* Take lock and try again */
- lock(&timer_lock);
for (;;) {
t = list_top(&timer_list, struct timer, link);
- now = mftb();
/* Top of list not expired ? that's it ... */
if (!t || t->target > now)
@@ -129,6 +192,53 @@ void check_timers(void)
/* Re-lock and mark not running */
lock(&timer_lock);
t->running = NULL;
+
+ /* Update time stamp */
+ now = mftb();
}
+}
+
+void check_timers(void)
+{
+ struct timer *t;
+ uint64_t now = mftb();
+
+ /* This is the polling variant; the SLW interrupt path, when it
+ * exists, will use a slight variant of this that doesn't call
+ * the pollers.
+ */
+
+ /* Lockless "peek", a bit racy but shouldn't be a problem */
+ t = list_top(&timer_list, struct timer, link);
+ if (list_empty(&timer_poll_list) && (!t || t->target > now))
+ return;
+
+ /* Take lock and try again */
+ lock(&timer_lock);
+ __check_poll_timers();
+ __check_timers(now);
unlock(&timer_lock);
}
+
+#ifndef __TEST__
+void late_init_timers(void)
+{
+ /* Add a property requesting that the OS call opal_poll_event() at
+ * a specified interval, in order for us to run our background
+ * low-priority poller.
+ *
+ * When we have a working SLW based HW timer, we'll be able to
+ * reduce this or even remove it; for now, however, we want to be
+ * called at least every couple of seconds on FSP based machines,
+ * and a bit faster on BMC based machines where the LPC and i2c
+ * interrupts might not be functional.
+ *
+ * The value is in milliseconds; we don't want this to ever be
+ * faster than that.
+ */
+ if (fsp_present())
+ dt_add_property_cells(opal_node, "ibm,heartbeat-freq", 2000);
+ else
+ dt_add_property_cells(opal_node, "ibm,heartbeat-freq", 250);
+}
+#endif
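To illustrate the generation-count logic in __check_poll_timers() above: a poll timer that re-arms itself from its own expiry callback does not make the loop spin, because the re-queued timer carries the freshly bumped generation. A minimal sketch follows; the toy_* names are hypothetical and not from the patch, only the timer API matches the diff.

#include <timer.h>

static struct timer toy_timer;
static unsigned long toy_passes;

static void toy_expiry(struct timer *t, void *data)
{
	(void)data;
	toy_passes++;

	/* Re-queue on the poll list. schedule_timer_at() stamps the
	 * timer with the new timer_poll_gen, so the head of
	 * timer_poll_list now carries the current generation and
	 * __check_poll_timers() breaks out instead of running this
	 * timer again within the same pass. */
	schedule_timer(t, TIMER_POLL);
}

void toy_init(void)
{
	init_timer(&toy_timer, toy_expiry, NULL);
	schedule_timer(&toy_timer, TIMER_POLL);
}

/* Net effect: toy_passes advances by exactly one per check_timers()
 * invocation, i.e. once per run of the OPAL pollers. */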