aboutsummaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorBenjamin Herrenschmidt <benh@kernel.crashing.org>2014-11-12 15:17:03 +1100
committerBenjamin Herrenschmidt <benh@kernel.crashing.org>2014-11-12 16:41:25 +1100
commiteec6e53ff88c0cb82f3624642390793cc0f8e3ce (patch)
treeb149be31c96ef55103116c5530da640bc689d2c6
parent2e47b392d69e17f5614ee7796710a69aa273a8c1 (diff)
downloadskiboot-eec6e53ff88c0cb82f3624642390793cc0f8e3ce.zip
skiboot-eec6e53ff88c0cb82f3624642390793cc0f8e3ce.tar.gz
skiboot-eec6e53ff88c0cb82f3624642390793cc0f8e3ce.tar.bz2
timer: Add "polling" timers
These have no expiry and get called whenever the opal pollers run; they are intended to replace most opal pollers and allow the same code in drivers to choose between a poller or a timer based on things like interrupt availability, for example. The other advantage over existing pollers (which I hope to deprecate) is that they are protected against re-entrancy (while still running without locks held). Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
-rw-r--r--core/init.c4
-rw-r--r--core/timer.c142
-rw-r--r--include/timer.h18
3 files changed, 147 insertions, 17 deletions
diff --git a/core/init.c b/core/init.c
index f50e5ec..1d32be5 100644
--- a/core/init.c
+++ b/core/init.c
@@ -43,6 +43,7 @@
#include <centaur.h>
#include <libfdt/libfdt.h>
#include <hostservices.h>
+#include <timer.h>
/*
* Boot semaphore, incremented by each CPU calling in
@@ -645,6 +646,9 @@ void __noreturn main_cpu_entry(const void *fdt, u32 master_cpu)
/* Initialize PCI */
pci_init_slots();
+ /* Add OPAL timer related properties */
+ late_init_timers();
+
/*
* These last few things must be done as late as possible
* because they rely on various other things having been setup,
diff --git a/core/timer.c b/core/timer.c
index 12ec6f5..a4fa8b8 100644
--- a/core/timer.c
+++ b/core/timer.c
@@ -1,6 +1,9 @@
#include <timer.h>
#include <timebase.h>
#include <lock.h>
+#include <fsp.h>
+#include <device.h>
+#include <opal.h>
#ifdef __TEST__
#define this_cpu() ((void *)-1)
@@ -11,6 +14,9 @@
static struct lock timer_lock = LOCK_UNLOCKED;
static LIST_HEAD(timer_list);
+static LIST_HEAD(timer_poll_list);
+static bool timer_in_poll;
+static uint64_t timer_poll_gen;
void init_timer(struct timer *t, timer_func_t expiry, void *data)
{
@@ -74,37 +80,94 @@ void schedule_timer_at(struct timer *t, uint64_t when)
if (t->link.next)
__remove_timer(t);
t->target = when;
- list_for_each(&timer_list, lt, link) {
- if (when < lt->target) {
- list_add_before(&timer_list, &t->link, &lt->link);
- unlock(&timer_lock);
- return;
+ if (when == TIMER_POLL) {
+ t->gen = timer_poll_gen;
+ list_add_tail(&timer_poll_list, &t->link);
+ } else {
+ list_for_each(&timer_list, lt, link) {
+ if (when < lt->target) {
+ list_add_before(&timer_list, &t->link,
+ &lt->link);
+ unlock(&timer_lock);
+ return;
+ }
}
+ list_add_tail(&timer_list, &t->link);
}
- list_add_tail(&timer_list, &t->link);
unlock(&timer_lock);
}
-void schedule_timer(struct timer *t, uint64_t how_long)
+uint64_t schedule_timer(struct timer *t, uint64_t how_long)
{
- schedule_timer_at(t, mftb() + how_long);
+ uint64_t now = mftb();
+
+ if (how_long == TIMER_POLL)
+ schedule_timer_at(t, TIMER_POLL);
+ else
+ schedule_timer_at(t, now + how_long);
+
+ return now;
}
-void check_timers(void)
+static void __check_poll_timers(void)
{
struct timer *t;
- uint64_t now = mftb();
- /* Lockless "peek", a bit racy but shouldn't be a problem */
- t = list_top(&timer_list, struct timer, link);
- if (!t || t->target > now)
+ /* Don't call this from multiple CPUs at once */
+ if (timer_in_poll)
return;
+ timer_in_poll = true;
+
+ /*
+ * Poll timers might re-enqueue themselves and don't have an
+ * expiry so we can't do like normal timers and just run until
+ * we hit a wall. Instead, each timer has a generation count,
+ * which we set to the current global gen count when we schedule
+ * it and update when we run it. It will only be considered if
+ * the generation count is different than the current one. We
+ * don't try to compare generations being larger or smaller
+ * because at boot, this can be called quite quickly and I want
+ * to be safe vs. wraps.
+ */
+ timer_poll_gen++;
+ for (;;) {
+ t = list_top(&timer_poll_list, struct timer, link);
+
+ /* Top timer has a different generation than current ? Must
+ * be older
+ */
+ if (!t || t->gen == timer_poll_gen)
+ break;
+
+ /* Top of list still running, we have to delay handling
+ * it. For now just skip until the next poll, when we have
+ * SLW interrupts, we'll probably want to trip another one
+ * ASAP
+ */
+ if (t->running)
+ break;
+
+	/* Alright, first remove it and mark it running */
+ __remove_timer(t);
+ t->running = this_cpu();
+
+	/* Now we can unlock and call its expiry */
+ unlock(&timer_lock);
+ t->expiry(t, t->user_data);
+
+ /* Re-lock and mark not running */
+ lock(&timer_lock);
+ t->running = NULL;
+ }
+ timer_in_poll = false;
+}
+
+static void __check_timers(uint64_t now)
+{
+ struct timer *t;
- /* Take lock and try again */
- lock(&timer_lock);
for (;;) {
t = list_top(&timer_list, struct timer, link);
- now = mftb();
/* Top of list not expired ? that's it ... */
if (!t || t->target > now)
@@ -129,6 +192,53 @@ void check_timers(void)
/* Re-lock and mark not running */
lock(&timer_lock);
t->running = NULL;
+
+ /* Update time stamp */
+ now = mftb();
}
+}
+
+void check_timers(void)
+{
+ struct timer *t;
+ uint64_t now = mftb();
+
+ /* This is the polling variant, the SLW interrupt path, when it
+ * exists, will use a slight variant of this that doesn't call
+ * the pollers
+ */
+
+ /* Lockless "peek", a bit racy but shouldn't be a problem */
+ t = list_top(&timer_list, struct timer, link);
+ if (list_empty(&timer_poll_list) && (!t || t->target > now))
+ return;
+
+ /* Take lock and try again */
+ lock(&timer_lock);
+ __check_poll_timers();
+ __check_timers(now);
unlock(&timer_lock);
}
+
+#ifndef __TEST__
+void late_init_timers(void)
+{
+ /* Add a property requesting the OS to call opal_poll_event() at
+ * a specified interval in order for us to run our background
+ * low priority poller.
+ *
+ * When we have a working SLW based HW timer, we'll be able to
+ * reduce this or even remove it, for now however, we want to be
+ * called at least every couple of seconds on FSP based machines
+ * and a bit faster on BMC based machines where the LPC and i2c
+ * interrupts might not be functional.
+ *
+ * We use a value in milliseconds, we don't want this to ever be
+ * faster than that.
+ */
+ if (fsp_present())
+ dt_add_property_cells(opal_node, "ibm,heartbeat-freq", 2000);
+ else
+ dt_add_property_cells(opal_node, "ibm,heartbeat-freq", 250);
+}
+#endif
diff --git a/include/timer.h b/include/timer.h
index 8770a95..1796508 100644
--- a/include/timer.h
+++ b/include/timer.h
@@ -24,6 +24,7 @@ struct timer {
timer_func_t expiry;
void * user_data;
void * running;
+ uint64_t gen;
};
extern void init_timer(struct timer *t, timer_func_t expiry, void *data);
@@ -33,8 +34,20 @@ extern void init_timer(struct timer *t, timer_func_t expiry, void *data);
* This doesn't synchronize so if the timer also reschedules itself there
* is no telling which one "wins". The advantage is that this can be called
* with any lock held or from the timer expiry itself.
+ *
+ * We support a magic expiry of TIMER_POLL which causes a given timer to
+ * be called whenever OPAL main polling loop is run, which is often during
+ * boot and occasionally while Linux is up. This can be used with both
+ * schedule_timer() and schedule_timer_at()
+ *
+ * This is useful for a number of interrupt driven drivers to have a way
+ * to crank their state machine at times when the interrupt isn't available
+ * such as during early boot.
+ *
+ * Note: For convenience, schedule_timer() returns the current TB value
*/
-extern void schedule_timer(struct timer *t, uint64_t how_long);
+#define TIMER_POLL ((uint64_t)-1)
+extern uint64_t schedule_timer(struct timer *t, uint64_t how_long);
extern void schedule_timer_at(struct timer *t, uint64_t when);
/* Synchronization point with the timer. If the callback has started before
@@ -67,4 +80,7 @@ extern void cancel_timer_async(struct timer *t);
/* Run the timers */
extern void check_timers(void);
+/* Core init */
+void late_init_timers(void);
+
#endif /* __TIMER_H */