author    Benjamin Herrenschmidt <benh@kernel.crashing.org>  2014-11-07 10:02:32 +1100
committer Benjamin Herrenschmidt <benh@kernel.crashing.org>  2014-11-07 15:54:22 +1100
commit    ffecdd630b4ff48c8d5b94da1c8044e0c927c6f2 (patch)
tree      975b88e2f6b960416895356901975b3caa02db30 /include
parent    9e29e3f29faad28e43b6e1b69f57d83935229f44 (diff)
timer: Add scheduled timer facility
For now this runs off the event pollers; that will improve once we get delayed interrupts from the SLW.

Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'include')
-rw-r--r--  include/timebase.h |  2
-rw-r--r--  include/timer.h    | 70
2 files changed, 72 insertions, 0 deletions
diff --git a/include/timebase.h b/include/timebase.h
index 4537256..156c296 100644
--- a/include/timebase.h
+++ b/include/timebase.h
@@ -24,6 +24,7 @@
#include <time.h>
+#ifndef __TEST__
static inline unsigned long mftb(void)
{
	unsigned long tb;
@@ -34,6 +35,7 @@ static inline unsigned long mftb(void)
	asm volatile("mftb %0" : "=r"(tb) : : "memory");
	return tb;
}
+#endif
enum tb_cmpval {
	TB_ABEFOREB = -1,
diff --git a/include/timer.h b/include/timer.h
new file mode 100644
index 0000000..8770a95
--- /dev/null
+++ b/include/timer.h
@@ -0,0 +1,70 @@
+#ifndef __TIMER_H
+#define __TIMER_H
+
+#include <stdint.h>
+#include <ccan/list/list.h>
+
+struct timer;
+
+typedef void (*timer_func_t)(struct timer *t, void *data);
+
+/* The structure is exposed so that it can be allocated statically,
+ * but otherwise use the accessors; don't access the fields
+ * directly.
+ *
+ * WARNING: Do not free a timer object unless you have cancelled
+ * it first or you know it won't reschedule itself and have done
+ * a sync_timer() on it. The timer core *will* access the object
+ * again after you return from the expiry callback so it must not
+ * be freed from the callback itself.
+ */
+struct timer {
+	struct list_node	link;
+	uint64_t		target;
+	timer_func_t		expiry;
+	void			*user_data;
+	void			*running;
+};
+
+extern void init_timer(struct timer *t, timer_func_t expiry, void *data);
+
+/* (Re)schedule a timer. If it is already scheduled, its expiry will be updated.
+ *
+ * This doesn't synchronize so if the timer also reschedules itself there
+ * is no telling which one "wins". The advantage is that this can be called
+ * with any lock held or from the timer expiry itself.
+ */
+extern void schedule_timer(struct timer *t, uint64_t how_long);
+extern void schedule_timer_at(struct timer *t, uint64_t when);
+
+/* Synchronization point with the timer. If the callback has started before
+ * this function is called, it will have completed when this function returns.
+ *
+ * It might start *again* but at least anything before this will be visible
+ * to any subsequent occurrence.
+ *
+ * The usual caveats with such sync functions apply: don't call it while holding
+ * a lock that the timer callback might take, or from the timer expiry itself.
+ */
+extern void sync_timer(struct timer *t);
+
+/* cancel_timer() will ensure the timer isn't concurrently running so
+ * the cancellation is guaranteed even if the timer reschedules itself.
+ *
+ * This uses sync_timer() internally so don't call this while holding a
+ * lock the timer might use.
+ */
+extern void cancel_timer(struct timer *t);
+
+/* cancel_timer_async() removes the timer from the schedule list without
+ * trying to synchronize. This is useful if the cancellation must happen
+ * while holding locks that would make the synchronization impossible.
+ * The user is responsible for dealing with potentially spurious
+ * occurrences.
+ */
+extern void cancel_timer_async(struct timer *t);
+
+/* Run the timers */
+extern void check_timers(void);
+
+#endif /* __TIMER_H */
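
A minimal usage sketch of the interface added above (not part of the patch): a statically allocated timer whose expiry callback reschedules itself, driven by check_timers() from the event-poller path mentioned in the commit message. The consumer names (heartbeat, heartbeat_expiry, poller_tick) are hypothetical, and the msecs_to_tb() helper from timebase.h is assumed here; how_long is presumably a timebase-tick delta.

/* Illustration only -- not part of this commit. */
#include <timer.h>
#include <timebase.h>

static struct timer heartbeat;	/* static allocation, as the header permits */

static void heartbeat_expiry(struct timer *t, void *data)
{
	(void)data;

	/* ... periodic work ... */

	/* Rescheduling from inside the expiry callback is allowed. */
	schedule_timer(t, msecs_to_tb(100));
}

void heartbeat_start(void)
{
	init_timer(&heartbeat, heartbeat_expiry, NULL);
	schedule_timer(&heartbeat, msecs_to_tb(100));
}

/* For now expiries only run when something calls check_timers(),
 * i.e. from the event pollers. */
void poller_tick(void)
{
	check_timers();
}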
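
Teardown, following the rules in the comments above: cancel_timer() is the safe default, while cancel_timer_async() is for when the caller holds a lock that the expiry callback also takes. The lock()/unlock() calls, the LOCK_UNLOCKED initializer and my_lock are assumptions for illustration, continuing the sketch above.

#include <lock.h>

static struct lock my_lock = LOCK_UNLOCKED;

/* Safe teardown: once this returns the callback is neither running nor
 * going to run again, so the timer object could now be freed. Must not
 * be called while holding a lock the callback might take. */
void heartbeat_stop(void)
{
	cancel_timer(&heartbeat);
}

/* If my_lock is also taken by heartbeat_expiry(), cancel_timer() cannot
 * be used here since it synchronizes internally; only the async variant
 * is safe. */
void heartbeat_stop_locked(void)
{
	lock(&my_lock);
	cancel_timer_async(&heartbeat);
	unlock(&my_lock);

	/* The expiry may still fire once more and must treat such a
	 * spurious run as a no-op; sync_timer() can be used here, after
	 * the lock is dropped, to wait for any in-flight run to finish. */
}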