path: root/core/timer.c
#include <timer.h>
#include <timebase.h>
#include <lock.h>
#include <assert.h>

#ifdef __TEST__
#define this_cpu()	((void *)-1)
#define cpu_relax()
#else
#include <cpu.h>
#endif

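/*
 * All pending timers sit on a single list, sorted by ascending target
 * timebase value and protected by timer_lock. While a timer's expiry
 * callback is being executed, t->running holds the CPU running it.
 */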
static struct lock timer_lock = LOCK_UNLOCKED;
static LIST_HEAD(timer_list);

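/*
 * Initialize a timer: not queued (NULL link pointers), no target,
 * not currently running.
 */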
void init_timer(struct timer *t, timer_func_t expiry, void *data)
{
	t->link.next = t->link.prev = NULL;
	t->target = 0;
	t->expiry = expiry;
	t->user_data = data;
	t->running = NULL;
}

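/* Dequeue a timer; NULL link pointers mark it as no longer pending */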
static void __remove_timer(struct timer *t)
{
	list_del(&t->link);
	t->link.next = t->link.prev = NULL;
}

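/*
 * Wait for a timer's expiry callback to finish. Called and returns with
 * timer_lock held, but drops and re-takes it while spinning.
 */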
static void __sync_timer(struct timer *t)
{
	sync();

	/* Guard against re-entrancy */
	assert(t->running != this_cpu());

	while (t->running) {
		unlock(&timer_lock);
		cpu_relax();
		/* Should we call the pollers here? */
		lock(&timer_lock);
	}
}

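/* Wait until a timer's expiry callback is no longer executing */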
void sync_timer(struct timer *t)
{
	lock(&timer_lock);
	__sync_timer(t);
	unlock(&timer_lock);
}

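/*
 * Synchronous cancel: wait for any in-flight expiry callback to finish,
 * then dequeue the timer if it is still pending.
 */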
void cancel_timer(struct timer *t)
{
	lock(&timer_lock);
	__sync_timer(t);
	if (t->link.next)
		__remove_timer(t);
	unlock(&timer_lock);
}

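/*
 * Asynchronous cancel: dequeue the timer if pending, but don't wait, so
 * its expiry callback may still be running when this returns.
 */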
void cancel_timer_async(struct timer *t)
{
	lock(&timer_lock);
	if (t->link.next)
		__remove_timer(t);
	unlock(&timer_lock);
}

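/*
 * (Re)schedule a timer for an absolute timebase value, keeping the
 * pending list sorted by ascending target.
 */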
void schedule_timer_at(struct timer *t, uint64_t when)
{
	struct timer *lt;

	lock(&timer_lock);
	if (t->link.next)
		__remove_timer(t);
	t->target = when;
	list_for_each(&timer_list, lt, link) {
		if (when < lt->target) {
			list_add_before(&timer_list, &t->link, &lt->link);
			unlock(&timer_lock);
			return;
		}
	}
	list_add_tail(&timer_list, &t->link);
	unlock(&timer_lock);
}

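/* Schedule a timer relative to the current timebase */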
void schedule_timer(struct timer *t, uint64_t how_long)
{
	schedule_timer_at(t, mftb() + how_long);
}

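/*
 * Run expired timers. Meant to be called from the pollers; expiry
 * callbacks are invoked with timer_lock dropped.
 */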
void check_timers(void)
{
	struct timer *t;
	uint64_t now = mftb();

	/* Lockless "peek", a bit racy but shouldn't be a problem */
	t = list_top(&timer_list, struct timer, link);
	if (!t || t->target > now)
		return;

	/* Take lock and try again */
	lock(&timer_lock);
	for (;;) {
		t = list_top(&timer_list, struct timer, link);
		now = mftb();

		/* Top of list not expired? That's it ... */
		if (!t || t->target > now)
			break;

		/* Top of list still running, we have to delay handling it.
		 * For now just skip until the next poll; once we have SLW
		 * interrupts, we'll probably want to trip another one ASAP.
		 */
		if (t->running)
			break;

		/* All right, first remove it and mark it running */
		__remove_timer(t);
		t->running = this_cpu();

		/* Now we can unlock and call its expiry */
		unlock(&timer_lock);
		t->expiry(t, t->user_data);

		/* Re-lock and mark not running */
		lock(&timer_lock);
		t->running = NULL;
	}
	unlock(&timer_lock);
}
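
/*
 * Illustrative usage sketch, kept compiled out: a hypothetical consumer
 * that re-arms itself from its own expiry callback. The heartbeat names
 * are made up for this example, and msecs_to_tb() is assumed to be
 * provided by timebase.h.
 */
#if 0
static struct timer heartbeat;

static void heartbeat_expiry(struct timer *t, void *data)
{
	/* Do the periodic work, then re-arm one second from now */
	schedule_timer(t, msecs_to_tb(1000));
}

static void heartbeat_start(void)
{
	init_timer(&heartbeat, heartbeat_expiry, NULL);
	schedule_timer(&heartbeat, msecs_to_tb(1000));
}

static void heartbeat_stop(void)
{
	/* Waits for a running heartbeat_expiry() before dequeuing */
	cancel_timer(&heartbeat);
}
#endif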