/* Copyright 2013-2014 IBM Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * 	http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __LOCK_H
#define __LOCK_H

#include <stdbool.h>
#include <processor.h>

struct lock {
	/* Lock value has bit 63 as the lock bit and the PIR of the
	 * owner in the top 32 bits.
	 */
	uint64_t lock_val;

	/*
	 * Set to true if lock is involved in the console flush path
	 * in which case taking it will suspend console flushing
	 */
	bool in_con_path;
};

/* Initializer */
#define LOCK_UNLOCKED	{ .lock_val = 0, .in_con_path = 0 }
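
/* A minimal usage sketch (not part of this header; the names below are
 * hypothetical): a lock with static storage duration is typically
 * initialized with LOCK_UNLOCKED and then taken around the shared state
 * it protects.
 *
 *	static struct lock my_lock = LOCK_UNLOCKED;
 *	static uint64_t shared_count;
 *
 *	static void bump_shared_count(void)
 *	{
 *		lock(&my_lock);
 *		shared_count++;
 *		unlock(&my_lock);
 *	}
 */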

/* Note vs. libc and locking:
 *
 * The printf() family of functions use stack-based temporary buffers
 * and call into skiboot's underlying read() and write(), which take a
 * console lock.
 *
 * The underlying FSP console code will thus operate within that
 * console lock.
 *
 * The libc does *NOT* lock stream buffer operations, so don't
 * try to scanf() from the same FILE from two different processors.
 *
 * FSP operations are locked using an FSP lock, so all processors
 * can safely call the FSP API.
 *
 * Note about ordering:
 *
 * lock() is a full memory barrier; unlock() is a lwsync.
 */

extern bool bust_locks;

static inline void init_lock(struct lock *l)
{
	*l = (struct lock)LOCK_UNLOCKED;
}
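
/* Sketch, assuming a hypothetical runtime-allocated structure: a lock
 * embedded in dynamically created objects should be initialized with
 * init_lock() before first use.
 *
 *	struct widget {
 *		struct lock wlock;
 *		int state;
 *	};
 *
 *	static void widget_setup(struct widget *w)
 *	{
 *		init_lock(&w->wlock);
 *		w->state = 0;
 *	}
 */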

#ifndef __TEST__
/*
 * Bare cmpxchg, no barriers.
 */
static inline uint32_t __cmpxchg32(uint32_t *mem, uint32_t old, uint32_t new)
{
	uint32_t prev;

	asm volatile(
		"# __cmpxchg32		\n"
		"1:	lwarx	%0,0,%2	\n"
		"	cmpw	%0,%3	\n"
		"	bne-	2f	\n"
		"	stwcx.	%4,0,%2	\n"
		"	bne-	1b	\n"
		"2:			\n"

		: "=&r"(prev), "+m"(*mem)
		: "r"(mem), "r"(old), "r"(new)
		: "cr0");

	return prev;
}

static inline uint64_t __cmpxchg64(uint64_t *mem, uint64_t old, uint64_t new)
{
	uint64_t prev;

	asm volatile(
		"# __cmpxchg64		\n"
		"1:	ldarx	%0,0,%2	\n"
		"	cmpd	%0,%3	\n"
		"	bne-	2f	\n"
		"	stdcx.	%4,0,%2	\n"
		"	bne-	1b	\n"
		"2:			\n"

		: "=&r"(prev), "+m"(*mem)
		: "r"(mem), "r"(old), "r"(new)
		: "cr0");

	return prev;
}

static inline uint32_t cmpxchg32(uint32_t *mem, uint32_t old, uint32_t new)
{
	uint32_t prev;

	sync();
	prev = __cmpxchg32(mem, old, new);
	sync();

	return prev;
}
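
/* Example of the usual compare-and-swap retry pattern built on
 * cmpxchg32() (the counter and helper below are hypothetical):
 * cmpxchg32() returns the previous value of *mem, so the swap
 * succeeded exactly when that value equals the expected one.
 *
 *	static uint32_t counter;
 *
 *	static void counter_inc(void)
 *	{
 *		uint32_t old;
 *
 *		do {
 *			old = counter;
 *		} while (cmpxchg32(&counter, old, old + 1) != old);
 *	}
 */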

#endif /* __TEST__ */

extern bool try_lock(struct lock *l);
extern void lock(struct lock *l);
extern void unlock(struct lock *l);

extern bool lock_held_by_me(struct lock *l);
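
/* Sketch of try_lock() use (hypothetical names): try_lock() returns
 * true if the lock was acquired, letting callers skip or defer work
 * instead of spinning in lock().
 *
 *	static void poll_widget(struct widget *w)
 *	{
 *		if (!try_lock(&w->wlock))
 *			return;			(busy: skip this poll)
 *		w->state++;
 *		unlock(&w->wlock);
 *	}
 */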

/* The debug output can happen while holding the FSP lock, so we need
 * some kind of recursive lock support here. I don't want all locks to
 * be recursive though, so the caller needs to explicitly call
 * lock_recursive(), which returns false if the lock was already held
 * by this cpu. If it returns true, then the caller shall release it
 * when done.
 */
extern bool lock_recursive(struct lock *l);
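
/* Sketch of the lock_recursive() protocol described above (the lock
 * and helpers are hypothetical): only release the lock if this call
 * actually took it.
 *
 *	static void debug_print_path(void)
 *	{
 *		bool taken = lock_recursive(&fsp_lock);
 *
 *		emit_debug_output();
 *		if (taken)
 *			unlock(&fsp_lock);
 *	}
 */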

/* Called after per-cpu data structures are available */
extern void init_locks(void);

#endif /* __LOCK_H */