/* spinlock.h: 64-bit Sparc spinlock support.
 *
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 */

#ifndef __SPARC64_SPINLOCK_H
#define __SPARC64_SPINLOCK_H

#ifndef __ASSEMBLY__

/* To get debugging spinlocks which detect and catch
 * deadlock situations, set CONFIG_DEBUG_SPINLOCK
 * and rebuild your kernel.
 */

/* All of these locking primitives are expected to work properly
 * even in an RMO memory model, which currently is what the kernel
 * runs in.
 *
 * There is another issue. Because we play games to save cycles
 * in the non-contention case, we need to be extra careful about
 * branch targets into the "spinning" code. They live in their
 * own section, but the newer V9 branches have a shorter range
 * than the traditional 32-bit sparc branch variants. The rule
 * is that the branches that go into and out of the spinner sections
 * must be pre-V9 branches.
 */
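
/* Summary of the lock layout used by the primitives below (derived
 * from the code itself): a spinlock is a single byte which is zero
 * when free and set to 0xff by ldstub while held; an rwlock is a
 * 32-bit word holding the reader count in its low bits, with
 * 0x80000000 serving as the writer bit.
 */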

#define __raw_spin_is_locked(lp)	((lp)->lock != 0)

#define __raw_spin_unlock_wait(lp)	\
	do {	rmb();			\
	} while((lp)->lock)

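/* Lock acquisition: ldstub atomically sets the lock byte to 0xff and
 * yields the old value; zero means we now own the lock.  On contention
 * we branch to the out-of-line code in .subsection 2 and spin with a
 * plain ldub until the byte reads zero, then retry the ldstub.
 * Roughly, as an illustrative sketch only (xchg_u8() is a hypothetical
 * atomic exchange standing in for ldstub, not a real helper):
 *
 *	while (xchg_u8(&lock->lock, 0xff) != 0)
 *		while (lock->lock != 0)
 *			cpu_relax();
 */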
static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	unsigned long tmp;

	__asm__ __volatile__(
"1:	ldstub	[%1], %0\n"
"	membar	#StoreLoad | #StoreStore\n"
"	brnz,pn	%0, 2f\n"
"	 nop\n"
"	.subsection	2\n"
"2:	ldub	[%1], %0\n"
"	membar	#LoadLoad\n"
"	brnz,pt	%0, 2b\n"
"	 nop\n"
"	ba,a,pt	%%xcc, 1b\n"
"	.previous"
	: "=&r" (tmp)
	: "r" (lock)
	: "memory");
}

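/* Trylock makes a single ldstub attempt and reports success when the
 * previous value of the lock byte was zero.
 */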
static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	unsigned long result;

	__asm__ __volatile__(
"	ldstub	[%1], %0\n"
"	membar	#StoreLoad | #StoreStore"
	: "=r" (result)
	: "r" (lock)
	: "memory");

	return (result == 0UL);
}

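/* Release: order prior loads and stores before the store that clears
 * the lock byte.
 */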
static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__asm__ __volatile__(
"	membar	#StoreStore | #LoadStore\n"
"	stb	%%g0, [%0]"
	: /* No outputs */
	: "r" (lock)
	: "memory");
}

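/* Like __raw_spin_lock(), but while spinning on a contended lock we
 * temporarily drop %pil back to the value the caller passed in
 * 'flags', so interrupts can be serviced, and raise it again before
 * retrying the ldstub.
 */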
static inline void __raw_spin_lock_flags(raw_spinlock_t *lock, unsigned long flags)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"1:	ldstub	[%2], %0\n"
"	membar	#StoreLoad | #StoreStore\n"
"	brnz,pn	%0, 2f\n"
"	 nop\n"
"	.subsection	2\n"
"2:	rdpr	%%pil, %1\n"
"	wrpr	%3, %%pil\n"
"3:	ldub	[%2], %0\n"
"	membar	#LoadLoad\n"
"	brnz,pt	%0, 3b\n"
"	 nop\n"
"	ba,pt	%%xcc, 1b\n"
"	 wrpr	%1, %%pil\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r"(lock), "r"(flags)
	: "memory");
}

/* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */

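/* Reader acquisition: load the counter as a signed 32-bit value; a
 * negative value means the writer bit is set, so we spin out of line
 * until it clears, then try to cas in count+1, retrying on races.
 * Roughly, as an illustrative sketch only (cas32() is a hypothetical
 * compare-and-swap returning the old value, standing in for the cas
 * instruction):
 *
 *	do {
 *		while ((old = (s32) lock->lock) < 0)
 *			cpu_relax();
 *	} while (cas32(&lock->lock, old, old + 1) != old);
 */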
static void inline __read_lock(raw_rwlock_t *lock)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__ (
"1:	ldsw	[%2], %0\n"
"	brlz,pn	%0, 2f\n"
"4:	 add	%0, 1, %1\n"
"	cas	[%2], %0, %1\n"
"	cmp	%0, %1\n"
"	membar	#StoreLoad | #StoreStore\n"
"	bne,pn	%%icc, 1b\n"
"	 nop\n"
"	.subsection	2\n"
"2:	ldsw	[%2], %0\n"
"	membar	#LoadLoad\n"
"	brlz,pt	%0, 2b\n"
"	 nop\n"
"	ba,a,pt	%%xcc, 4b\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");
}

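/* Reader trylock: same cas sequence, but if the counter is negative
 * (writer held) we give up immediately and return 0; a cas race just
 * retries.  Returns 1 on success.
 */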
static int inline __read_trylock(raw_rwlock_t *lock)
{
	int tmp1, tmp2;

	__asm__ __volatile__ (
"1:	ldsw	[%2], %0\n"
"	brlz,a,pn	%0, 2f\n"
"	 mov	0, %0\n"
"	add	%0, 1, %1\n"
"	cas	[%2], %0, %1\n"
"	cmp	%0, %1\n"
"	membar	#StoreLoad | #StoreStore\n"
"	bne,pn	%%icc, 1b\n"
"	 mov	1, %0\n"
"2:"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");

	return tmp1;
}

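/* Reader release: order the critical section's accesses before the
 * decrement, then cas the counter down by one, retrying on races.
 */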
static void inline __read_unlock(raw_rwlock_t *lock)
{
	unsigned long tmp1, tmp2;

	__asm__ __volatile__(
"	membar	#StoreLoad | #LoadLoad\n"
"1:	lduw	[%2], %0\n"
"	sub	%0, 1, %1\n"
"	cas	[%2], %0, %1\n"
"	cmp	%0, %1\n"
"	bne,pn	%%xcc, 1b\n"
"	 nop"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock)
	: "memory");
}

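/* Writer acquisition: wait (out of line) until the whole word reads
 * zero, i.e. no readers and no writer, then cas in the 0x80000000
 * writer bit, retrying if we race with another update.
 */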
static void inline __write_lock(raw_rwlock_t *lock)
{
	unsigned long mask, tmp1, tmp2;

	mask = 0x80000000UL;

	__asm__ __volatile__(
"1:	lduw	[%2], %0\n"
"	brnz,pn	%0, 2f\n"
"4:	 or	%0, %3, %1\n"
"	cas	[%2], %0, %1\n"
"	cmp	%0, %1\n"
"	membar	#StoreLoad | #StoreStore\n"
"	bne,pn	%%icc, 1b\n"
"	 nop\n"
"	.subsection	2\n"
"2:	lduw	[%2], %0\n"
"	membar	#LoadLoad\n"
"	brnz,pt	%0, 2b\n"
"	 nop\n"
"	ba,a,pt	%%xcc, 4b\n"
"	.previous"
	: "=&r" (tmp1), "=&r" (tmp2)
	: "r" (lock), "r" (mask)
	: "memory");
}

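/* Writer release: order the critical section before the store that
 * clears the whole word.
 */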
static void inline __write_unlock(raw_rwlock_t *lock)
{
	__asm__ __volatile__(
"	membar	#LoadStore | #StoreStore\n"
"	stw	%%g0, [%0]"
	: /* no outputs */
	: "r" (lock)
	: "memory");
}

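/* Writer trylock: a single logical attempt; fail with 0 as soon as
 * the word is seen non-zero, retry only if the cas itself races, and
 * return 1 once the writer bit is installed.
 */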
static int inline __write_trylock(raw_rwlock_t *lock)
{
	unsigned long mask, tmp1, tmp2, result;

	mask = 0x80000000UL;

	__asm__ __volatile__(
"	mov	0, %2\n"
"1:	lduw	[%3], %0\n"
"	brnz,pn	%0, 2f\n"
"	 or	%0, %4, %1\n"
"	cas	[%3], %0, %1\n"
"	cmp	%0, %1\n"
"	membar	#StoreLoad | #StoreStore\n"
"	bne,pn	%%icc, 1b\n"
"	 nop\n"
"	mov	1, %2\n"
"2:"
	: "=&r" (tmp1), "=&r" (tmp2), "=&r" (result)
	: "r" (lock), "r" (mask)
	: "memory");

	return result;
}

#define __raw_read_lock(p)	__read_lock(p)
#define __raw_read_trylock(p)	__read_trylock(p)
#define __raw_read_unlock(p)	__read_unlock(p)
#define __raw_write_lock(p)	__write_lock(p)
#define __raw_write_unlock(p)	__write_unlock(p)
#define __raw_write_trylock(p)	__write_trylock(p)

#define __raw_read_can_lock(rw)		(!((rw)->lock & 0x80000000UL))
#define __raw_write_can_lock(rw)	(!(rw)->lock)

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* !(__ASSEMBLY__) */

#endif /* !(__SPARC64_SPINLOCK_H) */