#ifndef _ASM_X86_SPINLOCK_H
#define _ASM_X86_SPINLOCK_H

#include <asm/atomic.h>
#include <asm/rwlock.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <linux/compiler.h>
#include <asm/paravirt.h>

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 *
 * Simple spin lock operations. There are two variants, one clears IRQ's
 * on the local processor, one does not.
 *
 * These are fair FIFO ticket locks, which are currently limited to 256
 * CPUs.
 *
 * (the type definitions are in asm/spinlock_types.h)
 */

#ifdef CONFIG_X86_32
# define LOCK_PTR_REG "a"
# define REG_PTR_MODE "k"
#else
# define LOCK_PTR_REG "D"
# define REG_PTR_MODE "q"
#endif

#if defined(CONFIG_X86_32) && \
	(defined(CONFIG_X86_OOSTORE) || defined(CONFIG_X86_PPRO_FENCE))
/*
 * On PPro SMP or if we are using OOSTORE, we use a locked operation to unlock
 * (PPro errata 66, 92)
 */
# define UNLOCK_LOCK_PREFIX LOCK_PREFIX
#else
# define UNLOCK_LOCK_PREFIX
#endif
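
/*
 * When UNLOCK_LOCK_PREFIX expands to LOCK_PREFIX, the unlock paths
 * below become "lock incb"/"lock incw" instead of a plain increment,
 * trading some unlock cost for correct behaviour on the affected parts.
 */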

/*
 * Ticket locks are conceptually two parts, one indicating the current head of
 * the queue, and the other indicating the current tail. The lock is acquired
 * by atomically noting the tail and incrementing it by one (thus adding
 * ourself to the queue and noting our position), then waiting until the head
 * becomes equal to the initial value of the tail.
 *
 * We use an xadd covering *both* parts of the lock, to increment the tail and
 * also load the position of the head, which takes care of memory ordering
 * issues and should be optimal for the uncontended case. Note the tail must be
 * in the high part, because a wide xadd increment of the low part would carry
 * up and contaminate the high part.
 *
 * With fewer than 2^8 possible CPUs, we can use x86's partial registers to
 * save some instructions and make the code more elegant. There really isn't
 * much between them in performance though, especially as locks are out of line.
 */
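
#if 0
/*
 * Illustration only, never compiled: the same ticket algorithm in
 * portable C, using GCC's __sync_fetch_and_add() builtin in place of
 * the hand-coded locked xadd, and assuming the little-endian 8-bit
 * ticket layout used below (head in the low byte, tail in the high
 * byte). The _c-suffixed name is made up for this sketch.
 */
static __always_inline void __ticket_spin_lock_c(raw_spinlock_t *lock)
{
	/* take a ticket: the old tail comes back in bits 15:8 */
	unsigned short old = __sync_fetch_and_add(
			(volatile unsigned short *)&lock->slock, 0x0100);
	unsigned char my_ticket = old >> 8;

	/* wait until the head (owner) byte reaches our ticket */
	while (*(volatile unsigned char *)&lock->slock != my_ticket)
		cpu_relax();
}
#endif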
#if (NR_CPUS < 256)
#define TICKET_SHIFT 8

static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
{
	short inc = 0x0100;

	asm volatile (
		/* take a ticket: old head in %b0, our ticket (old tail) in %h0 */
		LOCK_PREFIX "xaddw %w0, %1\n"
		"1:\t"
		"cmpb %h0, %b0\n\t"	/* has the head reached our ticket? */
		"je 2f\n\t"
		"rep ; nop\n\t"		/* pause while we spin */
		"movb %1, %b0\n\t"	/* reload the head byte from memory */
		/* don't need lfence here, because loads are in-order */
		"jmp 1b\n"
		"2:"
		: "+Q" (inc), "+m" (lock->slock)
		:
		: "memory", "cc");
}

static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
{
	int tmp, new;

	asm volatile("movzwl %2, %0\n\t"	/* read head and tail together */
		     "cmpb %h0,%b0\n\t"		/* only try when head == tail */
		     "leal 0x100(%" REG_PTR_MODE "0), %1\n\t" /* new = old + one ticket */
		     "jne 1f\n\t"
		     LOCK_PREFIX "cmpxchgw %w1,%2\n\t"
		     "1:"
		     "sete %b1\n\t"		/* ZF set iff the cmpxchg won */
		     "movzbl %b1,%0\n\t"
		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
		     :
		     : "memory", "cc");

	return tmp;
}
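
#if 0
/*
 * Illustration only, never compiled: a rough C equivalent of the
 * trylock above, with GCC's __sync_bool_compare_and_swap() builtin
 * standing in for the locked cmpxchg. The _c-suffixed name is made up
 * for this sketch.
 */
static __always_inline int __ticket_spin_trylock_c(raw_spinlock_t *lock)
{
	unsigned short old = *(volatile unsigned short *)&lock->slock;

	/* only worth trying when head == tail, i.e. nobody is queued */
	if ((unsigned char)old != (unsigned char)(old >> 8))
		return 0;

	/* claim the next ticket iff the lock word is still unchanged */
	return __sync_bool_compare_and_swap(
			(volatile unsigned short *)&lock->slock,
			old, (unsigned short)(old + 0x0100));
}
#endif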

static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
{
	asm volatile(UNLOCK_LOCK_PREFIX "incb %0"	/* advance the head */
		     : "+m" (lock->slock)
		     :
		     : "memory", "cc");
}
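
/*
 * Note that "incb %0" above bumps only the lowest-addressed byte of
 * slock, which on little-endian x86 is the head (owner) ticket; the
 * tail in the high byte is left untouched.
 */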
#else
#define TICKET_SHIFT 16

static __always_inline void __ticket_spin_lock(raw_spinlock_t *lock)
{
	int inc = 0x00010000;
	int tmp;

	asm volatile(LOCK_PREFIX "xaddl %0, %1\n"	/* take a ticket */
		     "movzwl %w0, %2\n\t"		/* tmp = head snapshot */
		     "shrl $16, %0\n\t"			/* inc = our ticket */
		     "1:\t"
		     "cmpl %0, %2\n\t"			/* head reached our ticket? */
		     "je 2f\n\t"
		     "rep ; nop\n\t"
		     "movzwl %1, %2\n\t"		/* reload the head word */
		     /* don't need lfence here, because loads are in-order */
		     "jmp 1b\n"
		     "2:"
		     : "+r" (inc), "+m" (lock->slock), "=&r" (tmp)
		     :
		     : "memory", "cc");
}

static __always_inline int __ticket_spin_trylock(raw_spinlock_t *lock)
{
	int tmp;
	int new;

	asm volatile("movl %2,%0\n\t"
		     "movl %0,%1\n\t"
		     "roll $16, %0\n\t"		/* swap the head and tail halves */
		     "cmpl %0,%1\n\t"		/* equal iff head == tail */
		     "leal 0x00010000(%" REG_PTR_MODE "0), %1\n\t" /* new = old + one ticket */
		     "jne 1f\n\t"
		     LOCK_PREFIX "cmpxchgl %1,%2\n\t"
		     "1:"
		     "sete %b1\n\t"		/* ZF set iff the cmpxchg won */
		     "movzbl %b1,%0\n\t"
		     : "=&a" (tmp), "=&q" (new), "+m" (lock->slock)
		     :
		     : "memory", "cc");

	return tmp;
}
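
#if 0
/*
 * Illustration only, never compiled: what the roll trick above
 * computes, in plain C. Rotating by 16 swaps the two halves, so the
 * rotated word equals the original exactly when head == tail, i.e. the
 * lock is free. The _c-suffixed name is made up for this sketch.
 */
static __always_inline int __ticket_spin_trylock_c(raw_spinlock_t *lock)
{
	unsigned int old = *(volatile unsigned int *)&lock->slock;

	if ((old >> 16) != (old & 0xffff))
		return 0;	/* head != tail: held or queued */

	/* bump the tail by one ticket iff nothing changed under us */
	return __sync_bool_compare_and_swap(&lock->slock, old,
					    old + 0x00010000);
}
#endif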

static __always_inline void __ticket_spin_unlock(raw_spinlock_t *lock)
{
	asm volatile(UNLOCK_LOCK_PREFIX "incw %0"	/* advance the head */
		     : "+m" (lock->slock)
		     :
		     : "memory", "cc");
}
#endif

static inline int __ticket_spin_is_locked(raw_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->slock);

	return !!(((tmp >> TICKET_SHIFT) ^ tmp) & ((1 << TICKET_SHIFT) - 1));
}

static inline int __ticket_spin_is_contended(raw_spinlock_t *lock)
{
	int tmp = ACCESS_ONCE(lock->slock);

	return (((tmp >> TICKET_SHIFT) - tmp) & ((1 << TICKET_SHIFT) - 1)) > 1;
}
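
/*
 * Worked example with TICKET_SHIFT == 8: slock == 0x0505 has head ==
 * tail, so the lock is free; slock == 0x0705 means the owner holds
 * ticket 5 and one more CPU waits with ticket 6 (tail - head == 2 > 1),
 * so the lock is both locked and contended.
 */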

#ifdef CONFIG_PARAVIRT
/*
 * Define virtualization-friendly old-style lock byte lock, for use in
 * pv_lock_ops if desired.
 *
 * This differs from the pre-2.6.24 spinlock by always using xchgb
 * rather than decb to take the lock; this allows it to use a
 * zero-initialized lock structure. It also maintains a 1-byte
 * contention counter, so that we can implement
 * __byte_spin_is_contended.
 */
struct __byte_spinlock {
	s8 lock;
	s8 spinners;
};

static inline int __byte_spin_is_locked(raw_spinlock_t *lock)
{
	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
	return bl->lock != 0;
}

static inline int __byte_spin_is_contended(raw_spinlock_t *lock)
{
	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
	return bl->spinners != 0;
}

static inline void __byte_spin_lock(raw_spinlock_t *lock)
{
	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
	s8 val = 1;

	asm("1: xchgb %1, %0\n"
	    " test %1,%1\n"
	    " jz 3f\n"				/* got it: old byte was 0 */
	    " " LOCK_PREFIX "incb %2\n"		/* count ourselves as a spinner */
	    "2: rep;nop\n"
	    " cmpb $1, %0\n"
	    " je 2b\n"				/* spin while the lock byte is set */
	    " " LOCK_PREFIX "decb %2\n"		/* uncount, then retry the xchg */
	    " jmp 1b\n"
	    "3:"
	    : "+m" (bl->lock), "+q" (val), "+m" (bl->spinners): : "memory");
}

static inline int __byte_spin_trylock(raw_spinlock_t *lock)
{
	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
	u8 old = 1;

	asm("xchgb %1,%0"
	    : "+m" (bl->lock), "+q" (old) : : "memory");

	return old == 0;	/* acquired iff the lock byte was clear */
}

static inline void __byte_spin_unlock(raw_spinlock_t *lock)
{
	struct __byte_spinlock *bl = (struct __byte_spinlock *)lock;
	smp_wmb();		/* order critical-section stores before release */
	bl->lock = 0;
}
#else  /* !CONFIG_PARAVIRT */
static inline int __raw_spin_is_locked(raw_spinlock_t *lock)
{
	return __ticket_spin_is_locked(lock);
}

static inline int __raw_spin_is_contended(raw_spinlock_t *lock)
{
	return __ticket_spin_is_contended(lock);
}

static __always_inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	__ticket_spin_lock(lock);
}

static __always_inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	return __ticket_spin_trylock(lock);
}

static __always_inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	__ticket_spin_unlock(lock);
}

static __always_inline void __raw_spin_lock_flags(raw_spinlock_t *lock,
						  unsigned long flags)
{
	__raw_spin_lock(lock);
}

#endif	/* CONFIG_PARAVIRT */

static inline void __raw_spin_unlock_wait(raw_spinlock_t *lock)
{
	while (__raw_spin_is_locked(lock))
		cpu_relax();
}

/*
 * Read-write spinlocks, allowing multiple readers
 * but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts
 * but no interrupt writers. For those circumstances we
 * can "mix" irq-safe locks - any writer needs to get an
 * irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 *
 * On x86, we implement read-write locks as a 32-bit counter
 * with the high bit (sign) being the "contended" bit.
 */
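
#if 0
/*
 * Illustration only, never compiled: how the bias arithmetic plays
 * out, using the RW_LOCK_BIAS value from <asm/rwlock.h>. The counter
 * starts at RW_LOCK_BIAS; each reader subtracts 1 and a writer
 * subtracts the whole bias, so it stays positive while only readers
 * are in, hits exactly zero for a lone writer, and goes negative (the
 * sign/"contended" bit) when a writer must wait. The function name is
 * made up for this sketch.
 */
static inline void __rwlock_bias_example(void)
{
	unsigned int count = RW_LOCK_BIAS;	/* unlocked */

	count -= 1;		/* one reader:  RW_LOCK_BIAS - 1, still > 0 */
	count -= 1;		/* two readers: RW_LOCK_BIAS - 2, still > 0 */
	count += 2;		/* both readers unlock                      */
	count -= RW_LOCK_BIAS;	/* lone writer: exactly 0, write lock taken */
	(void)count;
}
#endif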

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_read_can_lock(raw_rwlock_t *lock)
{
	return (int)(lock)->lock > 0;
}

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
static inline int __raw_write_can_lock(raw_rwlock_t *lock)
{
	return (lock)->lock == RW_LOCK_BIAS;
}

static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
		     "jns 1f\n"				/* stayed non-negative: got it */
		     "call __read_lock_failed\n\t"	/* out-of-line slow path */
		     "1:\n"
		     ::LOCK_PTR_REG (rw) : "memory");
}

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t"
		     "jz 1f\n"				/* hit exactly zero: got it */
		     "call __write_lock_failed\n\t"	/* out-of-line slow path */
		     "1:\n"
		     ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory");
}

static inline int __raw_read_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	atomic_dec(count);
	if (atomic_read(count) >= 0)
		return 1;
	atomic_inc(count);	/* a writer is in or waiting: back out */
	return 0;
}

static inline int __raw_write_trylock(raw_rwlock_t *lock)
{
	atomic_t *count = (atomic_t *)lock;

	if (atomic_sub_and_test(RW_LOCK_BIAS, count))
		return 1;	/* count hit zero: no readers, no writer */
	atomic_add(RW_LOCK_BIAS, count);
	return 0;
}

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory");
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	asm volatile(LOCK_PREFIX "addl %1, %0"
		     : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory");
}

#define _raw_spin_relax(lock)	cpu_relax()
#define _raw_read_relax(lock)	cpu_relax()
#define _raw_write_relax(lock)	cpu_relax()

#endif /* _ASM_X86_SPINLOCK_H */