#ifndef __ASM_SPINLOCK_H
#define __ASM_SPINLOCK_H

#if __LINUX_ARM_ARCH__ < 6
#error SMP not supported on pre-ARMv6 CPUs
#endif

/*
 * ARMv6 Spin-locking.
 *
 * We exclusively read the old value.  If it is zero, we may have
 * won the lock, so we try exclusively storing it.  A memory barrier
 * is required after we get a lock, and before we release it, because
 * V6 CPUs are assumed to have weakly ordered memory.
 *
 * Unlocked value: 0
 * Locked value: 1
 */

#define __raw_spin_is_locked(x)		((x)->lock != 0)
#define __raw_spin_unlock_wait(lock) \
	do { while (__raw_spin_is_locked(lock)) cpu_relax(); } while (0)

#define __raw_spin_lock_flags(lock, flags) __raw_spin_lock(lock)

static inline void __raw_spin_lock(raw_spinlock_t *lock)
{
	unsigned long tmp;

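	/*
	 * Spin until the exclusive load sees 0 and the conditional
	 * exclusive store of 1 succeeds: a held lock or a failed strex
	 * leaves tmp non-zero, so we branch back and retry.
	 */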
	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]\n"
"	teqeq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	smp_mb();
}

static inline int __raw_spin_trylock(raw_spinlock_t *lock)
{
	unsigned long tmp;

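	/*
	 * Single attempt, no retry loop: tmp ends up 0 only if the lock
	 * was free and our exclusive store of 1 succeeded.
	 */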
	__asm__ __volatile__(
"	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&lock->lock), "r" (1)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void __raw_spin_unlock(raw_spinlock_t *lock)
{
	smp_mb();

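	/*
	 * We own the lock, so a plain store of 0 releases it; the
	 * barrier above orders the critical section before the store.
	 */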
	__asm__ __volatile__(
"	str	%1, [%0]"
	:
	: "r" (&lock->lock), "r" (0)
	: "cc");
}

/*
 * RWLOCKS
 *
 * Write locks are easy - we just set bit 31.  When unlocking, we can
 * just write zero since the lock is exclusively held.
 */
#define rwlock_is_locked(x)	(*((volatile unsigned int *)(x)) != 0)

static inline void __raw_write_lock(raw_rwlock_t *rw)
{
	unsigned long tmp;

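	/*
	 * Wait until the whole word reads as 0 (no readers, no writer),
	 * then exclusively store 0x80000000 to claim the write lock.
	 */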
	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]\n"
"	teq	%0, #0\n"
"	bne	1b"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	smp_mb();
}

static inline int __raw_write_trylock(raw_rwlock_t *rw)
{
	unsigned long tmp;

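	/*
	 * One attempt to move the lock word from 0 to 0x80000000;
	 * tmp is 0 only on success.
	 */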
	__asm__ __volatile__(
"1:	ldrex	%0, [%1]\n"
"	teq	%0, #0\n"
"	strexeq	%0, %2, [%1]"
	: "=&r" (tmp)
	: "r" (&rw->lock), "r" (0x80000000)
	: "cc");

	if (tmp == 0) {
		smp_mb();
		return 1;
	} else {
		return 0;
	}
}

static inline void __raw_write_unlock(raw_rwlock_t *rw)
{
	smp_mb();

	__asm__ __volatile__(
	"str	%1, [%0]"
	:
	: "r" (&rw->lock), "r" (0)
	: "cc");
}

/*
 * Read locks are a bit more hairy:
 *  - Exclusively load the lock value.
 *  - Increment it.
 *  - Store new lock value if positive, and we still own this location.
 *    If the value is negative, we've already failed.
 *  - If we failed to store the value, we want a negative result.
 *  - If we failed, try again.
 *
 * Unlocking is similarly hairy.  We may have multiple read locks
 * currently active.  However, we know we won't have any write
 * locks.
 */
static inline void __raw_read_lock(raw_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

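	/*
	 * Increment the reader count.  While a writer holds bit 31 the
	 * incremented value stays negative, so strexpl skips the store
	 * and bmi retries; rsbpls turns a failed strex into a negative
	 * value so that case retries as well.
	 */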
	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	adds	%0, %0, #1\n"
"	strexpl	%1, %0, [%2]\n"
"	rsbpls	%0, %1, #0\n"
"	bmi	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");

	smp_mb();
}

static inline void __raw_read_unlock(raw_rwlock_t *rw)
{
	unsigned long tmp, tmp2;

	smp_mb();

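	/*
	 * Decrement the reader count with an exclusive retry loop:
	 * other readers may be updating the count concurrently, so a
	 * failed strex simply tries again.
	 */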
	__asm__ __volatile__(
"1:	ldrex	%0, [%2]\n"
"	sub	%0, %0, #1\n"
"	strex	%1, %0, [%2]\n"
"	teq	%1, #0\n"
"	bne	1b"
	: "=&r" (tmp), "=&r" (tmp2)
	: "r" (&rw->lock)
	: "cc");
}

#define __raw_read_trylock(lock) generic__raw_read_trylock(lock)

#endif /* __ASM_SPINLOCK_H */