/*
 * MIPS-specific semaphore code.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 * Copyright (C) 2004 Ralf Baechle <ralf@linux-mips.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * April 2001 - Reworked by Paul Mackerras <paulus@samba.org>
 * to eliminate the SMP races in the old version between the updates
 * of `count' and `waking'.  Now we use negative `count' values to
 * indicate that some process(es) are waiting for the semaphore.
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <asm/atomic.h>
#include <asm/cpu-features.h>
#include <asm/errno.h>
#include <asm/semaphore.h>
#include <asm/war.h>

/*
 * Atomically update sem->count.
 * This does the equivalent of the following:
 *
 *	old_count = sem->count;
 *	tmp = MAX(old_count, 0) + incr;
 *	sem->count = tmp;
 *	return old_count;
 *
 * On machines without lld/scd we need a spinlock to make the manipulation of
 * sem->count and sem->waking atomic.  Scalability isn't an issue because
 * this lock is used on UP only, so it's just an empty variable.
 */
static inline int __sem_update_count(struct semaphore *sem, int incr)
{
	int old_count, tmp;

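	/*
	 * Both ll/sc sequences below compute max(old_count, 0) without a
	 * branch: sra yields -1 if old_count is negative and 0 otherwise,
	 * not turns that into an all-ones mask for non-negative values, and
	 * the and clears negative counts to zero before incr is added.  sc
	 * leaves 1 in the register on success and 0 on failure, so the
	 * beqz/beqzl retries the update until it commits atomically.
	 */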
	if (cpu_has_llsc && R10000_LLSC_WAR) {
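		/*
		 * R10000_LLSC_WAR variant: early R10000 cores need the
		 * branch-likely (beqzl) form of the ll/sc retry loop to
		 * keep the sequence atomic.
		 */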
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	ll	%0, %2		# __sem_update_count	\n"
		"	sra	%1, %0, 31				\n"
		"	not	%1					\n"
		"	and	%1, %0, %1				\n"
		"	addu	%1, %1, %3				\n"
		"	sc	%1, %2					\n"
		"	beqzl	%1, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
		: "r" (incr), "m" (sem->count));
	} else if (cpu_has_llsc) {
		__asm__ __volatile__(
		"	.set	mips3					\n"
		"1:	ll	%0, %2		# __sem_update_count	\n"
		"	sra	%1, %0, 31				\n"
		"	not	%1					\n"
		"	and	%1, %0, %1				\n"
		"	addu	%1, %1, %3				\n"
		"	sc	%1, %2					\n"
		"	beqz	%1, 1b					\n"
		"	.set	mips0					\n"
		: "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
		: "r" (incr), "m" (sem->count));
	} else {
		static DEFINE_SPINLOCK(semaphore_lock);
		unsigned long flags;

		spin_lock_irqsave(&semaphore_lock, flags);
		old_count = atomic_read(&sem->count);
		tmp = max_t(int, old_count, 0) + incr;
		atomic_set(&sem->count, tmp);
		spin_unlock_irqrestore(&semaphore_lock, flags);
	}

	return old_count;
}

void __up(struct semaphore *sem)
{
	/*
	 * Note that we incremented count in up() before we came here,
	 * but that was ineffective since the result was <= 0, and
	 * any negative value of count is equivalent to 0.
	 * This ends up setting count to 1, unless count is now > 0
	 * (i.e. because some other cpu has called up() in the meantime),
	 * in which case we just increment count.
	 */
	__sem_update_count(sem, 1);
	wake_up(&sem->wait);
}

EXPORT_SYMBOL(__up);

/*
 * Note that when we come in to __down or __down_interruptible,
 * we have already decremented count, but that decrement was
 * ineffective since the result was < 0, and any negative value
 * of count is equivalent to 0.
 * Thus it is only when we decrement count from some value > 0
 * that we have actually got the semaphore.
 */
void __sched __down(struct semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
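	/* Queue exclusively, so wake_up() wakes at most one sleeper at a time. */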
	add_wait_queue_exclusive(&sem->wait, &wait);

	/*
	 * Try to get the semaphore.  If the count is > 0, then we've
	 * got the semaphore; we decrement count and exit the loop.
	 * If the count is 0 or negative, we set it to -1, indicating
	 * that we are asleep, and then sleep.
	 */
	while (__sem_update_count(sem, -1) <= 0) {
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}
	remove_wait_queue(&sem->wait, &wait);
	__set_task_state(tsk, TASK_RUNNING);

	/*
	 * If there are any more sleepers, wake one of them up so
	 * that it can either get the semaphore, or set count to -1
	 * indicating that there are still processes sleeping.
	 */
	wake_up(&sem->wait);
}

EXPORT_SYMBOL(__down);

int __sched __down_interruptible(struct semaphore * sem)
{
	int retval = 0;
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	__set_task_state(tsk, TASK_INTERRUPTIBLE);
	add_wait_queue_exclusive(&sem->wait, &wait);

	while (__sem_update_count(sem, -1) <= 0) {
		if (signal_pending(current)) {
			/*
			 * A signal is pending - give up trying.
			 * Set sem->count to 0 if it is negative,
			 * since we are no longer sleeping.
			 */
			__sem_update_count(sem, 0);
			retval = -EINTR;
			break;
		}
		schedule();
		set_task_state(tsk, TASK_INTERRUPTIBLE);
	}
	remove_wait_queue(&sem->wait, &wait);
	__set_task_state(tsk, TASK_RUNNING);

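	/*
	 * As in __down(), wake another sleeper so that it can either get
	 * the semaphore or set count back to -1 to record that processes
	 * are still waiting.
	 */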
	wake_up(&sem->wait);
	return retval;
}

EXPORT_SYMBOL(__down_interruptible);