/*
 * Pull in the generic implementation for the mutex fastpath.
 *
 * TODO: implement optimized primitives instead, or leave the generic
 * implementation in place, or pick the atomic_xchg() based generic
 * implementation. (see asm-generic/mutex-xchg.h for details)
 */

#ifndef _ASM_MUTEX_H
#define _ASM_MUTEX_H

#ifndef CONFIG_SMP
#include <asm-generic/mutex-dec.h>
#else

/*
 * Take the lock by atomically dropping the count from 1 to 0. A
 * negative result means the lock was already held, so enter the
 * slowpath via fail_fn(); on success, smp_mb() provides the acquire
 * ordering that the SMP atomics do not imply on their own.
 */
static inline void
__mutex_fastpath_lock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_dec_return(count) < 0))
		fail_fn(count);
	else
		smp_mb();
}

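/*
 * Illustrative sketch only, not part of this header: the generic mutex
 * core is expected to call the fastpath with its contention handler as
 * fail_fn, roughly as below. The __mutex_lock_slowpath name is assumed
 * here for illustration; it is defined by the core, not by this file.
 *
 *	void mutex_lock(struct mutex *lock)
 *	{
 *		might_sleep();
 *		__mutex_fastpath_lock(&lock->count, __mutex_lock_slowpath);
 *	}
 */
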
/*
 * Locking fastpath that propagates a result: returns 0 when the
 * fastpath takes the lock, otherwise whatever fail_fn() returns
 * (the interruptible slowpath may return -EINTR, for example).
 */
static inline int
__mutex_fastpath_lock_retval(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	if (unlikely(atomic_dec_return(count) < 0))
		return fail_fn(count);
	else {
		smp_mb();
		return 0;
	}
}

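/*
 * Sketch of a caller, with names assumed for illustration: the retval
 * variant backs the interruptible lock, whose slowpath can bail out
 * with an error instead of sleeping until the lock is free.
 *
 *	int mutex_lock_interruptible(struct mutex *lock)
 *	{
 *		might_sleep();
 *		return __mutex_fastpath_lock_retval(&lock->count,
 *				__mutex_lock_interruptible_slowpath);
 *	}
 */
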
/*
 * Release the lock by moving the count back up. The smp_mb() ahead of
 * the increment orders the critical section before the release. A
 * non-positive result means waiters are queued, so call fail_fn() to
 * let the slowpath wake one of them.
 */
static inline void
__mutex_fastpath_unlock(atomic_t *count, void (*fail_fn)(atomic_t *))
{
	smp_mb();
	if (unlikely(atomic_inc_return(count) <= 0))
		fail_fn(count);
}

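/*
 * Matching unlock sketch (again with the core's name assumed):
 *
 *	void mutex_unlock(struct mutex *lock)
 *	{
 *		__mutex_fastpath_unlock(&lock->count, __mutex_unlock_slowpath);
 *	}
 */
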
/*
 * 1 tells the mutex core that the unlock slowpath must still set the
 * count to 1 itself: the fastpath above may leave it below 1 on failure.
 */
#define __mutex_slowpath_needs_to_unlock()	1

static inline int
__mutex_fastpath_trylock(atomic_t *count, int (*fail_fn)(atomic_t *))
{
	/*
	 * We have two variants here. The cmpxchg based one is the best
	 * one because it never induces a false contention state. It is
	 * included here because architectures using the inc/dec
	 * algorithms over the xchg ones are much more likely to support
	 * cmpxchg natively.
	 *
	 * If not, we fall back to the spinlock based variant - that is
	 * as efficient (and simpler) as a 'destructive' probing of the
	 * mutex state would be.
	 */
#ifdef __HAVE_ARCH_CMPXCHG
	if (likely(atomic_cmpxchg(count, 1, 0) == 1)) {
		smp_mb();
		return 1;
	}
	return 0;
#else
	return fail_fn(count);
#endif
}
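
/*
 * Trylock caller sketch, names assumed as above: mutex_trylock()
 * returns 1 on success and 0 if the lock is already held.
 *
 *	int mutex_trylock(struct mutex *lock)
 *	{
 *		return __mutex_fastpath_trylock(&lock->count,
 *						__mutex_trylock_slowpath);
 *	}
 */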

#endif /* CONFIG_SMP */

#endif /* _ASM_MUTEX_H */