FRV: Add support for emulation of userspace atomic ops [try #2]

Use traps 120-126 to emulate atomic cmpxchg32, xchg32, and XOR-, OR-, AND-, SUB-
and ADD-to-memory operations for userspace.
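
For illustration, a userspace wrapper for the cmpxchg32 trap might look
something like the following sketch (the helper name is made up for the
example; the register bindings follow the convention documented in the
handler itself: pointer in gr8, test value in gr9, new value in gr10,
original value returned in gr5, replacement value written back to gr9):

	/* hypothetical userspace helper; not part of this patch */
	static inline unsigned int
	user_cmpxchg32(unsigned int *ptr, unsigned int test, unsigned int new)
	{
		register unsigned int *_p asm("gr8") = ptr;
		register unsigned int _t asm("gr9") = test;
		register unsigned int _n asm("gr10") = new;
		register unsigned int _v asm("gr5");

		/* the kernel performs the compare-and-exchange atomically,
		 * returning the original value in gr5 and writing the
		 * replacement value back to gr9 */
		asm volatile("tira gr0,#120"
			     : "=r"(_v), "+r"(_t)
			     : "r"(_p), "r"(_n)
			     : "memory");

		return _v;	/* equal to `test' iff the store was made */
	}

On success the returned value equals the test value; if the memory
access faults, the process is instead given SIGSEGV with the PC wound
back to the trap instruction.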

Signed-off-by: David Howells <dhowells@redhat.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
diff --git a/arch/frv/kernel/traps.c b/arch/frv/kernel/traps.c
index 2e6098c..2f7e668 100644
--- a/arch/frv/kernel/traps.c
+++ b/arch/frv/kernel/traps.c
@@ -102,6 +102,243 @@
 
 /*****************************************************************************/
 /*
+ * emulate atomic operations, handling faults on the operand memory
+ * - arguments in gr8, gr9, gr10
+ * - original memory value placed in gr5
+ * - replacement memory value placed in gr9
+ */
+asmlinkage void atomic_operation(unsigned long esfr1, unsigned long epcr0,
+				 unsigned long esr0)
+{
+	static DEFINE_SPINLOCK(atomic_op_lock);
+	unsigned long x, y, z, *p;
+	mm_segment_t oldfs;
+	siginfo_t info;
+	int ret;
+
+	y = 0;
+	z = 0;
+
+	oldfs = get_fs();
+	if (!user_mode(__frame))
+		set_fs(KERNEL_DS);
+
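+	/*
+	 * Each operation first issues get_user() outside the lock to
+	 * fault in the page (get_user() may sleep), and then repeats
+	 * the access with __get_user()/__put_user() under the
+	 * irq-disabled spinlock so that the read-modify-write is
+	 * atomic; if the page has been evicted again in the interim,
+	 * the lock is dropped and the whole sequence retried.
+	 */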
+	switch (__frame->tbr & TBR_TT) {
+		/* TIRA gr0,#120
+		 * u32 __atomic_user_cmpxchg32(u32 *ptr, u32 test, u32 new)
+		 */
+	case TBR_TT_ATOMIC_CMPXCHG32:
+		p = (unsigned long *) __frame->gr8;
+		x = __frame->gr9;
+		y = __frame->gr10;
+
+		for (;;) {
+			ret = get_user(z, p);
+			if (ret < 0)
+				goto error;
+
+			if (z != x)
+				goto done;
+
+			spin_lock_irq(&atomic_op_lock);
+
+			if (__get_user(z, p) == 0) {
+				if (z != x)
+					goto done2;
+
+				if (__put_user(y, p) == 0)
+					goto done2;
+				goto error2;
+			}
+
+			spin_unlock_irq(&atomic_op_lock);
+		}
+
+		/* TIRA gr0,#121
+		 * u32 __atomic_kernel_xchg32(void *v, u32 new)
+		 */
+	case TBR_TT_ATOMIC_XCHG32:
+		p = (unsigned long *) __frame->gr8;
+		y = __frame->gr9;
+
+		for (;;) {
+			ret = get_user(z, p);
+			if (ret < 0)
+				goto error;
+
+			spin_lock_irq(&atomic_op_lock);
+
+			if (__get_user(z, p) == 0) {
+				if (__put_user(y, p) == 0)
+					goto done2;
+				goto error2;
+			}
+
+			spin_unlock_irq(&atomic_op_lock);
+		}
+
+		/* TIRA gr0,#122
+		 * ulong __atomic_kernel_XOR_return(ulong i, ulong *v)
+		 */
+	case TBR_TT_ATOMIC_XOR:
+		p = (unsigned long *) __frame->gr8;
+		x = __frame->gr9;
+
+		for (;;) {
+			ret = get_user(z, p);
+			if (ret < 0)
+				goto error;
+
+			spin_lock_irq(&atomic_op_lock);
+
+			if (__get_user(z, p) == 0) {
+				y = x ^ z;
+				if (__put_user(y, p) == 0)
+					goto done2;
+				goto error2;
+			}
+
+			spin_unlock_irq(&atomic_op_lock);
+		}
+
+		/* TIRA gr0,#123
+		 * ulong __atomic_kernel_OR_return(ulong i, ulong *v)
+		 */
+	case TBR_TT_ATOMIC_OR:
+		p = (unsigned long *) __frame->gr8;
+		x = __frame->gr9;
+
+		for (;;) {
+			ret = get_user(z, p);
+			if (ret < 0)
+				goto error;
+
+			spin_lock_irq(&atomic_op_lock);
+
+			if (__get_user(z, p) == 0) {
+				y = x | z;
+				if (__put_user(y, p) == 0)
+					goto done2;
+				goto error2;
+			}
+
+			spin_unlock_irq(&atomic_op_lock);
+		}
+
+		/* TIRA gr0,#124
+		 * ulong __atomic_kernel_AND_return(ulong i, ulong *v)
+		 */
+	case TBR_TT_ATOMIC_AND:
+		p = (unsigned long *) __frame->gr8;
+		x = __frame->gr9;
+
+		for (;;) {
+			ret = get_user(z, p);
+			if (ret < 0)
+				goto error;
+
+			spin_lock_irq(&atomic_op_lock);
+
+			if (__get_user(z, p) == 0) {
+				y = x & z;
+				if (__put_user(y, p) == 0)
+					goto done2;
+				goto error2;
+			}
+
+			spin_unlock_irq(&atomic_op_lock);
+		}
+
+		/* TIRA gr0,#125
+		 * int __atomic_user_sub_return(atomic_t *v, int i)
+		 */
+	case TBR_TT_ATOMIC_SUB:
+		p = (unsigned long *) __frame->gr8;
+		x = __frame->gr9;
+
+		for (;;) {
+			ret = get_user(z, p);
+			if (ret < 0)
+				goto error;
+
+			spin_lock_irq(&atomic_op_lock);
+
+			if (__get_user(z, p) == 0) {
+				y = z - x;
+				if (__put_user(y, p) == 0)
+					goto done2;
+				goto error2;
+			}
+
+			spin_unlock_irq(&atomic_op_lock);
+		}
+
+		/* TIRA gr0,#126
+		 * int __atomic_user_add_return(atomic_t *v, int i)
+		 */
+	case TBR_TT_ATOMIC_ADD:
+		p = (unsigned long *) __frame->gr8;
+		x = __frame->gr9;
+
+		for (;;) {
+			ret = get_user(z, p);
+			if (ret < 0)
+				goto error;
+
+			spin_lock_irq(&atomic_op_lock);
+
+			if (__get_user(z, p) == 0) {
+				y = z + x;
+				if (__put_user(y, p) == 0)
+					goto done2;
+				goto error2;
+			}
+
+			spin_unlock_irq(&atomic_op_lock);
+		}
+
+	default:
+		BUG();
+	}
+
+done2:
+	spin_unlock_irq(&atomic_op_lock);
+done:
+	if (!user_mode(__frame))
+		set_fs(oldfs);
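+	/* return the original value in gr5 and the replacement in gr9 */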
+	__frame->gr5 = z;
+	__frame->gr9 = y;
+	return;
+
+error2:
+	spin_unlock_irq(&atomic_op_lock);
+error:
+	if (!user_mode(__frame))
+		set_fs(oldfs);
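+	/* point the PC back at the trap instruction */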
+	__frame->pc -= 4;
+
+	die_if_kernel("-- Atomic Op Error --\n");
+
+	info.si_signo	= SIGSEGV;
+	info.si_code	= SEGV_ACCERR;
+	info.si_errno	= 0;
+	info.si_addr	= (void *) __frame->pc;
+
+	force_sig_info(info.si_signo, &info, current);
+}
+
+/*****************************************************************************/
+/*
  *
  */
 asmlinkage void media_exception(unsigned long msr0, unsigned long msr1)