[MIPS] MT: Improved multithreading support.
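
This adds an SMTC kernel configuration (CONFIG_MIPS_MT_SMTC) in which
all thread contexts (TCs) on all VPEs of a MIPS MT core are used as SMP
processors, alongside the existing one-TC-per-VPE model (smp_mt.c is
renamed to smp-mt.c).  New files arch/mips/kernel/mips-mt.c, smtc.c,
smtc-asm.S and smtc-proc.c provide common MT support, the SMTC core,
the low-level IPI linkage and /proc statistics respectively.  Interrupt
handling, context switch, head.S startup and the idle loop are adjusted
so that read-modify-write sequences on Status/TCStatus remain atomic
per VPE.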
    
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index a7bac04..f9be549 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -1447,6 +1447,10 @@
 	prompt "MIPS MT options"
 	depends on MIPS_MT
 
+config MIPS_MT_SMTC
+	bool "SMTC: Use all TCs on all VPEs for SMP"
+	select SMP
+
 config MIPS_MT_SMP
 	bool "Use 1 TC on each available VPE for SMP"
 	select SMP
@@ -1613,7 +1617,7 @@
 
 config SMP
 	bool "Multi-Processing support"
-	depends on CPU_RM9000 || ((SIBYTE_BCM1x80 || SIBYTE_BCM1x55 || SIBYTE_SB1250 || QEMU) && !SIBYTE_STANDALONE) || SGI_IP27 || MIPS_MT_SMP
+	depends on CPU_RM9000 || ((SIBYTE_BCM1x80 || SIBYTE_BCM1x55 || SIBYTE_SB1250 || QEMU) && !SIBYTE_STANDALONE) || SGI_IP27 || MIPS_MT_SMP || MIPS_MT_SMTC
 	---help---
 	  This enables support for systems with more than one CPU. If you have
 	  a system with only one CPU, like most personal computers, say N. If
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index 9ec01de..34e8a25 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -34,7 +34,9 @@
 
 obj-$(CONFIG_SMP)		+= smp.o
 
-obj-$(CONFIG_MIPS_MT_SMP)	+= smp_mt.o
+obj-$(CONFIG_MIPS_MT)		+= mips-mt.o
+obj-$(CONFIG_MIPS_MT_SMTC)	+= smtc.o smtc-asm.o smtc-proc.o
+obj-$(CONFIG_MIPS_MT_SMP)	+= smp-mt.o
 
 obj-$(CONFIG_MIPS_APSP_KSPD)	+= kspd.o
 obj-$(CONFIG_MIPS_VPE_LOADER)	+= vpe.o
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index ca6b03c..92b28b6 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -69,6 +69,9 @@
 	offset("#define PT_BVADDR ", struct pt_regs, cp0_badvaddr);
 	offset("#define PT_STATUS ", struct pt_regs, cp0_status);
 	offset("#define PT_CAUSE  ", struct pt_regs, cp0_cause);
+#ifdef CONFIG_MIPS_MT_SMTC
+	offset("#define PT_TCSTATUS  ", struct pt_regs, cp0_tcstatus);
+#endif /* CONFIG_MIPS_MT_SMTC */
 	size("#define PT_SIZE   ", struct pt_regs);
 	linefeed;
 }
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index b1939a4..d101d2f 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -17,6 +17,9 @@
 #include <asm/isadep.h>
 #include <asm/thread_info.h>
 #include <asm/war.h>
+#ifdef CONFIG_MIPS_MT_SMTC
+#include <asm/mipsmtregs.h>
+#endif
 
 #ifdef CONFIG_PREEMPT
 	.macro	preempt_stop
@@ -75,6 +78,37 @@
 	bnez	t0, syscall_exit_work
 
 FEXPORT(restore_all)			# restore full frame
+#ifdef CONFIG_MIPS_MT_SMTC
+/* Detect and execute deferred IPI "interrupts" */
+	move	a0,sp
+	jal	deferred_smtc_ipi
+/* Re-arm any temporarily masked interrupts not explicitly "acked" */
+	mfc0	v0, CP0_TCSTATUS
+	ori	v1, v0, TCSTATUS_IXMT
+	mtc0	v1, CP0_TCSTATUS
+	andi	v0, TCSTATUS_IXMT
+	ehb
+	mfc0	t0, CP0_TCCONTEXT
+	DMT	9				# dmt t1
+	jal	mips_ihb
+	mfc0	t2, CP0_STATUS
+	andi	t3, t0, 0xff00
+	or	t2, t2, t3
+	mtc0	t2, CP0_STATUS
+	ehb
+	andi	t1, t1, VPECONTROL_TE
+	beqz	t1, 1f
+	EMT
+1:
+	mfc0	v1, CP0_TCSTATUS
+	/* We set IXMT above, XOR should clear it here */
+	xori	v1, v1, TCSTATUS_IXMT
+	or	v1, v0, v1
+	mtc0	v1, CP0_TCSTATUS
+	ehb
+	xor	t0, t0, t3
+	mtc0	t0, CP0_TCCONTEXT
+#endif /* CONFIG_MIPS_MT_SMTC */
 	.set	noat
 	RESTORE_TEMP
 	RESTORE_AT
diff --git a/arch/mips/kernel/gdb-low.S b/arch/mips/kernel/gdb-low.S
index 235ad9f..10f28fb 100644
--- a/arch/mips/kernel/gdb-low.S
+++ b/arch/mips/kernel/gdb-low.S
@@ -283,11 +283,33 @@
  */
 
 3:
+#ifdef CONFIG_MIPS_MT_SMTC
+		/* Read-modify write of Status must be atomic */
+		mfc0	t2, CP0_TCSTATUS
+		ori	t1, t2, TCSTATUS_IXMT
+		mtc0	t1, CP0_TCSTATUS
+		andi	t2, t2, TCSTATUS_IXMT
+		ehb
+		DMT	9				# dmt	t1
+		jal	mips_ihb
+		nop
+#endif /* CONFIG_MIPS_MT_SMTC */
 		mfc0	t0, CP0_STATUS
 		ori	t0, 0x1f
 		xori	t0, 0x1f
 		mtc0	t0, CP0_STATUS
-
+#ifdef CONFIG_MIPS_MT_SMTC
+		andi	t1, t1, VPECONTROL_TE
+		beqz	t1, 9f
+		nop
+		EMT					# emt
+9:
+		mfc0	t1, CP0_TCSTATUS
+		xori	t1, t1, TCSTATUS_IXMT
+		or	t1, t1, t2
+		mtc0	t1, CP0_TCSTATUS
+		ehb
+#endif /* CONFIG_MIPS_MT_SMTC */
 		LONG_L	v0, GDB_FR_STATUS(sp)
 		LONG_L	v1, GDB_FR_EPC(sp)
 		mtc0	v0, CP0_STATUS
diff --git a/arch/mips/kernel/gdb-stub.c b/arch/mips/kernel/gdb-stub.c
index d4f88e0..6ecbdc1 100644
--- a/arch/mips/kernel/gdb-stub.c
+++ b/arch/mips/kernel/gdb-stub.c
@@ -140,6 +140,7 @@
 #include <asm/system.h>
 #include <asm/gdb-stub.h>
 #include <asm/inst.h>
+#include <asm/smp.h>
 
 /*
  * external low-level support routines
@@ -669,6 +670,64 @@
 	local_irq_restore(flags);
 }
 
+/*
+ * GDB stub needs to call kgdb_wait on all processors with interrupts
+ * disabled, so it uses its own special variant.
+ */
+static int kgdb_smp_call_kgdb_wait(void)
+{
+#ifdef CONFIG_SMP
+	struct call_data_struct data;
+	int i, cpus = num_online_cpus() - 1;
+	int cpu = smp_processor_id();
+
+	/*
+	 * Can die spectacularly if this CPU isn't yet marked online
+	 */
+	BUG_ON(!cpu_online(cpu));
+
+	if (!cpus)
+		return 0;
+
+	if (spin_is_locked(&smp_call_lock)) {
+		/*
+		 * Some other processor is trying to make us do something
+		 * but we're not going to respond... give up
+		 */
+		return -1;
+	}
+
+	/*
+	 * We will continue here, accepting the fact that
+	 * the kernel may deadlock if another CPU attempts
+	 * to call smp_call_function now...
+	 */
+
+	data.func = kgdb_wait;
+	data.info = NULL;
+	atomic_set(&data.started, 0);
+	data.wait = 0;
+
+	spin_lock(&smp_call_lock);
+	call_data = &data;
+	mb();
+
+	/* Send a message to all other CPUs and wait for them to respond */
+	for (i = 0; i < NR_CPUS; i++)
+		if (cpu_online(i) && i != cpu)
+			core_send_ipi(i, SMP_CALL_FUNCTION);
+
+	/* Wait for response */
+	/* FIXME: lock-up detection, backtrace on lock-up */
+	while (atomic_read(&data.started) != cpus)
+		barrier();
+
+	call_data = NULL;
+	spin_unlock(&smp_call_lock);
+#endif
+
+	return 0;
+}
 
 /*
  * This function does all command processing for interfacing to gdb.  It
@@ -718,7 +777,7 @@
 	/*
 	 * force other cpus to enter kgdb
 	 */
-	smp_call_function(kgdb_wait, NULL, 0, 0);
+	kgdb_smp_call_kgdb_wait();
 
 	/*
 	 * If we're in breakpoint() increment the PC
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index 04418b6..ff7af36 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -12,6 +12,7 @@
 #include <linux/init.h>
 
 #include <asm/asm.h>
+#include <asm/asmmacro.h>
 #include <asm/cacheops.h>
 #include <asm/regdef.h>
 #include <asm/fpregdef.h>
@@ -171,6 +172,15 @@
 	SAVE_AT
 	.set	push
 	.set	noreorder
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * To keep from blindly blocking *all* interrupts
+	 * during service by SMTC kernel, we also want to
+	 * pass the IM value to be cleared.
+	 */
+EXPORT(except_vec_vi_mori)
+	ori	a0, $0, 0
+#endif /* CONFIG_MIPS_MT_SMTC */
 EXPORT(except_vec_vi_lui)
 	lui	v0, 0		/* Patched */
 	j	except_vec_vi_handler
@@ -187,6 +197,25 @@
 NESTED(except_vec_vi_handler, 0, sp)
 	SAVE_TEMP
 	SAVE_STATIC
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * SMTC has an interesting problem that interrupts are level-triggered,
+	 * and the CLI macro will clear EXL, potentially causing a duplicate
+	 * interrupt service invocation. So we need to clear the associated
+	 * IM bit of Status prior to doing CLI, and restore it after the
+	 * service routine has been invoked - we must assume that the
+	 * service routine will have cleared the state, and any active
+	 * level represents a new or otherwise unserviced event...
+	 */
+	mfc0	t1, CP0_STATUS
+	and	t0, a0, t1
+	mfc0	t2, CP0_TCCONTEXT
+	or	t0, t0, t2
+	mtc0	t0, CP0_TCCONTEXT
+	xor	t1, t1, t0
+	mtc0	t1, CP0_STATUS
+	ehb
+#endif /* CONFIG_MIPS_MT_SMTC */
 	CLI
 	move	a0, sp
 	jalr	v0
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index 2e9122a..bdf6f6e 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -18,6 +18,7 @@
 #include <linux/threads.h>
 
 #include <asm/asm.h>
+#include <asm/asmmacro.h>
 #include <asm/regdef.h>
 #include <asm/page.h>
 #include <asm/mipsregs.h>
@@ -82,12 +83,33 @@
 	 */
 	.macro	setup_c0_status set clr
 	.set	push
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * For SMTC, we need to set privilege and disable interrupts only for
+	 * the current TC, using the TCStatus register.
+	 */
+	mfc0	t0, CP0_TCSTATUS
+	/* Fortunately CU 0 is in the same place in both registers */
+	/* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
+	li	t1, ST0_CU0 | 0x08001c00
+	or	t0, t1
+	/* Clear TKSU, leave IXMT */
+	xori	t0, 0x00001800
+	mtc0	t0, CP0_TCSTATUS
+	ehb
+	/* We need to leave the global IE bit set, but clear EXL...*/
+	mfc0	t0, CP0_STATUS
+	or	t0, ST0_CU0 | ST0_EXL | ST0_ERL | \set | \clr
+	xor	t0, ST0_EXL | ST0_ERL | \clr
+	mtc0	t0, CP0_STATUS
+#else
 	mfc0	t0, CP0_STATUS
 	or	t0, ST0_CU0|\set|0x1f|\clr
 	xor	t0, 0x1f|\clr
 	mtc0	t0, CP0_STATUS
 	.set	noreorder
 	sll	zero,3				# ehb
+#endif
 	.set	pop
 	.endm
 
@@ -134,6 +156,24 @@
 
 	ARC64_TWIDDLE_PC
 
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * In SMTC kernel, "CLI" is thread-specific, in TCStatus.
+	 * We still need to enable interrupts globally in Status,
+	 * and clear EXL/ERL.
+	 *
+	 * TCContext is used to track interrupt levels under
+	 * service in SMTC kernel. Clear for boot TC before
+	 * allowing any interrupts.
+	 */
+	mtc0	zero, CP0_TCCONTEXT
+
+	mfc0	t0, CP0_STATUS
+	ori	t0, t0, 0xff1f
+	xori	t0, t0, 0x001e
+	mtc0	t0, CP0_STATUS
+#endif /* CONFIG_MIPS_MT_SMTC */
+
 	PTR_LA		t0, __bss_start		# clear .bss
 	LONG_S		zero, (t0)
 	PTR_LA		t1, __bss_stop - LONGSIZE
@@ -166,8 +206,25 @@
  * function after setting up the stack and gp registers.
  */
 NESTED(smp_bootstrap, 16, sp)
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * Read-modify-writes of Status must be atomic, and this
+	 * is one case where CLI is invoked without EXL being
+	 * necessarily set. The CLI and setup_c0_status will
+	 * in fact be redundant for all but the first TC of
+	 * each VPE being booted.
+	 */
+	DMT	10	# dmt t2 /* t0, t1 are used by CLI and setup_c0_status() */
+	jal	mips_ihb
+#endif /* CONFIG_MIPS_MT_SMTC */
 	setup_c0_status_sec
 	smp_slave_setup
+#ifdef CONFIG_MIPS_MT_SMTC
+	andi	t2, t2, VPECONTROL_TE
+	beqz	t2, 2f
+	EMT		# emt
+2:
+#endif /* CONFIG_MIPS_MT_SMTC */
 	j	start_secondary
 	END(smp_bootstrap)
 #endif /* CONFIG_SMP */
diff --git a/arch/mips/kernel/i8259.c b/arch/mips/kernel/i8259.c
index b974ac9..2125ba5 100644
--- a/arch/mips/kernel/i8259.c
+++ b/arch/mips/kernel/i8259.c
@@ -187,6 +187,10 @@
 		outb(cached_21,0x21);
 		outb(0x60+irq,0x20);	/* 'Specific EOI' to master */
 	}
+#ifdef CONFIG_MIPS_MT_SMTC
+	if (irq_hwmask[irq] & ST0_IM)
+		set_c0_status(irq_hwmask[irq] & ST0_IM);
+#endif /* CONFIG_MIPS_MT_SMTC */
 	spin_unlock_irqrestore(&i8259A_lock, flags);
 	return;
 
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
index 3f653c7..97ebdc7 100644
--- a/arch/mips/kernel/irq-msc01.c
+++ b/arch/mips/kernel/irq-msc01.c
@@ -76,6 +76,11 @@
 	mask_msc_irq(irq);
 	if (!cpu_has_veic)
 		MSCIC_WRITE(MSC01_IC_EOI, 0);
+#ifdef CONFIG_MIPS_MT_SMTC
+	/* This actually needs to be a call into platform code */
+	if (irq_hwmask[irq] & ST0_IM)
+		set_c0_status(irq_hwmask[irq] & ST0_IM);
+#endif /* CONFIG_MIPS_MT_SMTC */
 }
 
 /*
@@ -92,6 +97,10 @@
 		MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT);
 		MSCIC_WRITE(MSC01_IC_SUP+irq*8, r);
 	}
+#ifdef CONFIG_MIPS_MT_SMTC
+	if (irq_hwmask[irq] & ST0_IM)
+		set_c0_status(irq_hwmask[irq] & ST0_IM);
+#endif /* CONFIG_MIPS_MT_SMTC */
 }
 
 /*
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index e0efc4f..3dce742 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -38,6 +38,15 @@
 
 atomic_t irq_err_count;
 
+#ifdef CONFIG_MIPS_MT_SMTC
+/*
+ * SMTC Kernel needs to manipulate low-level CPU interrupt mask
+ * in do_IRQ. These are passed in setup_irq_smtc() and stored
+ * in this table.
+ */
+unsigned long irq_hwmask[NR_IRQS];
+#endif /* CONFIG_MIPS_MT_SMTC */
+
 #undef do_IRQ
 
 /*
@@ -49,6 +58,7 @@
 {
 	irq_enter();
 
+	__DO_IRQ_SMTC_HOOK();
 	__do_IRQ(irq, regs);
 
 	irq_exit();
@@ -129,6 +139,9 @@
 		irq_desc[i].depth   = 1;
 		irq_desc[i].handler = &no_irq_type;
 		spin_lock_init(&irq_desc[i].lock);
+#ifdef CONFIG_MIPS_MT_SMTC
+		irq_hwmask[i] = 0;
+#endif /* CONFIG_MIPS_MT_SMTC */
 	}
 
 	arch_init_irq();
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c
new file mode 100644
index 0000000..02237a6
--- /dev/null
+++ b/arch/mips/kernel/mips-mt.c
@@ -0,0 +1,449 @@
+/*
+ * General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels
+ * Copyright (C) 2005 Mips Technologies, Inc
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+
+#include <asm/cpu.h>
+#include <asm/processor.h>
+#include <asm/atomic.h>
+#include <asm/system.h>
+#include <asm/hardirq.h>
+#include <asm/mmu_context.h>
+#include <asm/smp.h>
+#include <asm/mipsmtregs.h>
+#include <asm/r4kcache.h>
+#include <asm/cacheflush.h>
+
+/*
+ * CPU mask used to set process affinity for MT VPEs/TCs with FPUs
+ */
+
+cpumask_t mt_fpu_cpumask;
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <asm/uaccess.h>
+
+unsigned long mt_fpemul_threshold = 0;
+
+/*
+ * Replacement functions for the sys_sched_setaffinity() and
+ * sys_sched_getaffinity() system calls, so that we can integrate
+ * FPU affinity with the user's requested processor affinity.
+ * This code is 98% identical to the sys_sched_setaffinity()
+ * and sys_sched_getaffinity() system calls, and should be
+ * updated when kernel/sched.c changes.
+ */
+
+/*
+ * find_process_by_pid - find a process with a matching PID value.
+ * used in sys_sched_set/getaffinity() in kernel/sched.c, so
+ * cloned here.
+ */
+static inline task_t *find_process_by_pid(pid_t pid)
+{
+	return pid ? find_task_by_pid(pid) : current;
+}
+
+
+/*
+ * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
+ */
+asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
+				      unsigned long __user *user_mask_ptr)
+{
+	cpumask_t new_mask;
+	cpumask_t effective_mask;
+	int retval;
+	task_t *p;
+
+	if (len < sizeof(new_mask))
+		return -EINVAL;
+
+	if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
+		return -EFAULT;
+
+	lock_cpu_hotplug();
+	read_lock(&tasklist_lock);
+
+	p = find_process_by_pid(pid);
+	if (!p) {
+		read_unlock(&tasklist_lock);
+		unlock_cpu_hotplug();
+		return -ESRCH;
+	}
+
+	/*
+	 * It is not safe to call set_cpus_allowed with the
+	 * tasklist_lock held.  We will bump the task_struct's
+	 * usage count and drop tasklist_lock before invoking
+	 * set_cpus_allowed.
+	 */
+	get_task_struct(p);
+
+	retval = -EPERM;
+	if ((current->euid != p->euid) && (current->euid != p->uid) &&
+			!capable(CAP_SYS_NICE)) {
+		read_unlock(&tasklist_lock);
+		goto out_unlock;
+	}
+
+	/* Record new user-specified CPU set for future reference */
+	p->thread.user_cpus_allowed = new_mask;
+
+	/* Unlock the task list */
+	read_unlock(&tasklist_lock);
+
+	/* Compute new global allowed CPU set if necessary */
+	if ((p->thread.mflags & MF_FPUBOUND) &&
+	    cpus_intersects(new_mask, mt_fpu_cpumask)) {
+		cpus_and(effective_mask, new_mask, mt_fpu_cpumask);
+		retval = set_cpus_allowed(p, effective_mask);
+	} else {
+		p->thread.mflags &= ~MF_FPUBOUND;
+		retval = set_cpus_allowed(p, new_mask);
+	}
+
+out_unlock:
+	put_task_struct(p);
+	unlock_cpu_hotplug();
+	return retval;
+}
+
+/*
+ * mipsmt_sys_sched_getaffinity - get the cpu affinity of a process
+ */
+asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
+				      unsigned long __user *user_mask_ptr)
+{
+	unsigned int real_len;
+	cpumask_t mask;
+	int retval;
+	task_t *p;
+
+	real_len = sizeof(mask);
+	if (len < real_len)
+		return -EINVAL;
+
+	lock_cpu_hotplug();
+	read_lock(&tasklist_lock);
+
+	retval = -ESRCH;
+	p = find_process_by_pid(pid);
+	if (!p)
+		goto out_unlock;
+
+	retval = 0;
+
+	cpus_and(mask, p->thread.user_cpus_allowed, cpu_possible_map);
+
+out_unlock:
+	read_unlock(&tasklist_lock);
+	unlock_cpu_hotplug();
+	if (retval)
+		return retval;
+	if (copy_to_user(user_mask_ptr, &mask, real_len))
+		return -EFAULT;
+	return real_len;
+}
+
+#endif /* CONFIG_MIPS_MT_FPAFF */
+
+/*
+ * Dump new MIPS MT state for the core. Does not leave TCs halted.
+ * Takes an argument which is taken to be a pre-call MVPControl value.
+ */
+
+void mips_mt_regdump(unsigned long mvpctl)
+{
+	unsigned long flags;
+	unsigned long vpflags;
+	unsigned long mvpconf0;
+	int nvpe;
+	int ntc;
+	int i;
+	int tc;
+	unsigned long haltval;
+	unsigned long tcstatval;
+#ifdef CONFIG_MIPS_MT_SMTC
+	void smtc_soft_dump(void);
+#endif /* CONFIG_MIPS_MT_SMTC */
+
+	local_irq_save(flags);
+	vpflags = dvpe();
+	printk("=== MIPS MT State Dump ===\n");
+	printk("-- Global State --\n");
+	printk("   MVPControl Passed: %08lx\n", mvpctl);
+	printk("   MVPControl Read: %08lx\n", vpflags);
+	printk("   MVPConf0 : %08lx\n", (mvpconf0 = read_c0_mvpconf0()));
+	nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
+	ntc = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
+	printk("-- per-VPE State --\n");
+	for (i = 0; i < nvpe; i++) {
+		for (tc = 0; tc < ntc; tc++) {
+			settc(tc);
+			if ((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
+				printk("  VPE %d\n", i);
+				printk("   VPEControl : %08lx\n",
+					read_vpe_c0_vpecontrol());
+				printk("   VPEConf0 : %08lx\n",
+					read_vpe_c0_vpeconf0());
+				printk("   VPE%d.Status : %08lx\n",
+					i, read_vpe_c0_status());
+				printk("   VPE%d.EPC : %08lx\n",
+					i, read_vpe_c0_epc());
+				printk("   VPE%d.Cause : %08lx\n",
+					i, read_vpe_c0_cause());
+				printk("   VPE%d.Config7 : %08lx\n",
+					i, read_vpe_c0_config7());
+				break; /* Next VPE */
+			}
+		}
+	}
+	printk("-- per-TC State --\n");
+	for (tc = 0; tc < ntc; tc++) {
+		settc(tc);
+		if (read_tc_c0_tcbind() == read_c0_tcbind()) {
+			/* Are we dumping ourself?  */
+			haltval = 0; /* Then we're not halted, and mustn't be */
+			tcstatval = flags; /* And pre-dump TCStatus is flags */
+			printk("  TC %d (current TC with VPE EPC above)\n", tc);
+		} else {
+			haltval = read_tc_c0_tchalt();
+			write_tc_c0_tchalt(1);
+			tcstatval = read_tc_c0_tcstatus();
+			printk("  TC %d\n", tc);
+		}
+		printk("   TCStatus : %08lx\n", tcstatval);
+		printk("   TCBind : %08lx\n", read_tc_c0_tcbind());
+		printk("   TCRestart : %08lx\n", read_tc_c0_tcrestart());
+		printk("   TCHalt : %08lx\n", haltval);
+		printk("   TCContext : %08lx\n", read_tc_c0_tccontext());
+		if (!haltval)
+			write_tc_c0_tchalt(0);
+	}
+#ifdef CONFIG_MIPS_MT_SMTC
+	smtc_soft_dump();
+#endif /* CONFIG_MIPS_MT_SMTC */
+	printk("===========================\n");
+	evpe(vpflags);
+	local_irq_restore(flags);
+}
+
+static int mt_opt_norps = 0;
+static int mt_opt_rpsctl = -1;
+static int mt_opt_nblsu = -1;
+static int mt_opt_forceconfig7 = 0;
+static int mt_opt_config7 = -1;
+
+static int __init rps_disable(char *s)
+{
+	mt_opt_norps = 1;
+	return 1;
+}
+__setup("norps", rps_disable);
+
+static int __init rpsctl_set(char *str)
+{
+	get_option(&str, &mt_opt_rpsctl);
+	return 1;
+}
+__setup("rpsctl=", rpsctl_set);
+
+static int __init nblsu_set(char *str)
+{
+	get_option(&str, &mt_opt_nblsu);
+	return 1;
+}
+__setup("nblsu=", nblsu_set);
+
+static int __init config7_set(char *str)
+{
+	get_option(&str, &mt_opt_config7);
+	mt_opt_forceconfig7 = 1;
+	return 1;
+}
+__setup("config7=", config7_set);
+
+/* Experimental cache flush control parameters that should go away some day */
+int mt_protiflush = 0;
+int mt_protdflush = 0;
+int mt_n_iflushes = 1;
+int mt_n_dflushes = 1;
+
+static int __init set_protiflush(char *s)
+{
+	mt_protiflush = 1;
+	return 1;
+}
+__setup("protiflush", set_protiflush);
+
+static int __init set_protdflush(char *s)
+{
+	mt_protdflush = 1;
+	return 1;
+}
+__setup("protdflush", set_protdflush);
+
+static int __init niflush(char *s)
+{
+	get_option(&s, &mt_n_iflushes);
+	return 1;
+}
+__setup("niflush=", niflush);
+
+static int __init ndflush(char *s)
+{
+	get_option(&s, &mt_n_dflushes);
+	return 1;
+}
+__setup("ndflush=", ndflush);
+#ifdef CONFIG_MIPS_MT_FPAFF
+static int fpaff_threshold = -1;
+
+static int __init fpaff_thresh(char *str)
+{
+	get_option(&str, &fpaff_threshold);
+	return 1;
+}
+
+__setup("fpaff=", fpaff_thresh);
+#endif /* CONFIG_MIPS_MT_FPAFF */
+
+static unsigned int itc_base = 0;
+
+static int __init set_itc_base(char *str)
+{
+	get_option(&str, &itc_base);
+	return 1;
+}
+
+__setup("itcbase=", set_itc_base);
+
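+/*
+ * The options above are taken from the kernel command line, e.g.
+ * "rpsctl=0 nblsu=1 itcbase=0x1f000000" (illustrative values only),
+ * and are acted upon or reported by mips_mt_set_cpuoptions() below.
+ */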
+void mips_mt_set_cpuoptions(void)
+{
+	unsigned int oconfig7 = read_c0_config7();
+	unsigned int nconfig7 = oconfig7;
+
+	if (mt_opt_norps) {
+		printk("\"norps\" option deprecated: use \"rpsctl=\"\n");
+	}
+	if (mt_opt_rpsctl >= 0) {
+		printk("34K return prediction stack override set to %d.\n",
+			mt_opt_rpsctl);
+		if (mt_opt_rpsctl)
+			nconfig7 |= (1 << 2);
+		else
+			nconfig7 &= ~(1 << 2);
+	}
+	if (mt_opt_nblsu >= 0) {
+		printk("34K ALU/LSU sync override set to %d.\n", mt_opt_nblsu);
+		if (mt_opt_nblsu)
+			nconfig7 |= (1 << 5);
+		else
+			nconfig7 &= ~(1 << 5);
+	}
+	if (mt_opt_forceconfig7) {
+		printk("CP0.Config7 forced to 0x%08x.\n", mt_opt_config7);
+		nconfig7 = mt_opt_config7;
+	}
+	if (oconfig7 != nconfig7) {
+		__asm__ __volatile__("sync");
+		write_c0_config7(nconfig7);
+		ehb();
+		printk("Config7: 0x%08x\n", read_c0_config7());
+	}
+
+	/* Report Cache management debug options */
+	if (mt_protiflush)
+		printk("I-cache flushes single-threaded\n");
+	if (mt_protdflush)
+		printk("D-cache flushes single-threaded\n");
+	if (mt_n_iflushes != 1)
+		printk("I-Cache Flushes Repeated %d times\n", mt_n_iflushes);
+	if (mt_n_dflushes != 1)
+		printk("D-Cache Flushes Repeated %d times\n", mt_n_dflushes);
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+	/* FPU Use Factor empirically derived from experiments on 34K */
+#define FPUSEFACTOR 333
+
+	if (fpaff_threshold >= 0) {
+		mt_fpemul_threshold = fpaff_threshold;
+	} else {
+		mt_fpemul_threshold =
+			(FPUSEFACTOR * (loops_per_jiffy/(500000/HZ))) / HZ;
+	}
+	printk("FPU Affinity set after %ld emulations\n",
+			mt_fpemul_threshold);
+#endif /* CONFIG_MIPS_MT_FPAFF */
+
+	if (itc_base != 0) {
+		/*
+		 * Configure ITC mapping.  This code is very
+		 * specific to the 34K core family, which uses
+		 * a special mode bit ("ITC") in the ErrCtl
+		 * register to enable access to ITC control
+		 * registers via cache "tag" operations.
+		 */
+		unsigned long ectlval;
+		unsigned long itcblkgrn;
+
+		/* ErrCtl register is known as "ecc" to Linux */
+		ectlval = read_c0_ecc();
+		write_c0_ecc(ectlval | (0x1 << 26));
+		ehb();
+#define INDEX_0 (0x80000000)
+#define INDEX_8 (0x80000008)
+		/* Read "cache tag" for Dcache pseudo-index 8 */
+		cache_op(Index_Load_Tag_D, INDEX_8);
+		ehb();
+		itcblkgrn = read_c0_dtaglo();
+		itcblkgrn &= 0xfffe0000;
+		/* Set for 128 byte pitch of ITC cells */
+		itcblkgrn |= 0x00000c00;
+		/* Stage in Tag register */
+		write_c0_dtaglo(itcblkgrn);
+		ehb();
+		/* Write out to ITU with CACHE op */
+		cache_op(Index_Store_Tag_D, INDEX_8);
+		/* Now set base address, and turn ITC on with 0x1 bit */
+		write_c0_dtaglo((itc_base & 0xfffffc00) | 0x1 );
+		ehb();
+		/* Write out to ITU with CACHE op */
+		cache_op(Index_Store_Tag_D, INDEX_0);
+		write_c0_ecc(ectlval);
+		ehb();
+		printk("Mapped %ld ITC cells starting at 0x%08x\n",
+			((itcblkgrn & 0x7fe00000) >> 20), itc_base);
+	}
+}
+
+/*
+ * Function to protect cache flushes from concurrent execution
+ * depends on MP software model chosen.
+ */
+
+void mt_cflush_lockdown(void)
+{
+#ifdef CONFIG_MIPS_MT_SMTC
+	void smtc_cflush_lockdown(void);
+
+	smtc_cflush_lockdown();
+#endif /* CONFIG_MIPS_MT_SMTC */
+	/* FILL IN VSMP and AP/SP VERSIONS HERE */
+}
+
+void mt_cflush_release(void)
+{
+#ifdef CONFIG_MIPS_MT_SMTC
+	void smtc_cflush_release(void);
+
+	smtc_cflush_release();
+#endif /* CONFIG_MIPS_MT_SMTC */
+	/* FILL IN VSMP and AP/SP VERSIONS HERE */
+}
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index c66db5e..8b393df 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -41,6 +41,10 @@
 #include <asm/elf.h>
 #include <asm/isadep.h>
 #include <asm/inst.h>
+#ifdef CONFIG_MIPS_MT_SMTC
+#include <asm/mipsmtregs.h>
+extern void smtc_idle_loop_hook(void);
+#endif /* CONFIG_MIPS_MT_SMTC */
 
 /*
  * The idle thread. There's no useful work to be done, so just try to conserve
@@ -51,9 +55,13 @@
 {
 	/* endless idle loop with no priority at all */
 	while (1) {
-		while (!need_resched())
+		while (!need_resched()) {
+#ifdef CONFIG_MIPS_MT_SMTC
+			smtc_idle_loop_hook();
+#endif /* CONFIG_MIPS_MT_SMTC */
 			if (cpu_wait)
 				(*cpu_wait)();
+		}
 		preempt_enable_no_resched();
 		schedule();
 		preempt_disable();
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index f838b36..f3106d0 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -248,10 +248,20 @@
 			break;
 		case FPC_EIR: {	/* implementation / version register */
 			unsigned int flags;
+#ifdef CONFIG_MIPS_MT_SMTC
+			unsigned int irqflags;
+			unsigned int mtflags;
+#endif /* CONFIG_MIPS_MT_SMTC */
 
 			if (!cpu_has_fpu)
 				break;
 
+#ifdef CONFIG_MIPS_MT_SMTC
+			/* Read-modify-write of Status must be atomic */
+			local_irq_save(irqflags);
+			mtflags = dmt();
+#endif /* CONFIG_MIPS_MT_SMTC */
+
 			preempt_disable();
 			if (cpu_has_mipsmt) {
 				unsigned int vpflags = dvpe();
@@ -266,6 +276,10 @@
 				__asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
 				write_c0_status(flags);
 			}
+#ifdef CONFIG_MIPS_MT_SMTC
+			emt(mtflags);
+			local_irq_restore(irqflags);
+#endif /* CONFIG_MIPS_MT_SMTC */
 			preempt_enable();
 			break;
 		}
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
index 0d5cf97..8704dc0 100644
--- a/arch/mips/kernel/ptrace32.c
+++ b/arch/mips/kernel/ptrace32.c
@@ -173,12 +173,22 @@
 			break;
 		case FPC_EIR: {	/* implementation / version register */
 			unsigned int flags;
+#ifdef CONFIG_MIPS_MT_SMTC
+			unsigned int irqflags;
+			unsigned int mtflags;
+#endif /* CONFIG_MIPS_MT_SMTC */
 
 			if (!cpu_has_fpu) {
 				tmp = 0;
 				break;
 			}
 
+#ifdef CONFIG_MIPS_MT_SMTC
+			/* Read-modify-write of Status must be atomic */
+			local_irq_save(irqflags);
+			mtflags = dmt();
+#endif /* CONFIG_MIPS_MT_SMTC */
+
 			preempt_disable();
 			if (cpu_has_mipsmt) {
 				unsigned int vpflags = dvpe();
@@ -193,6 +203,10 @@
 				__asm__ __volatile__("cfc1\t%0,$0": "=r" (tmp));
 				write_c0_status(flags);
 			}
+#ifdef CONFIG_MIPS_MT_SMTC
+			emt(mtflags);
+			local_irq_restore(irqflags);
+#endif /* CONFIG_MIPS_MT_SMTC */
 			preempt_enable();
 			break;
 		}
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index d2afbd1..0b1b54a 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -88,7 +88,18 @@
 
 	PTR_ADDIU	t0, $28, _THREAD_SIZE - 32
 	set_saved_sp	t0, t1, t2
-
+#ifdef CONFIG_MIPS_MT_SMTC
+	/* Read-modify-writes of Status must be atomic on a VPE */
+	mfc0	t2, CP0_TCSTATUS
+	ori	t1, t2, TCSTATUS_IXMT
+	mtc0	t1, CP0_TCSTATUS
+	andi	t2, t2, TCSTATUS_IXMT
+	ehb
+	DMT	8				# dmt	t0
+	move	t1,ra
+	jal	mips_ihb
+	move	ra,t1
+#endif /* CONFIG_MIPS_MT_SMTC */
 	mfc0	t1, CP0_STATUS		/* Do we really need this? */
 	li	a3, 0xff01
 	and	t1, a3
@@ -97,6 +108,18 @@
 	and	a2, a3
 	or	a2, t1
 	mtc0	a2, CP0_STATUS
+#ifdef CONFIG_MIPS_MT_SMTC
+	ehb
+	andi	t0, t0, VPECONTROL_TE
+	beqz	t0, 1f
+	emt
+1:
+	mfc0	t1, CP0_TCSTATUS
+	xori	t1, t1, TCSTATUS_IXMT
+	or	t1, t1, t2
+	mtc0	t1, CP0_TCSTATUS
+	ehb
+#endif /* CONFIG_MIPS_MT_SMTC */
 	move	v0, a0
 	jr	ra
 	END(resume)
@@ -131,10 +154,19 @@
 #define FPU_DEFAULT  0x00000000
 
 LEAF(_init_fpu)
+#ifdef CONFIG_MIPS_MT_SMTC
+	/* Rather than manipulate per-VPE Status, set per-TC bit in TCStatus */
+	mfc0	t0, CP0_TCSTATUS
+	/* Bit position is the same for Status, TCStatus */
+	li	t1, ST0_CU1
+	or	t0, t1
+	mtc0	t0, CP0_TCSTATUS
+#else /* Normal MIPS CU1 enable */
 	mfc0	t0, CP0_STATUS
 	li	t1, ST0_CU1
 	or	t0, t1
 	mtc0	t0, CP0_STATUS
+#endif /* CONFIG_MIPS_MT_SMTC */
 	fpu_enable_hazard
 
 	li	t1, FPU_DEFAULT
diff --git a/arch/mips/kernel/smp_mt.c b/arch/mips/kernel/smp-mt.c
similarity index 92%
rename from arch/mips/kernel/smp_mt.c
rename to arch/mips/kernel/smp-mt.c
index 993b8bf..19b8e4b 100644
--- a/arch/mips/kernel/smp_mt.c
+++ b/arch/mips/kernel/smp-mt.c
@@ -1,8 +1,4 @@
 /*
- * Copyright (C) 2004, 2005 MIPS Technologies, Inc.  All rights reserved.
- *
- *  Elizabeth Clarke (beth@mips.com)
- *
  *  This program is free software; you can distribute it and/or modify it
  *  under the terms of the GNU General Public License (Version 2) as
  *  published by the Free Software Foundation.
@@ -16,6 +12,10 @@
  *  with this program; if not, write to the Free Software Foundation, Inc.,
  *  59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
  *
+ * Copyright (C) 2004, 05, 06 MIPS Technologies, Inc.
+ *    Elizabeth Clarke (beth@mips.com)
+ *    Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
  */
 #include <linux/kernel.h>
 #include <linux/sched.h>
@@ -24,6 +24,7 @@
 #include <linux/compiler.h>
 
 #include <asm/atomic.h>
+#include <asm/cacheflush.h>
 #include <asm/cpu.h>
 #include <asm/processor.h>
 #include <asm/system.h>
@@ -33,8 +34,8 @@
 #include <asm/time.h>
 #include <asm/mipsregs.h>
 #include <asm/mipsmtregs.h>
-#include <asm/cacheflush.h>
-#include <asm/mips-boards/maltaint.h>
+#include <asm/mips_mt.h>
+#include <asm/mips-boards/maltaint.h>  /* This is f*cking wrong */
 
 #define MIPS_CPU_IPI_RESCHED_IRQ 0
 #define MIPS_CPU_IPI_CALL_IRQ 1
@@ -66,6 +67,7 @@
 	if (!cpu_has_mipsmt)
 		return;
 
+	/* Enable VPC */
 	set_c0_mvpcontrol(MVPCONTROL_VPC);
 
 	back_to_back_c0_hazard();
@@ -106,12 +108,12 @@
 
 static void ipi_resched_dispatch (struct pt_regs *regs)
 {
-	do_IRQ(MIPS_CPU_IPI_RESCHED_IRQ, regs);
+	do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_IPI_RESCHED_IRQ, regs);
 }
 
 static void ipi_call_dispatch (struct pt_regs *regs)
 {
-	do_IRQ(MIPS_CPU_IPI_CALL_IRQ, regs);
+	do_IRQ(MIPSCPU_INT_BASE + MIPS_CPU_IPI_CALL_IRQ, regs);
 }
 
 irqreturn_t ipi_resched_interrupt(int irq, void *dev_id, struct pt_regs *regs)
@@ -155,6 +157,8 @@
 	dvpe();
 	dmt();
 
+	mips_mt_set_cpuoptions();
+
 	/* Put MVPE's into 'configuration state' */
 	set_c0_mvpcontrol(MVPCONTROL_VPC);
 
@@ -189,11 +193,13 @@
 
 			if (i != 0) {
 				write_vpe_c0_status((read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0);
-				write_vpe_c0_cause(read_vpe_c0_cause() & ~CAUSEF_IP);
 
 				/* set config to be the same as vpe0, particularly kseg0 coherency alg */
 				write_vpe_c0_config( read_c0_config());
 
+				/* make sure there are no software interrupts pending */
+				write_vpe_c0_cause(read_vpe_c0_cause() & ~(C_SW1|C_SW0));
+
 				/* Propagate Config7 */
 				write_vpe_c0_config7(read_c0_config7());
 			}
@@ -233,16 +239,16 @@
 	/* We'll wait until starting the secondaries before starting MVPE */
 
 	printk(KERN_INFO "Detected %i available secondary CPU(s)\n", num);
+}
 
+void __init plat_prepare_cpus(unsigned int max_cpus)
+{
 	/* set up ipi interrupts */
 	if (cpu_has_vint) {
 		set_vi_handler (MIPS_CPU_IPI_RESCHED_IRQ, ipi_resched_dispatch);
 		set_vi_handler (MIPS_CPU_IPI_CALL_IRQ, ipi_call_dispatch);
 	}
-}
 
-void __init plat_prepare_cpus(unsigned int max_cpus)
-{
 	cpu_ipi_resched_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_RESCHED_IRQ;
 	cpu_ipi_call_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_CALL_IRQ;
 
@@ -287,7 +293,8 @@
 	/* global pointer */
 	write_tc_gpr_gp((unsigned long)gp);
 
-	flush_icache_range((unsigned long)gp, (unsigned long)(gp + 1));
+	flush_icache_range((unsigned long)gp,
+	                   (unsigned long)(gp + sizeof(struct thread_info)));
 
 	/* finally out of configuration and into chaos */
 	clear_c0_mvpcontrol(MVPCONTROL_VPC);
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 72a287a..d42f358 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -38,6 +38,10 @@
 #include <asm/mmu_context.h>
 #include <asm/smp.h>
 
+#ifdef CONFIG_MIPS_MT_SMTC
+#include <asm/mipsmtregs.h>
+#endif /* CONFIG_MIPS_MT_SMTC */
+
 cpumask_t phys_cpu_present_map;		/* Bitmask of available CPUs */
 volatile cpumask_t cpu_callin_map;	/* Bitmask of started secondaries */
 cpumask_t cpu_online_map;		/* Bitmask of currently online CPUs */
@@ -85,6 +89,10 @@
 {
 	unsigned int cpu;
 
+#ifdef CONFIG_MIPS_MT_SMTC
+	/* Only do cpu_probe for first TC of CPU */
+	if ((read_c0_tcbind() & TCBIND_CURTC) == 0)
+#endif /* CONFIG_MIPS_MT_SMTC */
 	cpu_probe();
 	cpu_report();
 	per_cpu_trap_init();
@@ -179,11 +187,13 @@
 	if (wait)
 		while (atomic_read(&data.finished) != cpus)
 			barrier();
+	call_data = NULL;
 	spin_unlock(&smp_call_lock);
 
 	return 0;
 }
 
+
 void smp_call_function_interrupt(void)
 {
 	void (*func) (void *info) = call_data->func;
diff --git a/arch/mips/kernel/smtc-asm.S b/arch/mips/kernel/smtc-asm.S
new file mode 100644
index 0000000..c9d6519
--- /dev/null
+++ b/arch/mips/kernel/smtc-asm.S
@@ -0,0 +1,130 @@
+/*
+ * Assembly Language Functions for MIPS MT SMTC support
+ */
+
+/*
+ * This file should be built into the kernel only if
+ * CONFIG_MIPS_MT_SMTC is set.
+ */
+
+#include <asm/regdef.h>
+#include <asm/asmmacro.h>
+#include <asm/stackframe.h>
+
+/*
+ * "Software Interrupt" linkage.
+ *
+ * This is invoked when an "Interrupt" is sent from one TC to another,
+ * where the TC to be interrupted is halted, has its Restart address
+ * and Status values saved by the "remote control" thread, then modified
+ * to cause execution to begin here, in kernel mode. This code then
+ * disguises the TC state as that of an exception and transfers
+ * control to the general exception or vectored interrupt handler.
+ */
+	.set noreorder
+
+/*
+The __smtc_ipi_vector would use k0 and k1 as temporaries and
+1) Set EXL (this is per-VPE, so this can't be done by proxy!)
+2) Restore the K/CU and IXMT bits to the pre "exception" state
+   (EXL means no interrupts and access to the kernel map).
+3) Set EPC to be the saved value of TCRestart.
+4) Jump to the exception handler entry point passed by the sender.
+
+CAN WE PROVE THAT WE WON'T DO THIS IF INTS DISABLED??
+*/
+
+/*
+ * Reviled and slandered vision: Set EXL and restore K/CU/IXMT
+ * state of pre-halt thread, then save everything and call
+ * thought some function pointer to imaginary_exception, which
+ * will parse a register value or memory message queue to
+ * deliver things like interprocessor interrupts. On return
+ * from that function, jump to the global ret_from_irq code
+ * to invoke the scheduler and return as appropriate.
+ */
+
+#define PT_PADSLOT4 (PT_R0-8)
+#define PT_PADSLOT5 (PT_R0-4)
+
+	.text
+	.align 5
+FEXPORT(__smtc_ipi_vector)
+	.set	noat
+	/* Disable thread scheduling to make Status update atomic */
+	DMT	27					# dmt	k1
+	ehb
+	/* Set EXL */
+	mfc0	k0,CP0_STATUS
+	ori	k0,k0,ST0_EXL
+	mtc0	k0,CP0_STATUS
+	ehb
+	/* Thread scheduling now inhibited by EXL. Restore TE state. */
+	andi	k1,k1,VPECONTROL_TE
+	beqz	k1,1f
+	emt
+1:
+	/*
+	 * The IPI sender has put some information on the anticipated
+	 * kernel stack frame.  If we were in user mode, this will be
+	 * built above the saved kernel SP.  If we were already in the
+	 * kernel, it will be built above the current CPU SP.
+	 *
+	 * Were we in kernel mode, as indicated by CU0?
+	 */
+	sll	k1,k0,3
+	.set noreorder
+	bltz	k1,2f
+	move	k1,sp
+	.set reorder
+	/*
+	 * If previously in user mode, set CU0 and use kernel stack.
+	 */
+	li	k1,ST0_CU0
+	or	k1,k1,k0
+	mtc0	k1,CP0_STATUS
+	ehb
+	get_saved_sp
+	/* Interrupting TC will have pre-set values in slots in the new frame */
+2:	subu	k1,k1,PT_SIZE
+	/* Load TCStatus Value */
+	lw	k0,PT_TCSTATUS(k1)
+	/* Write it to TCStatus to restore CU/KSU/IXMT state */
+	mtc0	k0,$2,1
+	ehb
+	lw	k0,PT_EPC(k1)
+	mtc0	k0,CP0_EPC
+	/* Save all will redundantly recompute the SP, but use it for now */
+	SAVE_ALL
+	CLI
+	move	a0,sp
+	/* Function to be invoked passed stack pad slot 5 */
+	lw	t0,PT_PADSLOT5(sp)
+	/* Argument from sender passed in stack pad slot 4 */
+	lw	a1,PT_PADSLOT4(sp)
+	jalr	t0
+	nop
+	j	ret_from_irq
+	nop
+
+/*
+ * Called from idle loop to provoke processing of queued IPIs
+ * First IPI message in queue passed as argument.
+ */
+
+LEAF(self_ipi)
+	/* Before anything else, block interrupts */
+	mfc0	t0,CP0_TCSTATUS
+	ori	t1,t0,TCSTATUS_IXMT
+	mtc0	t1,CP0_TCSTATUS
+	ehb
+	/* We know we're in kernel mode, so prepare stack frame */
+	subu	t1,sp,PT_SIZE
+	sw	ra,PT_EPC(t1)
+	sw	a0,PT_PADSLOT4(t1)
+	la	t2,ipi_decode
+	sw	t2,PT_PADSLOT5(t1)
+	/* Save pre-disable value of TCStatus */
+	sw	t0,PT_TCSTATUS(t1)
+	j	__smtc_ipi_vector
+	nop
+END(self_ipi)
diff --git a/arch/mips/kernel/smtc-proc.c b/arch/mips/kernel/smtc-proc.c
new file mode 100644
index 0000000..6f37099
--- /dev/null
+++ b/arch/mips/kernel/smtc-proc.c
@@ -0,0 +1,93 @@
+/*
+ * /proc hooks for SMTC kernel
+ * Copyright (C) 2005 Mips Technologies, Inc
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+
+#include <asm/cpu.h>
+#include <asm/processor.h>
+#include <asm/atomic.h>
+#include <asm/system.h>
+#include <asm/hardirq.h>
+#include <asm/mmu_context.h>
+#include <asm/smp.h>
+#include <asm/mipsregs.h>
+#include <asm/cacheflush.h>
+#include <linux/proc_fs.h>
+
+#include <asm/smtc_proc.h>
+
+/*
+ * /proc diagnostic and statistics hooks
+ */
+
+/*
+ * Statistics gathered
+ */
+unsigned long selfipis[NR_CPUS];
+
+struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS];
+
+static struct proc_dir_entry *smtc_stats;
+
+atomic_t smtc_fpu_recoveries;
+
+static int proc_read_smtc(char *page, char **start, off_t off,
+                          int count, int *eof, void *data)
+{
+	int totalen = 0;
+	int len;
+	int i;
+	extern unsigned long ebase;
+
+	len = sprintf(page, "SMTC Status Word: 0x%08x\n", smtc_status);
+	totalen += len;
+	page += len;
+	len = sprintf(page, "Config7: 0x%08x\n", read_c0_config7());
+	totalen += len;
+	page += len;
+	len = sprintf(page, "EBASE: 0x%08lx\n", ebase);
+	totalen += len;
+	page += len;
+	len = sprintf(page, "Counter Interrupts taken per CPU (TC)\n");
+	totalen += len;
+	page += len;
+	for (i=0; i < NR_CPUS; i++) {
+		len = sprintf(page, "%d: %ld\n", i, smtc_cpu_stats[i].timerints);
+		totalen += len;
+		page += len;
+	}
+	len = sprintf(page, "Self-IPIs by CPU:\n");
+	totalen += len;
+	page += len;
+	for(i = 0; i < NR_CPUS; i++) {
+		len = sprintf(page, "%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
+		totalen += len;
+		page += len;
+	}
+	len = sprintf(page, "%d Recoveries of \"stolen\" FPU\n",
+	              atomic_read(&smtc_fpu_recoveries));
+	totalen += len;
+	page += len;
+
+	return totalen;
+}
+
+void init_smtc_stats(void)
+{
+	int i;
+
+	for (i=0; i<NR_CPUS; i++) {
+		smtc_cpu_stats[i].timerints = 0;
+		smtc_cpu_stats[i].selfipis = 0;
+	}
+
+	atomic_set(&smtc_fpu_recoveries, 0);
+
+	smtc_stats = create_proc_read_entry("smtc", 0444, NULL,
+	                                    proc_read_smtc, NULL);
+}
diff --git a/arch/mips/kernel/smtc.c b/arch/mips/kernel/smtc.c
new file mode 100644
index 0000000..2e8e52c
--- /dev/null
+++ b/arch/mips/kernel/smtc.c
@@ -0,0 +1,1322 @@
+/* Copyright (C) 2004 Mips Technologies, Inc */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+
+#include <asm/cpu.h>
+#include <asm/processor.h>
+#include <asm/atomic.h>
+#include <asm/system.h>
+#include <asm/hardirq.h>
+#include <asm/hazards.h>
+#include <asm/mmu_context.h>
+#include <asm/smp.h>
+#include <asm/mipsregs.h>
+#include <asm/cacheflush.h>
+#include <asm/time.h>
+#include <asm/addrspace.h>
+#include <asm/smtc.h>
+#include <asm/smtc_ipi.h>
+#include <asm/smtc_proc.h>
+
+/*
+ * This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set.
+ */
+
+/*
+ * MIPSCPU_INT_BASE is identically defined in both
+ * asm-mips/mips-boards/maltaint.h and asm-mips/mips-boards/simint.h,
+ * but as yet there's no properly organized include structure that
+ * will ensure that the right *int.h file will be included for a
+ * given platform build.
+ */
+
+#define MIPSCPU_INT_BASE	16
+
+#define MIPS_CPU_IPI_IRQ	1
+
+#define LOCK_MT_PRA() \
+	local_irq_save(flags); \
+	mtflags = dmt()
+
+#define UNLOCK_MT_PRA() \
+	emt(mtflags); \
+	local_irq_restore(flags)
+
+#define LOCK_CORE_PRA() \
+	local_irq_save(flags); \
+	mtflags = dvpe()
+
+#define UNLOCK_CORE_PRA() \
+	evpe(mtflags); \
+	local_irq_restore(flags)
+
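+/*
+ * Usage sketch (cf. smtc_boot_secondary() below): the macros expand in
+ * place and expect "flags" and "mtflags" locals in the enclosing scope:
+ *
+ *	long flags;
+ *	int mtflags;
+ *
+ *	LOCK_MT_PRA();
+ *	... section that must be atomic w.r.t. other TCs on this VPE ...
+ *	UNLOCK_MT_PRA();
+ */
+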
+/*
+ * Data structures purely associated with SMTC parallelism
+ */
+
+
+/*
+ * Table for tracking ASIDs whose lifetime is prolonged.
+ */
+
+asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
+
+/*
+ * Clock interrupt "latch" buffers, per "CPU"
+ */
+
+unsigned int ipi_timer_latch[NR_CPUS];
+
+/*
+ * Number of InterProcessor Interrupt (IPI) message buffers to allocate
+ */
+
+#define IPIBUF_PER_CPU 4
+
+struct smtc_ipi_q IPIQ[NR_CPUS];
+struct smtc_ipi_q freeIPIq;
+
+
+/* Forward declarations */
+
+void ipi_decode(struct pt_regs *, struct smtc_ipi *);
+void post_direct_ipi(int cpu, struct smtc_ipi *pipi);
+void setup_cross_vpe_interrupts(void);
+void init_smtc_stats(void);
+
+/* Global SMTC Status */
+
+unsigned int smtc_status = 0;
+
+/* Boot command line configuration overrides */
+
+static int vpelimit = 0;
+static int tclimit = 0;
+static int ipibuffers = 0;
+static int nostlb = 0;
+static int asidmask = 0;
+unsigned long smtc_asid_mask = 0xff;
+
+static int __init maxvpes(char *str)
+{
+	get_option(&str, &vpelimit);
+	return 1;
+}
+
+static int __init maxtcs(char *str)
+{
+	get_option(&str, &tclimit);
+	return 1;
+}
+
+static int __init ipibufs(char *str)
+{
+	get_option(&str, &ipibuffers);
+	return 1;
+}
+
+static int __init stlb_disable(char *s)
+{
+	nostlb = 1;
+	return 1;
+}
+
+static int __init asidmask_set(char *str)
+{
+	get_option(&str, &asidmask);
+	switch(asidmask) {
+	case 0x1:
+	case 0x3:
+	case 0x7:
+	case 0xf:
+	case 0x1f:
+	case 0x3f:
+	case 0x7f:
+	case 0xff:
+		smtc_asid_mask = (unsigned long)asidmask;
+		break;
+	default:
+		printk("ILLEGAL ASID mask 0x%x from command line\n", asidmask);
+	}
+	return 1;
+}
+
+__setup("maxvpes=", maxvpes);
+__setup("maxtcs=", maxtcs);
+__setup("ipibufs=", ipibufs);
+__setup("nostlb", stlb_disable);
+__setup("asidmask=", asidmask_set);
+
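+/*
+ * Example (illustrative values): booting with "maxvpes=2 maxtcs=4
+ * ipibufs=8 asidmask=0x3f" limits the SMTC configuration accordingly,
+ * and "nostlb" inhibits shared-TLB setup in smtc_configure_tlb().
+ */
+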
+/* Enable additional debug checks before going into CPU idle loop */
+#define SMTC_IDLE_HOOK_DEBUG
+
+#ifdef SMTC_IDLE_HOOK_DEBUG
+
+static int hang_trig = 0;
+
+static int __init hangtrig_enable(char *s)
+{
+	hang_trig = 1;
+	return 1;
+}
+
+
+__setup("hangtrig", hangtrig_enable);
+
+#define DEFAULT_BLOCKED_IPI_LIMIT 32
+
+static int timerq_limit = DEFAULT_BLOCKED_IPI_LIMIT;
+
+static int __init tintq(char *str)
+{
+	get_option(&str, &timerq_limit);
+	return 1;
+}
+
+__setup("tintq=", tintq);
+
+int imstuckcount[2][8];
+/* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */
+int vpemask[2][8] = {{0,1,1,0,0,0,0,1},{0,1,0,0,0,0,0,1}};
+int tcnoprog[NR_CPUS];
+static atomic_t idle_hook_initialized = {0};
+static int clock_hang_reported[NR_CPUS];
+
+#endif /* SMTC_IDLE_HOOK_DEBUG */
+
+/* Initialize shared TLB - this should probably migrate to smtc_setup_cpus() */
+
+void __init sanitize_tlb_entries(void)
+{
+	printk("Deprecated sanitize_tlb_entries() invoked\n");
+}
+
+
+/*
+ * Configure shared TLB - VPC configuration bit must be set by caller
+ */
+
+void smtc_configure_tlb(void)
+{
+	int i,tlbsiz,vpes;
+	unsigned long mvpconf0;
+	unsigned long config1val;
+
+	/* Set up ASID preservation table */
+	for (vpes=0; vpes<MAX_SMTC_TLBS; vpes++) {
+	    for(i = 0; i < MAX_SMTC_ASIDS; i++) {
+		smtc_live_asid[vpes][i] = 0;
+	    }
+	}
+	mvpconf0 = read_c0_mvpconf0();
+
+	if ((vpes = ((mvpconf0 & MVPCONF0_PVPE)
+			>> MVPCONF0_PVPE_SHIFT) + 1) > 1) {
+	    /* If we have multiple VPEs, try to share the TLB */
+	    if ((mvpconf0 & MVPCONF0_TLBS) && !nostlb) {
+		/*
+		 * If TLB sizing is programmable, shared TLB
+		 * size is the total available complement.
+		 * Otherwise, we have to take the sum of all
+		 * static VPE TLB entries.
+		 */
+		if ((tlbsiz = ((mvpconf0 & MVPCONF0_PTLBE)
+				>> MVPCONF0_PTLBE_SHIFT)) == 0) {
+		    /*
+		     * If there's more than one VPE, there had better
+		     * be more than one TC, because we need one to bind
+		     * to each VPE in turn to be able to read
+		     * its configuration state!
+		     */
+		    settc(1);
+		    /* Stop the TC from doing anything foolish */
+		    write_tc_c0_tchalt(TCHALT_H);
+		    mips_ihb();
+		    /* No need to un-Halt - that happens later anyway */
+		    for (i=0; i < vpes; i++) {
+		    	write_tc_c0_tcbind(i);
+			/*
+			 * To be 100% sure we're really getting the right
+			 * information, we exit the configuration state
+			 * and do an IHB after each rebinding.
+			 */
+			write_c0_mvpcontrol(
+				read_c0_mvpcontrol() & ~ MVPCONTROL_VPC );
+			mips_ihb();
+			/*
+			 * Only count if the MMU Type indicated is TLB
+			 */
+			if(((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) {
+				config1val = read_vpe_c0_config1();
+				tlbsiz += ((config1val >> 25) & 0x3f) + 1;
+			}
+
+			/* Put core back in configuration state */
+			write_c0_mvpcontrol(
+				read_c0_mvpcontrol() | MVPCONTROL_VPC );
+			mips_ihb();
+		    }
+		}
+		write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB);
+
+		/*
+		 * Setup kernel data structures to use software total,
+		 * rather than read the per-VPE Config1 value. The values
+		 * for "CPU 0" gets copied to all the other CPUs as part
+		 * of their initialization in smtc_cpu_setup().
+		 */
+
+		tlbsiz = tlbsiz & 0x3f;	/* MIPS32 limits TLB indices to 64 */
+		cpu_data[0].tlbsize = tlbsiz;
+		smtc_status |= SMTC_TLB_SHARED;
+
+		printk("TLB of %d entry pairs shared by %d VPEs\n",
+			tlbsiz, vpes);
+	    } else {
+		printk("WARNING: TLB Not Sharable on SMTC Boot!\n");
+	    }
+	}
+}
+
+
+/*
+ * Incrementally build the CPU map out of constituent MIPS MT cores,
+ * using the specified available VPEs and TCs.  Platform code needs
+ * to ensure that each MIPS MT core invokes this routine on reset,
+ * one at a time(!).
+ *
+ * This version of the build_cpu_map and prepare_cpus routines assumes
+ * that *all* TCs of a MIPS MT core will be used for Linux, and that
+ * they will be spread across *all* available VPEs (to minimise the
+ * loss of efficiency due to exception service serialization).
+ * An improved version would pick up configuration information and
+ * possibly leave some TCs/VPEs as "slave" processors.
+ *
+ * Use c0_MVPConf0 to find out how many TCs are available, setting up
+ * phys_cpu_present_map and the logical/physical mappings.
+ */
+
+int __init mipsmt_build_cpu_map(int start_cpu_slot)
+{
+	int i, ntcs;
+
+	/*
+	 * The CPU map isn't actually used for anything at this point,
+	 * so it's not clear what else we should do apart from set
+	 * everything up so that "logical" = "physical".
+	 */
+	ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
+	for (i=start_cpu_slot; i<NR_CPUS && i<ntcs; i++) {
+		cpu_set(i, phys_cpu_present_map);
+		__cpu_number_map[i] = i;
+		__cpu_logical_map[i] = i;
+	}
+	/* Initialize map of CPUs with FPUs */
+	cpus_clear(mt_fpu_cpumask);
+
+	/* One of those TCs is the one booting, and is not a secondary... */
+	printk("%i available secondary CPU TC(s)\n", i - 1);
+
+	return i;
+}
+
+/*
+ * Common setup before any secondaries are started
+ * Make sure all CPU's are in a sensible state before we boot any of the
+ * secondaries.
+ *
+ * For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly
+ * as possible across the available VPEs.
+ */
+
+static void smtc_tc_setup(int vpe, int tc, int cpu)
+{
+	settc(tc);
+	write_tc_c0_tchalt(TCHALT_H);
+	mips_ihb();
+	write_tc_c0_tcstatus((read_tc_c0_tcstatus()
+			& ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT))
+			| TCSTATUS_A);
+	write_tc_c0_tccontext(0);
+	/* Bind tc to vpe */
+	write_tc_c0_tcbind(vpe);
+	/* In general, all TCs should have the same cpu_data indications */
+	memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips));
+	/* For 34Kf, start with TC/CPU 0 as sole owner of single FPU context */
+	if (cpu_data[0].cputype == CPU_34K)
+		cpu_data[cpu].options &= ~MIPS_CPU_FPU;
+	cpu_data[cpu].vpe_id = vpe;
+	cpu_data[cpu].tc_id = tc;
+}
+
+
+void mipsmt_prepare_cpus(void)
+{
+	int i, vpe, tc, ntc, nvpe, tcpervpe, slop, cpu;
+	unsigned long flags;
+	unsigned long val;
+	int nipi;
+	struct smtc_ipi *pipi;
+
+	/* disable interrupts so we can disable MT */
+	local_irq_save(flags);
+	/* disable MT so we can configure */
+	dvpe();
+	dmt();
+
+	freeIPIq.lock = SPIN_LOCK_UNLOCKED;
+
+	/*
+	 * We probably don't have as many VPEs as we do SMP "CPUs",
+	 * but it's possible - and in any case we'll never use more!
+	 */
+	for (i=0; i<NR_CPUS; i++) {
+		IPIQ[i].head = IPIQ[i].tail = NULL;
+		IPIQ[i].lock = SPIN_LOCK_UNLOCKED;
+		IPIQ[i].depth = 0;
+		ipi_timer_latch[i] = 0;
+	}
+
+	/* cpu_data index starts at zero */
+	cpu = 0;
+	cpu_data[cpu].vpe_id = 0;
+	cpu_data[cpu].tc_id = 0;
+	cpu++;
+
+	/* Report on boot-time options */
+	mips_mt_set_cpuoptions();
+	if (vpelimit > 0)
+		printk("Limit of %d VPEs set\n", vpelimit);
+	if (tclimit > 0)
+		printk("Limit of %d TCs set\n", tclimit);
+	if (nostlb) {
+		printk("Shared TLB Use Inhibited - UNSAFE for Multi-VPE Operation\n");
+	}
+	if (asidmask)
+		printk("ASID mask value override to 0x%x\n", asidmask);
+
+	/* Temporary */
+#ifdef SMTC_IDLE_HOOK_DEBUG
+	if (hang_trig)
+		printk("Logic Analyser Trigger on suspected TC hang\n");
+#endif /* SMTC_IDLE_HOOK_DEBUG */
+
+	/* Put MVPE's into 'configuration state' */
+	write_c0_mvpcontrol( read_c0_mvpcontrol() | MVPCONTROL_VPC );
+
+	val = read_c0_mvpconf0();
+	nvpe = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
+	if (vpelimit > 0 && nvpe > vpelimit)
+		nvpe = vpelimit;
+	ntc = ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
+	if (ntc > NR_CPUS)
+		ntc = NR_CPUS;
+	if (tclimit > 0 && ntc > tclimit)
+		ntc = tclimit;
+	tcpervpe = ntc / nvpe;
+	slop = ntc % nvpe;	/* Residual TCs, < NVPE */
+
+	/* Set up shared TLB */
+	smtc_configure_tlb();
+
+	for (tc = 0, vpe = 0 ; (vpe < nvpe) && (tc < ntc) ; vpe++) {
+		/*
+		 * Set the MVP bits.
+		 */
+		settc(tc);
+		write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_MVP);
+		if (vpe != 0)
+			printk(", ");
+		printk("VPE %d: TC", vpe);
+		for (i = 0; i < tcpervpe; i++) {
+			/*
+			 * TC 0 is bound to VPE 0 at reset,
+			 * and is presumably executing this
+			 * code.  Leave it alone!
+			 */
+			if (tc != 0) {
+				smtc_tc_setup(vpe, tc, cpu);
+				cpu++;
+			}
+			printk(" %d", tc);
+			tc++;
+		}
+		if (slop) {
+			if (tc != 0) {
+				smtc_tc_setup(vpe, tc, cpu);
+				cpu++;
+			}
+			printk(" %d", tc);
+			tc++;
+			slop--;
+		}
+		if (vpe != 0) {
+			/*
+			 * Clear any stale software interrupts from VPE's Cause
+			 */
+			write_vpe_c0_cause(0);
+
+			/*
+			 * Clear ERL/EXL of VPEs other than 0
+			 * and set restricted interrupt enable/mask.
+			 */
+			write_vpe_c0_status((read_vpe_c0_status()
+				& ~(ST0_BEV | ST0_ERL | ST0_EXL | ST0_IM))
+				| (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7
+				| ST0_IE));
+			/*
+			 * set config to be the same as vpe0,
+			 *  particularly kseg0 coherency alg
+			 */
+			write_vpe_c0_config(read_c0_config());
+			/* Clear any pending timer interrupt */
+			write_vpe_c0_compare(0);
+			/* Propagate Config7 */
+			write_vpe_c0_config7(read_c0_config7());
+		}
+		/* enable multi-threading within VPE */
+		write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE);
+		/* enable the VPE */
+		write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
+	}
+
+	/*
+	 * Pull any physically present but unused TCs out of circulation.
+	 */
+	while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) {
+		cpu_clear(tc, phys_cpu_present_map);
+		cpu_clear(tc, cpu_present_map);
+		tc++;
+	}
+
+	/* release config state */
+	write_c0_mvpcontrol( read_c0_mvpcontrol() & ~ MVPCONTROL_VPC );
+
+	printk("\n");
+
+	/* Set up coprocessor affinity CPU mask(s) */
+
+	for (tc = 0; tc < ntc; tc++) {
+		if (cpu_data[tc].options & MIPS_CPU_FPU)
+			cpu_set(tc, mt_fpu_cpumask);
+	}
+
+	/* set up ipi interrupts... */
+
+	/* If we have multiple VPEs running, set up the cross-VPE interrupt */
+
+	if (nvpe > 1)
+		setup_cross_vpe_interrupts();
+
+	/* Set up queue of free IPI "messages". */
+	nipi = NR_CPUS * IPIBUF_PER_CPU;
+	if (ipibuffers > 0)
+		nipi = ipibuffers;
+
+	pipi = kmalloc(nipi * sizeof(struct smtc_ipi), GFP_KERNEL);
+	if (pipi == NULL)
+		panic("kmalloc of IPI message buffers failed\n");
+	else
+		printk("IPI buffer pool of %d buffers\n", nipi);
+	for (i = 0; i < nipi; i++) {
+		smtc_ipi_nq(&freeIPIq, pipi);
+		pipi++;
+	}
+
+	/* Arm multithreading and enable other VPEs - but all TCs are Halted */
+	emt(EMT_ENABLE);
+	evpe(EVPE_ENABLE);
+	local_irq_restore(flags);
+	/* Initialize SMTC /proc statistics/diagnostics */
+	init_smtc_stats();
+}
+
+
+/*
+ * Setup the PC, SP, and GP of a secondary processor and start it
+ * running!
+ * smp_bootstrap is the place to resume from
+ * __KSTK_TOS(idle) is apparently the stack pointer
+ * (unsigned long)idle->thread_info the gp
+ *
+ */
+void smtc_boot_secondary(int cpu, struct task_struct *idle)
+{
+	extern u32 kernelsp[NR_CPUS];
+	long flags;
+	int mtflags;
+
+	LOCK_MT_PRA();
+	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
+		dvpe();
+	}
+	settc(cpu_data[cpu].tc_id);
+
+	/* pc */
+	write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);
+
+	/* stack pointer */
+	kernelsp[cpu] = __KSTK_TOS(idle);
+	write_tc_gpr_sp(__KSTK_TOS(idle));
+
+	/* global pointer */
+	write_tc_gpr_gp((unsigned long)idle->thread_info);
+
+	smtc_status |= SMTC_MTC_ACTIVE;
+	write_tc_c0_tchalt(0);
+	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
+		evpe(EVPE_ENABLE);
+	}
+	UNLOCK_MT_PRA();
+}
+
+void smtc_init_secondary(void)
+{
+	/*
+	 * Start timer on secondary VPEs if necessary.
+	 * mips_timer_setup should already have been invoked by init/main
+	 * on "boot" TC.  Like per_cpu_trap_init() hack, this assumes that
+	 * SMTC init code assigns TCs consecutively and in ascending order
+	 * across available VPEs.
+	 */
+	if(((read_c0_tcbind() & TCBIND_CURTC) != 0)
+	&& ((read_c0_tcbind() & TCBIND_CURVPE)
+	    != cpu_data[smp_processor_id() - 1].vpe_id)){
+		write_c0_compare (read_c0_count() + mips_hpt_frequency/HZ);
+	}
+
+	local_irq_enable();
+}
+
+void smtc_smp_finish(void)
+{
+	printk("TC %d going on-line as CPU %d\n",
+		cpu_data[smp_processor_id()].tc_id, smp_processor_id());
+}
+
+void smtc_cpus_done(void)
+{
+}
+
+/*
+ * Support for SMTC-optimized driver IRQ registration
+ */
+
+/*
+ * SMTC Kernel needs to manipulate low-level CPU interrupt mask
+ * in do_IRQ. These are passed in setup_irq_smtc() and stored
+ * in this table.
+ */
+
+int setup_irq_smtc(unsigned int irq, struct irqaction * new,
+			unsigned long hwmask)
+{
+	irq_hwmask[irq] = hwmask;
+
+	return setup_irq(irq, new);
+}
+
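+/*
+ * A platform would typically call this instead of setup_irq() for an
+ * interrupt whose Status.IM bit SMTC should re-arm after deferred
+ * handling, e.g. (hypothetical irqaction, CPU timer on IP7):
+ *
+ *	setup_irq_smtc(MIPSCPU_INT_BASE + 7, &timer_irqaction, STATUSF_IP7);
+ *
+ * The hwmask value is stored in irq_hwmask[] and used by the interrupt
+ * ack/end code (see the i8259 and MSC01 changes in this patch).
+ */
+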
+/*
+ * IPI model for SMTC is tricky, because interrupts aren't TC-specific.
+ * Within a VPE one TC can interrupt another by different approaches.
+ * The easiest to get right would probably be to make all TCs except
+ * the target IXMT and set a software interrupt, but an IXMT-based
+ * scheme requires that a handler must run before a new IPI could
+ * be sent, which would break the "broadcast" loops in MIPS MT.
+ * A more gonzo approach within a VPE is to halt the TC, extract
+ * its Restart, Status, and a couple of GPRs, and program the Restart
+ * address to emulate an interrupt.
+ *
+ * Within a VPE, one can be confident that the target TC isn't in
+ * a critical EXL state when halted, since the write to the Halt
+ * register could not have issued on the writing thread if the
+ * halting thread had EXL set. So k0 and k1 of the target TC
+ * can be used by the injection code.  Across VPEs, one can't
+ * be certain that the target TC isn't in a critical exception
+ * state. So we try a two-step process of sending a software
+ * interrupt to the target VPE, which either handles the event
+ * itself (if it was the target) or injects the event within
+ * the VPE.
+ */
+
+void smtc_ipi_qdump(void)
+{
+	int i;
+
+	for (i = 0; i < NR_CPUS ;i++) {
+		printk("IPIQ[%d]: head = 0x%x, tail = 0x%x, depth = %d\n",
+			i, (unsigned)IPIQ[i].head, (unsigned)IPIQ[i].tail,
+			IPIQ[i].depth);
+	}
+}
+
+/*
+ * The standard atomic.h primitives don't quite do what we want
+ * here: We need an atomic add-and-return-previous-value (which
+ * could be done with atomic_add_return and a decrement) and an
+ * atomic set/zero-and-return-previous-value (which can't really
+ * be done with the atomic.h primitives). And since this is
+ * MIPS MT, we can assume that we have LL/SC.
+ */
+static __inline__ int atomic_postincrement(unsigned int *pv)
+{
+	unsigned long result;
+
+	unsigned long temp;
+
+	__asm__ __volatile__(
+	"1:	ll	%0, %2					\n"
+	"	addu	%1, %0, 1				\n"
+	"	sc	%1, %2					\n"
+	"	beqz	%1, 1b					\n"
+	"	sync						\n"
+	: "=&r" (result), "=&r" (temp), "=m" (*pv)
+	: "m" (*pv)
+	: "memory");
+
+	return result;
+}
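+
+/*
+ * Semantically this is roughly atomic_add_return(1, v) - 1, open-coded
+ * so that the pre-increment value falls straight out of the LL.
+ */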
+
+/* No longer used in IPI dispatch, but retained for future recycling */
+
+static __inline__ int atomic_postclear(unsigned int *pv)
+{
+	unsigned long result;
+
+	unsigned long temp;
+
+	__asm__ __volatile__(
+	"1:	ll	%0, %2					\n"
+	"	or	%1, $0, $0				\n"
+	"	sc	%1, %2					\n"
+	"	beqz	%1, 1b					\n"
+	"	sync						\n"
+	: "=&r" (result), "=&r" (temp), "=m" (*pv)
+	: "m" (*pv)
+	: "memory");
+
+	return result;
+}
+
+
+void smtc_send_ipi(int cpu, int type, unsigned int action)
+{
+	int tcstatus;
+	struct smtc_ipi *pipi;
+	long flags;
+	int mtflags;
+
+	if (cpu == smp_processor_id()) {
+		printk("Cannot Send IPI to self!\n");
+		return;
+	}
+	/* Set up a descriptor, to be delivered either promptly or queued */
+	pipi = smtc_ipi_dq(&freeIPIq);
+	if (pipi == NULL) {
+		bust_spinlocks(1);
+		mips_mt_regdump(dvpe());
+		panic("IPI Msg. Buffers Depleted\n");
+	}
+	pipi->type = type;
+	pipi->arg = (void *)action;
+	pipi->dest = cpu;
+	if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
+		/* If not on same VPE, enqueue and send cross-VPE interrupt */
+		smtc_ipi_nq(&IPIQ[cpu], pipi);
+		LOCK_CORE_PRA();
+		settc(cpu_data[cpu].tc_id);
+		write_vpe_c0_cause(read_vpe_c0_cause() | C_SW1);
+		UNLOCK_CORE_PRA();
+	} else {
+		/*
+		 * Not sufficient to do a LOCK_MT_PRA (dmt) here,
+		 * since ASID shootdown on the other VPE may
+		 * collide with this operation.
+		 */
+		LOCK_CORE_PRA();
+		settc(cpu_data[cpu].tc_id);
+		/* Halt the targeted TC */
+		write_tc_c0_tchalt(TCHALT_H);
+		mips_ihb();
+
+		/*
+		 * Inspect TCStatus - if IXMT is set, we have to queue
+		 * a message. Otherwise, we set up the "interrupt"
+		 * of the other TC.
+		 */
+		tcstatus = read_tc_c0_tcstatus();
+
+		if ((tcstatus & TCSTATUS_IXMT) != 0) {
+			/*
+			 * Spin-waiting here can deadlock,
+			 * so we queue the message for the target TC.
+			 */
+			write_tc_c0_tchalt(0);
+			UNLOCK_CORE_PRA();
+			/* Try to reduce redundant timer interrupt messages */
+			if (type == SMTC_CLOCK_TICK) {
+			    if (atomic_postincrement(&ipi_timer_latch[cpu]) != 0) {
+				smtc_ipi_nq(&freeIPIq, pipi);
+				return;
+			    }
+			}
+			smtc_ipi_nq(&IPIQ[cpu], pipi);
+		} else {
+			post_direct_ipi(cpu, pipi);
+			write_tc_c0_tchalt(0);
+			UNLOCK_CORE_PRA();
+		}
+	}
+}
+
+/*
+ * Send IPI message to Halted TC, TargTC/TargVPE already having been set
+ */
+void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
+{
+	struct pt_regs *kstack;
+	unsigned long tcstatus;
+	unsigned long tcrestart;
+	extern u32 kernelsp[NR_CPUS];
+	extern void __smtc_ipi_vector(void);
+
+	/* Extract Status, EPC from halted TC */
+	tcstatus = read_tc_c0_tcstatus();
+	tcrestart = read_tc_c0_tcrestart();
+	/* If TCRestart indicates a WAIT instruction, advance the PC */
+	if ((tcrestart & 0x80000000)
+	    && ((*(unsigned int *)tcrestart & 0xfe00003f) == 0x42000020)) {
+		tcrestart += 4;
+	}
+	/*
+	 * Save on TC's future kernel stack
+	 *
+	 * CU bit of Status is indicator that TC was
+	 * already running on a kernel stack...
+	 */
+	if (tcstatus & ST0_CU0)  {
+		/* Note that this "- 1" is pointer arithmetic */
+		kstack = ((struct pt_regs *)read_tc_gpr_sp()) - 1;
+	} else {
+		kstack = ((struct pt_regs *)kernelsp[cpu]) - 1;
+	}
+
+	kstack->cp0_epc = (long)tcrestart;
+	/* Save TCStatus */
+	kstack->cp0_tcstatus = tcstatus;
+	/* Pass token of operation to be performed in kernel stack pad area */
+	kstack->pad0[4] = (unsigned long)pipi;
+	/* Pass address of function to be called likewise */
+	kstack->pad0[5] = (unsigned long)&ipi_decode;
+	/* Set interrupt exempt and kernel mode */
+	tcstatus |= TCSTATUS_IXMT;
+	tcstatus &= ~TCSTATUS_TKSU;
+	write_tc_c0_tcstatus(tcstatus);
+	ehb();
+	/* Set TC Restart address to be SMTC IPI vector */
+	write_tc_c0_tcrestart(__smtc_ipi_vector);
+}
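+
+/*
+ * When the halted TC is released it resumes at __smtc_ipi_vector (in
+ * smtc-asm.S), which is expected to pick up the smtc_ipi descriptor and
+ * the ipi_decode address from the pad0[] slots written above and invoke
+ * the handler on the target's kernel stack.
+ */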
+
+void ipi_resched_interrupt(struct pt_regs *regs)
+{
+	/* Return from interrupt should be enough to cause scheduler check */
+}
+
+
+void ipi_call_interrupt(struct pt_regs *regs)
+{
+	/* Invoke generic function invocation code in smp.c */
+	smp_call_function_interrupt();
+}
+
+void ipi_decode(struct pt_regs *regs, struct smtc_ipi *pipi)
+{
+	void *arg_copy = pipi->arg;
+	int type_copy = pipi->type;
+	int dest_copy = pipi->dest;
+
+	smtc_ipi_nq(&freeIPIq, pipi);
+	switch (type_copy) {
+		case SMTC_CLOCK_TICK:
+			/* Invoke Clock "Interrupt" */
+			ipi_timer_latch[dest_copy] = 0;
+#ifdef SMTC_IDLE_HOOK_DEBUG
+			clock_hang_reported[dest_copy] = 0;
+#endif /* SMTC_IDLE_HOOK_DEBUG */
+			local_timer_interrupt(0, NULL, regs);
+			break;
+		case LINUX_SMP_IPI:
+			switch ((int)arg_copy) {
+			case SMP_RESCHEDULE_YOURSELF:
+				ipi_resched_interrupt(regs);
+				break;
+			case SMP_CALL_FUNCTION:
+				ipi_call_interrupt(regs);
+				break;
+			default:
+				printk("Impossible SMTC IPI Argument 0x%x\n",
+					(int)arg_copy);
+				break;
+			}
+			break;
+		default:
+			printk("Impossible SMTC IPI Type 0x%x\n", type_copy);
+			break;
+	}
+}
+
+void deferred_smtc_ipi(struct pt_regs *regs)
+{
+	struct smtc_ipi *pipi;
+	unsigned long flags;
+/* DEBUG */
+	int q = smp_processor_id();
+
+	/*
+	 * Test is not atomic, but much faster than a dequeue,
+	 * and the vast majority of invocations will have a null queue.
+	 */
+	if (IPIQ[q].head != NULL) {
+		while ((pipi = smtc_ipi_dq(&IPIQ[q])) != NULL) {
+			/* ipi_decode() should be called with interrupts off */
+			local_irq_save(flags);
+			ipi_decode(regs, pipi);
+			local_irq_restore(flags);
+		}
+	}
+}
+
+/*
+ * Send clock tick to all TCs except the one executing the function
+ */
+
+void smtc_timer_broadcast(int vpe)
+{
+	int cpu;
+	int myTC = cpu_data[smp_processor_id()].tc_id;
+	int myVPE = cpu_data[smp_processor_id()].vpe_id;
+
+	smtc_cpu_stats[smp_processor_id()].timerints++;
+
+	for_each_online_cpu(cpu) {
+		if (cpu_data[cpu].vpe_id == myVPE &&
+		    cpu_data[cpu].tc_id != myTC)
+			smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
+	}
+}
+
+/*
+ * Cross-VPE interrupts in the SMTC prototype use "software interrupts"
+ * set via cross-VPE MTTR manipulation of the Cause register. It would be
+ * in some regards preferable to have external logic for "doorbell" hardware
+ * interrupts.
+ */
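+
+/*
+ * The "doorbell" side of this, as already used in smtc_send_ipi() above:
+ *
+ *	LOCK_CORE_PRA();
+ *	settc(cpu_data[cpu].tc_id);
+ *	write_vpe_c0_cause(read_vpe_c0_cause() | C_SW1);
+ *	UNLOCK_CORE_PRA();
+ */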
+
+static int cpu_ipi_irq = MIPSCPU_INT_BASE + MIPS_CPU_IPI_IRQ;
+
+static irqreturn_t ipi_interrupt(int irq, void *dev_id, struct pt_regs *regs)
+{
+	int my_vpe = cpu_data[smp_processor_id()].vpe_id;
+	int my_tc = cpu_data[smp_processor_id()].tc_id;
+	int cpu;
+	struct smtc_ipi *pipi;
+	unsigned long tcstatus;
+	int sent;
+	long flags;
+	unsigned int mtflags;
+	unsigned int vpflags;
+
+	/*
+	 * So long as cross-VPE interrupts are done via
+	 * MFTR/MTTR read-modify-writes of Cause, we need
+	 * to stop other VPEs whenever the local VPE does
+	 * anything similar.
+	 */
+	local_irq_save(flags);
+	vpflags = dvpe();
+	clear_c0_cause(0x100 << MIPS_CPU_IPI_IRQ);
+	set_c0_status(0x100 << MIPS_CPU_IPI_IRQ);
+	irq_enable_hazard();
+	evpe(vpflags);
+	local_irq_restore(flags);
+
+	/*
+	 * Cross-VPE Interrupt handler: Try to directly deliver IPIs
+	 * queued for TCs on this VPE other than the current one.
+	 * Return-from-interrupt should cause us to drain the queue
+	 * for the current TC, so we ought not to have to do it explicitly here.
+	 */
+
+	for_each_online_cpu(cpu) {
+		if (cpu_data[cpu].vpe_id != my_vpe)
+			continue;
+
+		pipi = smtc_ipi_dq(&IPIQ[cpu]);
+		if (pipi != NULL) {
+			if (cpu_data[cpu].tc_id != my_tc) {
+				sent = 0;
+				LOCK_MT_PRA();
+				settc(cpu_data[cpu].tc_id);
+				write_tc_c0_tchalt(TCHALT_H);
+				mips_ihb();
+				tcstatus = read_tc_c0_tcstatus();
+				if ((tcstatus & TCSTATUS_IXMT) == 0) {
+					post_direct_ipi(cpu, pipi);
+					sent = 1;
+				}
+				write_tc_c0_tchalt(0);
+				UNLOCK_MT_PRA();
+				if (!sent) {
+					smtc_ipi_nq(&IPIQ[cpu], pipi);
+				}
+			} else {
+				/*
+				 * ipi_decode() should be called
+				 * with interrupts off
+				 */
+				local_irq_save(flags);
+				ipi_decode(regs, pipi);
+				local_irq_restore(flags);
+			}
+		}
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void ipi_irq_dispatch(struct pt_regs *regs)
+{
+	do_IRQ(cpu_ipi_irq, regs);
+}
+
+static struct irqaction irq_ipi;
+
+void setup_cross_vpe_interrupts(void)
+{
+	if (!cpu_has_vint)
+		panic("SMTC Kernel requires Vectored Interupt support");
+
+	set_vi_handler(MIPS_CPU_IPI_IRQ, ipi_irq_dispatch);
+
+	irq_ipi.handler = ipi_interrupt;
+	irq_ipi.flags = SA_INTERRUPT;
+	irq_ipi.name = "SMTC_IPI";
+
+	setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ));
+
+	irq_desc[cpu_ipi_irq].status |= IRQ_PER_CPU;
+}
+
+/*
+ * SMTC-specific hacks invoked from elsewhere in the kernel.
+ */
+
+void smtc_idle_loop_hook(void)
+{
+#ifdef SMTC_IDLE_HOOK_DEBUG
+	int im;
+	int flags;
+	int mtflags;
+	int bit;
+	int vpe;
+	int tc;
+	int hook_ntcs;
+	/*
+	 * printk within DMT-protected regions can deadlock,
+	 * so buffer diagnostic messages for later output.
+	 */
+	char *pdb_msg;
+	char id_ho_db_msg[768]; /* worst-case use should be less than 700 */
+
+	if (atomic_read(&idle_hook_initialized) == 0) { /* fast test */
+		if (atomic_add_return(1, &idle_hook_initialized) == 1) {
+			int mvpconf0;
+			/* Tedious stuff to just do once */
+			mvpconf0 = read_c0_mvpconf0();
+			hook_ntcs = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
+			if (hook_ntcs > NR_CPUS)
+				hook_ntcs = NR_CPUS;
+			for (tc = 0; tc < hook_ntcs; tc++) {
+				tcnoprog[tc] = 0;
+				clock_hang_reported[tc] = 0;
+	    		}
+			for (vpe = 0; vpe < 2; vpe++)
+				for (im = 0; im < 8; im++)
+					imstuckcount[vpe][im] = 0;
+			printk("Idle loop test hook initialized for %d TCs\n", hook_ntcs);
+			atomic_set(&idle_hook_initialized, 1000);
+		} else {
+			/* Someone else is initializing in parallel - let 'em finish */
+			while (atomic_read(&idle_hook_initialized) < 1000)
+				;
+		}
+	}
+
+	/* Have we stupidly left IXMT set somewhere? */
+	if (read_c0_tcstatus() & 0x400) {
+		write_c0_tcstatus(read_c0_tcstatus() & ~0x400);
+		ehb();
+		printk("Dangling IXMT in cpu_idle()\n");
+	}
+
+	/* Have we stupidly left an IM bit turned off? */
+#define IM_LIMIT 2000
+	local_irq_save(flags);
+	mtflags = dmt();
+	pdb_msg = &id_ho_db_msg[0];
+	im = read_c0_status();
+	vpe = cpu_data[smp_processor_id()].vpe_id;
+	for (bit = 0; bit < 8; bit++) {
+		/*
+		 * In current prototype, I/O interrupts
+		 * are masked for VPE > 0
+		 */
+		if (vpemask[vpe][bit]) {
+			if (!(im & (0x100 << bit)))
+				imstuckcount[vpe][bit]++;
+			else
+				imstuckcount[vpe][bit] = 0;
+			if (imstuckcount[vpe][bit] > IM_LIMIT) {
+				set_c0_status(0x100 << bit);
+				ehb();
+				imstuckcount[vpe][bit] = 0;
+				pdb_msg += sprintf(pdb_msg,
+					"Dangling IM %d fixed for VPE %d\n", bit,
+					vpe);
+			}
+		}
+	}
+
+	/*
+	 * Now that we limit outstanding timer IPIs, check for hung TC
+	 */
+	for (tc = 0; tc < NR_CPUS; tc++) {
+		/* Don't check ourself - we'll dequeue IPIs just below */
+		if ((tc != smp_processor_id()) &&
+		    ipi_timer_latch[tc] > timerq_limit) {
+		    if (clock_hang_reported[tc] == 0) {
+			pdb_msg += sprintf(pdb_msg,
+				"TC %d looks hung with timer latch at %d\n",
+				tc, ipi_timer_latch[tc]);
+			clock_hang_reported[tc]++;
+			}
+		}
+	}
+	emt(mtflags);
+	local_irq_restore(flags);
+	if (pdb_msg != &id_ho_db_msg[0])
+		printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
+#endif /* SMTC_IDLE_HOOK_DEBUG */
+	/*
+	 * To the extent that we've ever turned interrupts off,
+	 * we may have accumulated deferred IPIs.  This is subtle.
+	 * If we use the smtc_ipi_qdepth() macro, we'll get an
+	 * exact number - but we'll also disable interrupts
+	 * and create a window of failure where a new IPI gets
+	 * queued after we test the depth but before we re-enable
+	 * interrupts. So long as IXMT never gets set, however,
+	 * we should be OK:  If we pick up something and dispatch
+	 * it here, that's great. If we see nothing, but concurrent
+	 * with this operation, another TC sends us an IPI, IXMT
+	 * is clear, and we'll handle it as a real pseudo-interrupt
+	 * and not a pseudo-pseudo interrupt.
+	 */
+	if (IPIQ[smp_processor_id()].depth > 0) {
+		struct smtc_ipi *pipi;
+		extern void self_ipi(struct smtc_ipi *);
+
+		if ((pipi = smtc_ipi_dq(&IPIQ[smp_processor_id()])) != NULL) {
+			self_ipi(pipi);
+			smtc_cpu_stats[smp_processor_id()].selfipis++;
+		}
+	}
+}
+
+void smtc_soft_dump(void)
+{
+	int i;
+
+	printk("Counter Interrupts taken per CPU (TC)\n");
+	for (i = 0; i < NR_CPUS; i++) {
+		printk("%d: %ld\n", i, smtc_cpu_stats[i].timerints);
+	}
+	printk("Self-IPI invocations:\n");
+	for (i = 0; i < NR_CPUS; i++) {
+		printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
+	}
+	smtc_ipi_qdump();
+	printk("Timer IPI Backlogs:\n");
+	for (i = 0; i < NR_CPUS; i++) {
+		printk("%d: %d\n", i, ipi_timer_latch[i]);
+	}
+	printk("%d Recoveries of \"stolen\" FPU\n",
+	       atomic_read(&smtc_fpu_recoveries));
+}
+
+
+/*
+ * TLB management routines special to SMTC
+ */
+
+void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
+{
+	unsigned long flags, mtflags, tcstat, prevhalt, asid;
+	int tlb, i;
+
+	/*
+	 * It would be nice to be able to use a spinlock here,
+	 * but this is invoked from within TLB flush routines
+	 * that protect themselves with DVPE, so if a lock is
+	 * held by another TC, it'll never be freed.
+	 *
+	 * DVPE/DMT must not be done with interrupts enabled,
+	 * so even though most callers will already have disabled
+	 * them, let's be really careful...
+	 */
+
+	local_irq_save(flags);
+	if (smtc_status & SMTC_TLB_SHARED) {
+		mtflags = dvpe();
+		tlb = 0;
+	} else {
+		mtflags = dmt();
+		tlb = cpu_data[cpu].vpe_id;
+	}
+	asid = asid_cache(cpu);
+
+	do {
+		if (!((asid += ASID_INC) & ASID_MASK) ) {
+			if (cpu_has_vtag_icache)
+				flush_icache_all();
+			/* Traverse all online CPUs (hack requires contiguous range) */
+			for (i = 0; i < num_online_cpus(); i++) {
+				/*
+				 * We don't need to worry about our own CPU, nor those of
+				 * CPUs who don't share our TLB.
+				 */
+				if ((i != smp_processor_id()) &&
+				    ((smtc_status & SMTC_TLB_SHARED) ||
+				     (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))) {
+					settc(cpu_data[i].tc_id);
+					prevhalt = read_tc_c0_tchalt() & TCHALT_H;
+					if (!prevhalt) {
+						write_tc_c0_tchalt(TCHALT_H);
+						mips_ihb();
+					}
+					tcstat = read_tc_c0_tcstatus();
+					smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i);
+					if (!prevhalt)
+						write_tc_c0_tchalt(0);
+				}
+			}
+			if (!asid)		/* fix version if needed */
+				asid = ASID_FIRST_VERSION;
+			local_flush_tlb_all();	/* start new asid cycle */
+		}
+	} while (smtc_live_asid[tlb][(asid & ASID_MASK)]);
+
+	/*
+	 * SMTC shares the TLB within VPEs and possibly across all VPEs.
+	 */
+	for (i = 0; i < num_online_cpus(); i++) {
+		if ((smtc_status & SMTC_TLB_SHARED) ||
+		    (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
+			cpu_context(i, mm) = asid_cache(i) = asid;
+	}
+
+	if (smtc_status & SMTC_TLB_SHARED)
+		evpe(mtflags);
+	else
+		emt(mtflags);
+	local_irq_restore(flags);
+}
+
+/*
+ * Invoked from macros defined in mmu_context.h
+ * which must already have disabled interrupts
+ * and done a DVPE or DMT as appropriate.
+ */
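+
+/*
+ * A minimal caller sketch, assuming the mmu_context.h macros follow the
+ * same DVPE/DMT pattern used in smtc_get_new_mmu_context() above:
+ *
+ *	local_irq_save(flags);
+ *	mtflags = dvpe();		(or dmt() if the TLB isn't shared)
+ *	smtc_flush_tlb_asid(asid);
+ *	evpe(mtflags);			(or emt(mtflags))
+ *	local_irq_restore(flags);
+ */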
+
+void smtc_flush_tlb_asid(unsigned long asid)
+{
+	int entry;
+	unsigned long ehi;
+
+	entry = read_c0_wired();
+
+	/* Traverse all non-wired entries */
+	while (entry < current_cpu_data.tlbsize) {
+		write_c0_index(entry);
+		ehb();
+		tlb_read();
+		ehb();
+		ehi = read_c0_entryhi();
+		if ((ehi & ASID_MASK) == asid) {
+		    /*
+		     * Invalidate only entries with specified ASID,
+		     * making sure all entries differ.
+		     */
+		    write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
+		    write_c0_entrylo0(0);
+		    write_c0_entrylo1(0);
+		    mtc0_tlbw_hazard();
+		    tlb_write_indexed();
+		}
+		entry++;
+	}
+	write_c0_index(PARKED_INDEX);
+	tlbw_use_hazard();
+}
+
+/*
+ * Support for single-threading cache flush operations.
+ */
+
+int halt_state_save[NR_CPUS];
+
+/*
+ * To really, really be sure that nothing is being done
+ * by other TCs, halt them all.  This code assumes that
+ * a DVPE has already been done, so while their Halted
+ * state is theoretically architecturally unstable, in
+ * practice, it's not going to change while we're looking
+ * at it.
+ */
+
+void smtc_cflush_lockdown(void)
+{
+	int cpu;
+
+	for_each_online_cpu(cpu) {
+		if (cpu != smp_processor_id()) {
+			settc(cpu_data[cpu].tc_id);
+			halt_state_save[cpu] = read_tc_c0_tchalt();
+			write_tc_c0_tchalt(TCHALT_H);
+		}
+	}
+	mips_ihb();
+}
+
+/* It would be cheating to change the cpu_online states during a flush! */
+
+void smtc_cflush_release(void)
+{
+	int cpu;
+
+	/*
+	 * Start with a hazard barrier to ensure
+	 * that all CACHE ops have played through.
+	 */
+	mips_ihb();
+
+	for_each_online_cpu(cpu) {
+		if (cpu != smp_processor_id()) {
+			settc(cpu_data[cpu].tc_id);
+			write_tc_c0_tchalt(halt_state_save[cpu]);
+		}
+	}
+	mips_ihb();
+}
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
index 5e51a2d..13ff4da 100644
--- a/arch/mips/kernel/time.c
+++ b/arch/mips/kernel/time.c
@@ -116,8 +116,7 @@
 	write_c0_compare(expirelo);
 
 	/* Check to see if we have missed any timer interrupts.  */
-	count = read_c0_count();
-	if ((count - expirelo) < 0x7fffffff) {
+	while (((count = read_c0_count()) - expirelo) < 0x7fffffff) {
 		/* missed_timer_count++; */
 		expirelo = count + cycles_per_jiffy;
 		write_c0_compare(expirelo);
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index 081e6ed..6336fe8 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -280,9 +280,16 @@
 NORET_TYPE void ATTRIB_NORET die(const char * str, struct pt_regs * regs)
 {
 	static int die_counter;
+#ifdef CONFIG_MIPS_MT_SMTC
+	unsigned long dvpret = dvpe();
+#endif /* CONFIG_MIPS_MT_SMTC */
 
 	console_verbose();
 	spin_lock_irq(&die_lock);
+	bust_spinlocks(1);
+#ifdef CONFIG_MIPS_MT_SMTC
+	mips_mt_regdump(dvpret);
+#endif /* CONFIG_MIPS_MT_SMTC */
 	printk("%s[#%d]:\n", str, ++die_counter);
 	show_registers(regs);
 	spin_unlock_irq(&die_lock);
@@ -757,6 +764,7 @@
 
 	case 2:
 	case 3:
+		die_if_kernel("do_cpu invoked from kernel context!", regs);
 		break;
 	}
 
@@ -794,6 +802,36 @@
 
 asmlinkage void do_mt(struct pt_regs *regs)
 {
+	int subcode;
+
+	die_if_kernel("MIPS MT Thread exception in kernel", regs);
+
+	subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
+			>> VPECONTROL_EXCPT_SHIFT;
+	switch (subcode) {
+	case 0:
+		printk(KERN_ERR "Thread Underflow\n");
+		break;
+	case 1:
+		printk(KERN_ERR "Thread Overflow\n");
+		break;
+	case 2:
+		printk(KERN_ERR "Invalid YIELD Qualifier\n");
+		break;
+	case 3:
+		printk(KERN_ERR "Gating Storage Exception\n");
+		break;
+	case 4:
+		printk(KERN_ERR "YIELD Scheduler Exception\n");
+		break;
+	case 5:
+		printk(KERN_ERR "Gating Storage Schedulier Exception\n");
+		break;
+	default:
+		printk(KERN_ERR "*** UNKNOWN THREAD EXCEPTION %d ***\n",
+			subcode);
+		break;
+	}
 	die_if_kernel("MIPS MT Thread exception in kernel", regs);
 
 	force_sig(SIGILL, current);
@@ -929,7 +967,15 @@
  */
 void nmi_exception_handler(struct pt_regs *regs)
 {
+#ifdef CONFIG_MIPS_MT_SMTC
+	unsigned long dvpret = dvpe();
+	bust_spinlocks(1);
 	printk("NMI taken!!!!\n");
+	mips_mt_regdump(dvpret);
+#else
+	bust_spinlocks(1);
+	printk("NMI taken!!!!\n");
+#endif /* CONFIG_MIPS_MT_SMTC */
 	die("NMI", regs);
 	while(1) ;
 }
@@ -1007,7 +1053,7 @@
 	return set;
 }
 
-void mips_srs_free (int set)
+void mips_srs_free(int set)
 {
 	struct shadow_registers *sr = &shadow_registers;
 
@@ -1027,8 +1073,7 @@
 	if (addr == NULL) {
 		handler = (unsigned long) do_default_vi;
 		srs = 0;
-	}
-	else
+	} else
 		handler = (unsigned long) addr;
 	vi_handlers[n] = (unsigned long) addr;
 
@@ -1040,8 +1085,7 @@
 	if (cpu_has_veic) {
 		if (board_bind_eic_interrupt)
 			board_bind_eic_interrupt (n, srs);
-	}
-	else if (cpu_has_vint) {
+	} else if (cpu_has_vint) {
 		/* SRSMap is only defined if shadow sets are implemented */
 		if (mips_srs_max() > 1)
 			change_c0_srsmap (0xf << n*4, srs << n*4);
@@ -1055,6 +1099,15 @@
 
 		extern char except_vec_vi, except_vec_vi_lui;
 		extern char except_vec_vi_ori, except_vec_vi_end;
+#ifdef CONFIG_MIPS_MT_SMTC
+		/*
+		 * We need to provide the SMTC vectored interrupt handler
+		 * not only with the address of the handler, but with the
+		 * Status.IM bit to be masked before going there.
+		 */
+		extern char except_vec_vi_mori;
+		const int mori_offset = &except_vec_vi_mori - &except_vec_vi;
+#endif /* CONFIG_MIPS_MT_SMTC */
 		const int handler_len = &except_vec_vi_end - &except_vec_vi;
 		const int lui_offset = &except_vec_vi_lui - &except_vec_vi;
 		const int ori_offset = &except_vec_vi_ori - &except_vec_vi;
@@ -1068,6 +1121,12 @@
 		}
 
 		memcpy (b, &except_vec_vi, handler_len);
+#ifdef CONFIG_MIPS_MT_SMTC
+		if (n > 7)
+			printk("Vector index %d exceeds SMTC maximum\n", n);
+		w = (u32 *)(b + mori_offset);
+		*w = (*w & 0xffff0000) | (0x100 << n);
+#endif /* CONFIG_MIPS_MT_SMTC */
 		w = (u32 *)(b + lui_offset);
 		*w = (*w & 0xffff0000) | (((u32)handler >> 16) & 0xffff);
 		w = (u32 *)(b + ori_offset);
@@ -1090,7 +1149,7 @@
 	return (void *)old_handler;
 }
 
-void *set_vi_handler (int n, void *addr)
+void *set_vi_handler(int n, void *addr)
 {
 	return set_vi_srs_handler(n, addr, 0);
 }
@@ -1108,8 +1167,29 @@
 extern asmlinkage int fpu_emulator_save_context(struct sigcontext *sc);
 extern asmlinkage int fpu_emulator_restore_context(struct sigcontext *sc);
 
+#ifdef CONFIG_SMP
+static int smp_save_fp_context(struct sigcontext *sc)
+{
+	return cpu_has_fpu
+	       ? _save_fp_context(sc)
+	       : fpu_emulator_save_context(sc);
+}
+
+static int smp_restore_fp_context(struct sigcontext *sc)
+{
+	return cpu_has_fpu
+	       ? _restore_fp_context(sc)
+	       : fpu_emulator_restore_context(sc);
+}
+#endif
+
 static inline void signal_init(void)
 {
+#ifdef CONFIG_SMP
+	/* For now just do the cpu_has_fpu check when the functions are invoked */
+	save_fp_context = smp_save_fp_context;
+	restore_fp_context = smp_restore_fp_context;
+#else
 	if (cpu_has_fpu) {
 		save_fp_context = _save_fp_context;
 		restore_fp_context = _restore_fp_context;
@@ -1117,6 +1197,7 @@
 		save_fp_context = fpu_emulator_save_context;
 		restore_fp_context = fpu_emulator_restore_context;
 	}
+#endif
 }
 
 #ifdef CONFIG_MIPS32_COMPAT
@@ -1153,6 +1234,20 @@
 {
 	unsigned int cpu = smp_processor_id();
 	unsigned int status_set = ST0_CU0;
+#ifdef CONFIG_MIPS_MT_SMTC
+	int secondaryTC = 0;
+	int bootTC = (cpu == 0);
+
+	/*
+	 * Only do per_cpu_trap_init() for the first TC of each VPE.
+	 * Note that this hack assumes that the SMTC init code
+	 * assigns TCs consecutively and in ascending order.
+	 */
+
+	if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
+	    ((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id))
+		secondaryTC = 1;
+#endif /* CONFIG_MIPS_MT_SMTC */
 
 	/*
 	 * Disable coprocessors and select 32-bit or 64-bit addressing
@@ -1175,6 +1270,10 @@
 	write_c0_hwrena (0x0000000f); /* Allow rdhwr to all registers */
 #endif
 
+#ifdef CONFIG_MIPS_MT_SMTC
+	if (!secondaryTC) {
+#endif /* CONFIG_MIPS_MT_SMTC */
+
 	/*
 	 * Interrupt handling.
 	 */
@@ -1191,6 +1290,9 @@
 		} else
 			set_c0_cause(CAUSEF_IV);
 	}
+#ifdef CONFIG_MIPS_MT_SMTC
+	}
+#endif /* CONFIG_MIPS_MT_SMTC */
 
 	cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
 	TLBMISS_HANDLER_SETUP();
@@ -1200,8 +1302,14 @@
 	BUG_ON(current->mm);
 	enter_lazy_tlb(&init_mm, current);
 
-	cpu_cache_init();
-	tlb_init();
+#ifdef CONFIG_MIPS_MT_SMTC
+	if (bootTC) {
+#endif /* CONFIG_MIPS_MT_SMTC */
+		cpu_cache_init();
+		tlb_init();
+#ifdef CONFIG_MIPS_MT_SMTC
+	}
+#endif /* CONFIG_MIPS_MT_SMTC */
 }
 
 /* Install CPU exception handler */
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 2ad0ced..14fa00e 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -2,7 +2,7 @@
 #include <asm/asm-offsets.h>
 #include <asm-generic/vmlinux.lds.h>
 
-#undef mips		/* CPP really sucks for this job  */
+#undef mips
 #define mips mips
 OUTPUT_ARCH(mips)
 ENTRY(kernel_entry)
diff --git a/arch/mips/mips-boards/generic/init.c b/arch/mips/mips-boards/generic/init.c
index eab5a70..17dfe6a 100644
--- a/arch/mips/mips-boards/generic/init.c
+++ b/arch/mips/mips-boards/generic/init.c
@@ -220,7 +220,6 @@
 				generic_putDebugChar (*s++);
 		}
 
-		kgdb_enabled = 1;
 		/* Breakpoint is invoked after interrupts are initialised */
 	}
 }
diff --git a/arch/mips/mips-boards/generic/time.c b/arch/mips/mips-boards/generic/time.c
index 93f3bf2..a9f6124 100644
--- a/arch/mips/mips-boards/generic/time.c
+++ b/arch/mips/mips-boards/generic/time.c
@@ -30,6 +30,7 @@
 #include <linux/mc146818rtc.h>
 
 #include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
 #include <asm/ptrace.h>
 #include <asm/hardirq.h>
 #include <asm/irq.h>
@@ -50,16 +51,23 @@
 static char display_string[] = "        LINUX ON ATLAS       ";
 #endif
 #if defined(CONFIG_MIPS_MALTA)
+#if defined(CONFIG_MIPS_MT_SMTC)
+static char display_string[] = "       SMTC LINUX ON MALTA       ";
+#else
 static char display_string[] = "        LINUX ON MALTA       ";
+#endif /* CONFIG_MIPS_MT_SMTC */
 #endif
 #if defined(CONFIG_MIPS_SEAD)
 static char display_string[] = "        LINUX ON SEAD       ";
 #endif
-static unsigned int display_count = 0;
+static unsigned int display_count;
 #define MAX_DISPLAY_COUNT (sizeof(display_string) - 8)
 
-static unsigned int timer_tick_count=0;
+#define CPUCTR_IMASKBIT (0x100 << MIPSCPU_INT_CPUCTR)
+
+static unsigned int timer_tick_count;
 static int mips_cpu_timer_irq;
+extern void smtc_timer_broadcast(int);
 
 static inline void scroll_display_message(void)
 {
@@ -75,15 +83,55 @@
 	do_IRQ (mips_cpu_timer_irq, regs);
 }
 
+/*
+ * Redeclare until I get around to mopping up the timer code insanity on MIPS.
+ */
 extern int null_perf_irq(struct pt_regs *regs);
 
 extern int (*perf_irq)(struct pt_regs *regs);
 
 irqreturn_t mips_timer_interrupt(int irq, void *dev_id, struct pt_regs *regs)
 {
-	int r2 = cpu_has_mips_r2;
 	int cpu = smp_processor_id();
+	int r2 = cpu_has_mips_r2;
 
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 *  In an SMTC system, one Count/Compare set exists per VPE.
+	 *  Which TC within a VPE gets the interrupt is essentially
+	 *  random - we only know that it shouldn't be one with
+	 *  IXMT set. Whichever TC gets the interrupt needs to
+	 *  send special interprocessor interrupts to the other
+	 *  TCs to make sure that they schedule, etc.
+	 *
+	 *  That code is specific to the SMTC kernel, not to
+	 *  a particular platform, so it's invoked from
+	 *  the general MIPS timer_interrupt routine.
+	 */
+
+	/*
+	 * DVPE is necessary so long as cross-VPE interrupts
+	 * are done via read-modify-write of Cause register.
+	 */
+	int vpflags = dvpe();
+	write_c0_compare(read_c0_count() - 1);
+	clear_c0_cause(CPUCTR_IMASKBIT);
+	evpe(vpflags);
+
+	if (cpu_data[cpu].vpe_id == 0) {
+		timer_interrupt(irq, dev_id, regs);
+		scroll_display_message();
+	} else
+		write_c0_compare(read_c0_count() + (mips_hpt_frequency/HZ));
+	smtc_timer_broadcast(cpu_data[cpu].vpe_id);
+
+	if (cpu != 0)
+		/*
+		 * Other CPUs should do profiling and process accounting
+		 */
+		local_timer_interrupt(irq, dev_id, regs);
+
+#else /* CONFIG_MIPS_MT_SMTC */
 	if (cpu == 0) {
 		/*
 		 * CPU 0 handles the global timer interrupt job and process
@@ -107,12 +155,14 @@
 		 * More support needs to be added to kernel/time for
 		 * counter/timer interrupts on multiple CPU's
 		 */
-		write_c0_compare (read_c0_count() + (mips_hpt_frequency/HZ));
+		write_c0_compare(read_c0_count() + (mips_hpt_frequency/HZ));
+
 		/*
-		 * other CPUs should do profiling and process accounting
+		 * Other CPUs should do profiling and process accounting
 		 */
-		local_timer_interrupt (irq, dev_id, regs);
+		local_timer_interrupt(irq, dev_id, regs);
 	}
+#endif /* CONFIG_MIPS_MT_SMTC */
 
 out:
 	return IRQ_HANDLED;
@@ -126,7 +176,7 @@
 	unsigned int prid = read_c0_prid() & 0xffff00;
 	unsigned int count;
 
-#ifdef CONFIG_MIPS_SEAD
+#if defined(CONFIG_MIPS_SEAD) || defined(CONFIG_MIPS_SIM)
 	/*
 	 * The SEAD board doesn't have a real time clock, so we can't
 	 * really calculate the timer frequency
@@ -211,7 +261,11 @@
 
 	/* we are using the cpu counter for timer interrupts */
 	irq->handler = mips_timer_interrupt;	/* we use our own handler */
+#ifdef CONFIG_MIPS_MT_SMTC
+	setup_irq_smtc(mips_cpu_timer_irq, irq, CPUCTR_IMASKBIT);
+#else
 	setup_irq(mips_cpu_timer_irq, irq);
+#endif /* CONFIG_MIPS_MT_SMTC */
 
 #ifdef CONFIG_SMP
 	/* irq_desc(riptor) is a global resource, when the interrupt overlaps
diff --git a/arch/mips/mips-boards/malta/Makefile b/arch/mips/mips-boards/malta/Makefile
index fd4c143..77ee5c6 100644
--- a/arch/mips/mips-boards/malta/Makefile
+++ b/arch/mips/mips-boards/malta/Makefile
@@ -20,3 +20,4 @@
 #
 
 obj-y := malta_int.o malta_setup.o
+obj-$(CONFIG_SMP) += malta_smp.o
diff --git a/arch/mips/mips-boards/malta/malta_int.c b/arch/mips/mips-boards/malta/malta_int.c
index 1da8c18..64db07d 100644
--- a/arch/mips/mips-boards/malta/malta_int.c
+++ b/arch/mips/mips-boards/malta/malta_int.c
@@ -118,8 +118,9 @@
 	int irq;
 
 	irq = get_int();
-	if (irq < 0)
+	if (irq < 0) {
 		return;  /* interrupt has already been cleared */
+	}
 
 	do_IRQ(MALTA_INT_BASE+irq, regs);
 }
@@ -324,9 +325,15 @@
 	else if (cpu_has_vint) {
 		set_vi_handler (MIPSCPU_INT_I8259A, malta_hw0_irqdispatch);
 		set_vi_handler (MIPSCPU_INT_COREHI, corehi_irqdispatch);
-
+#ifdef CONFIG_MIPS_MT_SMTC
+		setup_irq_smtc (MIPSCPU_INT_BASE+MIPSCPU_INT_I8259A, &i8259irq,
+			(0x100 << MIPSCPU_INT_I8259A));
+		setup_irq_smtc (MIPSCPU_INT_BASE+MIPSCPU_INT_COREHI,
+			&corehi_irqaction, (0x100 << MIPSCPU_INT_COREHI));
+#else /* Not SMTC */
 		setup_irq (MIPSCPU_INT_BASE+MIPSCPU_INT_I8259A, &i8259irq);
 		setup_irq (MIPSCPU_INT_BASE+MIPSCPU_INT_COREHI, &corehi_irqaction);
+#endif /* CONFIG_MIPS_MT_SMTC */
 	}
 	else {
 		setup_irq (MIPSCPU_INT_BASE+MIPSCPU_INT_I8259A, &i8259irq);
diff --git a/arch/mips/mips-boards/malta/malta_smp.c b/arch/mips/mips-boards/malta/malta_smp.c
new file mode 100644
index 0000000..6c6c8ee
--- /dev/null
+++ b/arch/mips/mips-boards/malta/malta_smp.c
@@ -0,0 +1,128 @@
+/*
+ * Malta Platform-specific hooks for SMP operation
+ */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+
+#include <asm/atomic.h>
+#include <asm/cpu.h>
+#include <asm/processor.h>
+#include <asm/system.h>
+#include <asm/hardirq.h>
+#include <asm/mmu_context.h>
+#include <asm/smp.h>
+#ifdef CONFIG_MIPS_MT_SMTC
+#include <asm/smtc_ipi.h>
+#endif /* CONFIG_MIPS_MT_SMTC */
+
+/* VPE/SMP Prototype implements platform interfaces directly */
+#if !defined(CONFIG_MIPS_MT_SMP)
+
+/*
+ * Cause the specified action to be performed on a targeted "CPU"
+ */
+
+void core_send_ipi(int cpu, unsigned int action)
+{
+/* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */
+#ifdef CONFIG_MIPS_MT_SMTC
+	smtc_send_ipi(cpu, LINUX_SMP_IPI, action);
+#endif /* CONFIG_MIPS_MT_SMTC */
+}
+
+/*
+ * Detect available CPUs/VPEs/TCs and populate phys_cpu_present_map
+ */
+
+void __init prom_build_cpu_map(void)
+{
+	int nextslot;
+
+	/*
+	 * As of November, 2004, MIPSsim only simulates one core
+	 * at a time.  However, that core may be a MIPS MT core
+	 * with multiple virtual processors and thread contexts.
+	 */
+
+	if (read_c0_config3() & (1<<2)) {
+		nextslot = mipsmt_build_cpu_map(1);
+	}
+}
+
+/*
+ * Platform "CPU" startup hook
+ */
+
+void prom_boot_secondary(int cpu, struct task_struct *idle)
+{
+#ifdef CONFIG_MIPS_MT_SMTC
+	smtc_boot_secondary(cpu, idle);
+#endif /* CONFIG_MIPS_MT_SMTC */
+}
+
+/*
+ * Post-config but pre-boot cleanup entry point
+ */
+
+void prom_init_secondary(void)
+{
+#ifdef CONFIG_MIPS_MT_SMTC
+        void smtc_init_secondary(void);
+	int myvpe;
+
+	/* Don't enable Malta I/O interrupts (IP2) for secondary VPEs */
+	myvpe = read_c0_tcbind() & TCBIND_CURVPE;
+	if (myvpe != 0) {
+		/* Ideally, this should be done only once per VPE, but... */
+		clear_c0_status(STATUSF_IP2);
+		set_c0_status(STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP3
+				| STATUSF_IP4 | STATUSF_IP5 | STATUSF_IP6
+				| STATUSF_IP7);
+	}
+
+        smtc_init_secondary();
+#endif /* CONFIG_MIPS_MT_SMTC */
+}
+
+/*
+ * Platform SMP pre-initialization
+ *
+ * As noted above, we can assume a single CPU for now
+ * but it may be multithreaded.
+ */
+
+void plat_smp_setup(void)
+{
+	if (read_c0_config3() & (1<<2))
+		mipsmt_build_cpu_map(0);
+}
+
+void __init plat_prepare_cpus(unsigned int max_cpus)
+{
+	if (read_c0_config3() & (1<<2))
+		mipsmt_prepare_cpus();
+}
+
+/*
+ * SMP initialization finalization entry point
+ */
+
+void prom_smp_finish(void)
+{
+#ifdef CONFIG_MIPS_MT_SMTC
+	smtc_smp_finish();
+#endif /* CONFIG_MIPS_MT_SMTC */
+}
+
+/*
+ * Hook for after all CPUs are online
+ */
+
+void prom_cpus_done(void)
+{
+}
+
+#endif /* !CONFIG_MIPS_MT_SMP */
diff --git a/arch/mips/mips-boards/sim/cmdline.c b/arch/mips/mips-boards/sim/cmdline.c
deleted file mode 100644
index fef9fbd..0000000
--- a/arch/mips/mips-boards/sim/cmdline.c
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Carsten Langgaard, carstenl@mips.com
- * Copyright (C) 1999,2000 MIPS Technologies, Inc.  All rights reserved.
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License (Version 2) as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
- * for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA.
- *
- * Kernel command line creation using the prom monitor (YAMON) argc/argv.
- */
-#include <linux/init.h>
-#include <linux/string.h>
-
-#include <asm/bootinfo.h>
-
-extern int prom_argc;
-extern int *_prom_argv;
-
-/*
- * YAMON (32-bit PROM) pass arguments and environment as 32-bit pointer.
- * This macro take care of sign extension.
- */
-#define prom_argv(index) ((char *)(((int *)(int)_prom_argv)[(index)]))
-
-char arcs_cmdline[CL_SIZE];
-
-char * __init prom_getcmdline(void)
-{
-	return &(arcs_cmdline[0]);
-}
-
-
-void  __init prom_init_cmdline(void)
-{
-	char *cp;
-	int actr;
-
-	actr = 1; /* Always ignore argv[0] */
-
-	cp = &(arcs_cmdline[0]);
-	while(actr < prom_argc) {
-	        strcpy(cp, prom_argv(actr));
-		cp += strlen(prom_argv(actr));
-		*cp++ = ' ';
-		actr++;
-	}
-	if (cp != &(arcs_cmdline[0])) /* get rid of trailing space */
-		--cp;
-	*cp = '\0';
-}
diff --git a/arch/mips/mips-boards/sim/sim_cmdline.c b/arch/mips/mips-boards/sim/sim_cmdline.c
index 9df37c6..c63021a 100644
--- a/arch/mips/mips-boards/sim/sim_cmdline.c
+++ b/arch/mips/mips-boards/sim/sim_cmdline.c
@@ -26,8 +26,10 @@
 	return arcs_cmdline;
 }
 
-
 void  __init prom_init_cmdline(void)
 {
-    /* nothing to do */
+	char *cp;
+	cp = arcs_cmdline;
+	/* Get boot line from environment? */
+	*cp = '\0';
 }
diff --git a/arch/mips/mips-boards/sim/sim_smp.c b/arch/mips/mips-boards/sim/sim_smp.c
index a9f0c2b..b7084e7 100644
--- a/arch/mips/mips-boards/sim/sim_smp.c
+++ b/arch/mips/mips-boards/sim/sim_smp.c
@@ -44,8 +44,6 @@
 void core_send_ipi(int cpu, unsigned int action)
 {
 #ifdef CONFIG_MIPS_MT_SMTC
-	void smtc_send_ipi(int, int, unsigned int);
-
 	smtc_send_ipi(cpu, LINUX_SMP_IPI, action);
 #endif /* CONFIG_MIPS_MT_SMTC */
 /* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */
@@ -59,15 +57,8 @@
 void __init prom_build_cpu_map(void)
 {
 #ifdef CONFIG_MIPS_MT_SMTC
-	extern int mipsmt_build_cpu_map(int startslot);
 	int nextslot;
 
-	cpus_clear(phys_cpu_present_map);
-
-	/* Register the boot CPU */
-
-	smp_prepare_boot_cpu();
-
 	/*
 	 * As of November, 2004, MIPSsim only simulates one core
 	 * at a time.  However, that core may be a MIPS MT core
@@ -87,8 +78,6 @@
 void prom_boot_secondary(int cpu, struct task_struct *idle)
 {
 #ifdef CONFIG_MIPS_MT_SMTC
-	extern void smtc_boot_secondary(int cpu, struct task_struct *t);
-
 	smtc_boot_secondary(cpu, idle);
 #endif /* CONFIG_MIPS_MT_SMTC */
 }
@@ -113,7 +102,6 @@
 void prom_prepare_cpus(unsigned int max_cpus)
 {
 #ifdef CONFIG_MIPS_MT_SMTC
-	void mipsmt_prepare_cpus(int c);
 	/*
 	 * As noted above, we can assume a single CPU for now
 	 * but it may be multithreaded.
@@ -132,8 +120,6 @@
 void prom_smp_finish(void)
 {
 #ifdef CONFIG_MIPS_MT_SMTC
-	void smtc_smp_finish(void);
-
 	smtc_smp_finish();
 #endif /* CONFIG_MIPS_MT_SMTC */
 }
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 2d9624f..e3a6172 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -157,7 +157,6 @@
 	 * Oops. The kernel tried to access some bad page. We'll have to
 	 * terminate things with extreme prejudice.
 	 */
-
 	bust_spinlocks(1);
 
 	printk(KERN_ALERT "CPU %d Unable to handle kernel paging request at "
@@ -188,11 +187,20 @@
 	/* Kernel mode? Handle exceptions or die */
 	if (!user_mode(regs))
 		goto no_context;
-
+	else
 	/*
 	 * Send a sigbus, regardless of whether we were in kernel
 	 * or user mode.
 	 */
+#if 0
+		printk("do_page_fault() #3: sending SIGBUS to %s for "
+		       "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
+		       tsk->comm,
+		       write ? "write access to" : "read access from",
+		       field, address,
+		       field, (unsigned long) regs->cp0_epc,
+		       field, (unsigned long) regs->regs[31]);
+#endif
 	tsk->thread.cp0_badvaddr = address;
 	info.si_signo = SIGBUS;
 	info.si_errno = 0;
@@ -201,7 +209,6 @@
 	force_sig_info(SIGBUS, &info, tsk);
 
 	return;
-
 vmalloc_fault:
 	{
 		/*
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index a865f239..9dca099 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -32,13 +32,35 @@
 				     "nop; nop; nop; nop; nop; nop;\n\t" \
 				     ".set reorder\n\t")
 
+/* Atomicity and interruptability */
+#ifdef CONFIG_MIPS_MT_SMTC
+
+#include <asm/smtc.h>
+#include <asm/mipsmtregs.h>
+
+#define ENTER_CRITICAL(flags) \
+	{ \
+	unsigned int mvpflags; \
+	local_irq_save(flags);\
+	mvpflags = dvpe()
+#define EXIT_CRITICAL(flags) \
+	evpe(mvpflags); \
+	local_irq_restore(flags); \
+	}
+#else
+
+#define ENTER_CRITICAL(flags) local_irq_save(flags)
+#define EXIT_CRITICAL(flags) local_irq_restore(flags)
+
+#endif /* CONFIG_MIPS_MT_SMTC */
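+
+/*
+ * Note the asymmetric braces above: ENTER_CRITICAL opens a block so
+ * that mvpflags has somewhere to live and EXIT_CRITICAL closes it, so
+ * the two must always be used as a matched pair within one function.
+ */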
+
 void local_flush_tlb_all(void)
 {
 	unsigned long flags;
 	unsigned long old_ctx;
 	int entry;
 
-	local_irq_save(flags);
+	ENTER_CRITICAL(flags);
 	/* Save old context and create impossible VPN2 value */
 	old_ctx = read_c0_entryhi();
 	write_c0_entrylo0(0);
@@ -57,7 +79,7 @@
 	}
 	tlbw_use_hazard();
 	write_c0_entryhi(old_ctx);
-	local_irq_restore(flags);
+	EXIT_CRITICAL(flags);
 }
 
 /* All entries common to a mm share an asid.  To effectively flush
@@ -87,6 +109,7 @@
 		unsigned long flags;
 		int size;
 
+		ENTER_CRITICAL(flags);
 		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 		size = (size + 1) >> 1;
 		local_irq_save(flags);
@@ -120,7 +143,7 @@
 		} else {
 			drop_mmu_context(mm, cpu);
 		}
-		local_irq_restore(flags);
+		EXIT_CRITICAL(flags);
 	}
 }
 
@@ -129,9 +152,9 @@
 	unsigned long flags;
 	int size;
 
+	ENTER_CRITICAL(flags);
 	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
 	size = (size + 1) >> 1;
-	local_irq_save(flags);
 	if (size <= current_cpu_data.tlbsize / 2) {
 		int pid = read_c0_entryhi();
 
@@ -162,7 +185,7 @@
 	} else {
 		local_flush_tlb_all();
 	}
-	local_irq_restore(flags);
+	EXIT_CRITICAL(flags);
 }
 
 void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
@@ -175,7 +198,7 @@
 
 		newpid = cpu_asid(cpu, vma->vm_mm);
 		page &= (PAGE_MASK << 1);
-		local_irq_save(flags);
+		ENTER_CRITICAL(flags);
 		oldpid = read_c0_entryhi();
 		write_c0_entryhi(page | newpid);
 		mtc0_tlbw_hazard();
@@ -194,7 +217,7 @@
 
 	finish:
 		write_c0_entryhi(oldpid);
-		local_irq_restore(flags);
+		EXIT_CRITICAL(flags);
 	}
 }
 
@@ -207,7 +230,7 @@
 	unsigned long flags;
 	int oldpid, idx;
 
-	local_irq_save(flags);
+	ENTER_CRITICAL(flags);
 	oldpid = read_c0_entryhi();
 	page &= (PAGE_MASK << 1);
 	write_c0_entryhi(page);
@@ -226,7 +249,7 @@
 	}
 	write_c0_entryhi(oldpid);
 
-	local_irq_restore(flags);
+	EXIT_CRITICAL(flags);
 }
 
 /*
@@ -249,7 +272,7 @@
 	if (current->active_mm != vma->vm_mm)
 		return;
 
-	local_irq_save(flags);
+	ENTER_CRITICAL(flags);
 
 	pid = read_c0_entryhi() & ASID_MASK;
 	address &= (PAGE_MASK << 1);
@@ -277,7 +300,7 @@
 	else
 		tlb_write_indexed();
 	tlbw_use_hazard();
-	local_irq_restore(flags);
+	EXIT_CRITICAL(flags);
 }
 
 #if 0
@@ -291,7 +314,7 @@
 	pte_t *ptep;
 	int idx;
 
-	local_irq_save(flags);
+	ENTER_CRITICAL(flags);
 	address &= (PAGE_MASK << 1);
 	asid = read_c0_entryhi() & ASID_MASK;
 	write_c0_entryhi(address | asid);
@@ -310,7 +333,7 @@
 	else
 		tlb_write_indexed();
 	tlbw_use_hazard();
-	local_irq_restore(flags);
+	EXIT_CRITICAL(flags);
 }
 #endif
 
@@ -322,7 +345,7 @@
 	unsigned long old_pagemask;
 	unsigned long old_ctx;
 
-	local_irq_save(flags);
+	ENTER_CRITICAL(flags);
 	/* Save old context and create impossible VPN2 value */
 	old_ctx = read_c0_entryhi();
 	old_pagemask = read_c0_pagemask();
@@ -342,7 +365,7 @@
 	BARRIER;
 	write_c0_pagemask(old_pagemask);
 	local_flush_tlb_all();
-	local_irq_restore(flags);
+	EXIT_CRITICAL(flags);
 }
 
 /*
@@ -362,7 +385,7 @@
 	unsigned long old_pagemask;
 	unsigned long old_ctx;
 
-	local_irq_save(flags);
+	ENTER_CRITICAL(flags);
 	/* Save old context and create impossible VPN2 value */
 	old_ctx = read_c0_entryhi();
 	old_pagemask = read_c0_pagemask();
@@ -386,10 +409,11 @@
 	write_c0_entryhi(old_ctx);
 	write_c0_pagemask(old_pagemask);
 out:
-	local_irq_restore(flags);
+	EXIT_CRITICAL(flags);
 	return ret;
 }
 
+extern void __init sanitize_tlb_entries(void);
 static void __init probe_tlb(unsigned long config)
 {
 	struct cpuinfo_mips *c = &current_cpu_data;
@@ -402,6 +426,14 @@
 	 */
 	if ((c->processor_id & 0xff0000) == PRID_COMP_LEGACY)
 		return;
+#ifdef CONFIG_MIPS_MT_SMTC
+	/*
+	 * If TLB is shared in SMTC system, total size already
+	 * has been calculated and written into cpu_data tlbsize
+	 */
+	if ((smtc_status & SMTC_TLB_SHARED) == SMTC_TLB_SHARED)
+		return;
+#endif /* CONFIG_MIPS_MT_SMTC */
 
 	reg = read_c0_config1();
 	if (!((config >> 7) & 3))
@@ -410,6 +442,15 @@
 	c->tlbsize = ((reg >> 25) & 0x3f) + 1;
 }
 
+static int __initdata ntlb = 0;
+static int __init set_ntlb(char *str)
+{
+	get_option(&str, &ntlb);
+	return 1;
+}
+
+__setup("ntlb=", set_ntlb);
+
 void __init tlb_init(void)
 {
 	unsigned int config = read_c0_config();
@@ -432,5 +473,15 @@
 
 	/* Did I tell you that ARC SUCKS?  */
 
+	if (ntlb) {
+		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
+			int wired = current_cpu_data.tlbsize - ntlb;
+			write_c0_wired(wired);
+			write_c0_index(wired-1);
+			printk ("Restricting TLB to %d entries\n", ntlb);
+		} else
+			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
+	}
+
 	build_tlb_refill_handler();
 }
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index c5eea6a..053dbac 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -7,6 +7,16 @@
  *
  * Copyright (C) 2004,2005 by Thiemo Seufer
  * Copyright (C) 2005  Maciej W. Rozycki
+ * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
+ *
+ * ... and the days got worse and worse and now you see
+ * I've gone completely out of my mind.
+ *
+ * They're coming to take me a away haha
+ * they're coming to take me a away hoho hihi haha
+ * to the funny farm where code is beautiful all the time ...
+ *
+ * (Condolences to Napoleon XIV)
  */
 
 #include <stdarg.h>
@@ -68,6 +78,7 @@
 	BIMM = 0x040,
 	JIMM = 0x080,
 	FUNC = 0x100,
+	SET = 0x200
 };
 
 #define OP_MASK		0x2f
@@ -86,6 +97,8 @@
 #define JIMM_SH		0
 #define FUNC_MASK	0x2f
 #define FUNC_SH		0
+#define SET_MASK	0x7
+#define SET_SH		0
 
 enum opcode {
 	insn_invalid,
@@ -129,8 +142,8 @@
 	{ insn_bne, M(bne_op,0,0,0,0,0), RS | RT | BIMM },
 	{ insn_daddiu, M(daddiu_op,0,0,0,0,0), RS | RT | SIMM },
 	{ insn_daddu, M(spec_op,0,0,0,0,daddu_op), RS | RT | RD },
-	{ insn_dmfc0, M(cop0_op,dmfc_op,0,0,0,0), RT | RD },
-	{ insn_dmtc0, M(cop0_op,dmtc_op,0,0,0,0), RT | RD },
+	{ insn_dmfc0, M(cop0_op,dmfc_op,0,0,0,0), RT | RD | SET},
+	{ insn_dmtc0, M(cop0_op,dmtc_op,0,0,0,0), RT | RD | SET},
 	{ insn_dsll, M(spec_op,0,0,0,0,dsll_op), RT | RD | RE },
 	{ insn_dsll32, M(spec_op,0,0,0,0,dsll32_op), RT | RD | RE },
 	{ insn_dsra, M(spec_op,0,0,0,0,dsra_op), RT | RD | RE },
@@ -145,8 +158,8 @@
 	{ insn_lld, M(lld_op,0,0,0,0,0), RS | RT | SIMM },
 	{ insn_lui, M(lui_op,0,0,0,0,0), RT | SIMM },
 	{ insn_lw, M(lw_op,0,0,0,0,0), RS | RT | SIMM },
-	{ insn_mfc0, M(cop0_op,mfc_op,0,0,0,0), RT | RD },
-	{ insn_mtc0, M(cop0_op,mtc_op,0,0,0,0), RT | RD },
+	{ insn_mfc0, M(cop0_op,mfc_op,0,0,0,0), RT | RD | SET},
+	{ insn_mtc0, M(cop0_op,mtc_op,0,0,0,0), RT | RD | SET},
 	{ insn_ori, M(ori_op,0,0,0,0,0), RS | RT | UIMM },
 	{ insn_rfe, M(cop0_op,cop_op,0,0,0,rfe_op), 0 },
 	{ insn_sc, M(sc_op,0,0,0,0,0), RS | RT | SIMM },
@@ -242,6 +255,14 @@
 	return arg & FUNC_MASK;
 }
 
+static __init u32 build_set(u32 arg)
+{
+	if (arg & ~SET_MASK)
+		printk(KERN_WARNING "TLB synthesizer field overflow\n");
+
+	return arg & SET_MASK;
+}
+
 /*
  * The order of opcode arguments is implicitly left to right,
  * starting with RS and ending with FUNC or IMM.
@@ -273,6 +294,7 @@
 	if (ip->fields & BIMM) op |= build_bimm(va_arg(ap, s32));
 	if (ip->fields & JIMM) op |= build_jimm(va_arg(ap, u32));
 	if (ip->fields & FUNC) op |= build_func(va_arg(ap, u32));
+	if (ip->fields & SET) op |= build_set(va_arg(ap, u32));
 	va_end(ap);
 
 	**buf = op;
@@ -358,8 +380,8 @@
 I_u1s2(_bltz);
 I_u1s2(_bltzl);
 I_u1u2s3(_bne);
-I_u1u2(_dmfc0);
-I_u1u2(_dmtc0);
+I_u1u2u3(_dmfc0);
+I_u1u2u3(_dmtc0);
 I_u2u1s3(_daddiu);
 I_u3u1u2(_daddu);
 I_u2u1u3(_dsll);
@@ -376,8 +398,8 @@
 I_u2s3u1(_lld);
 I_u1s2(_lui);
 I_u2s3u1(_lw);
-I_u1u2(_mfc0);
-I_u1u2(_mtc0);
+I_u1u2u3(_mfc0);
+I_u1u2u3(_mtc0);
 I_u2u1u3(_ori);
 I_0(_rfe);
 I_u2s3u1(_sc);
@@ -451,8 +473,8 @@
 # define i_SLL(buf, rs, rt, sh) i_dsll(buf, rs, rt, sh)
 # define i_SRA(buf, rs, rt, sh) i_dsra(buf, rs, rt, sh)
 # define i_SRL(buf, rs, rt, sh) i_dsrl(buf, rs, rt, sh)
-# define i_MFC0(buf, rt, rd) i_dmfc0(buf, rt, rd)
-# define i_MTC0(buf, rt, rd) i_dmtc0(buf, rt, rd)
+# define i_MFC0(buf, rt, rd...) i_dmfc0(buf, rt, rd)
+# define i_MTC0(buf, rt, rd...) i_dmtc0(buf, rt, rd)
 # define i_ADDIU(buf, rs, rt, val) i_daddiu(buf, rs, rt, val)
 # define i_ADDU(buf, rs, rt, rd) i_daddu(buf, rs, rt, rd)
 # define i_SUBU(buf, rs, rt, rd) i_dsubu(buf, rs, rt, rd)
@@ -464,8 +486,8 @@
 # define i_SLL(buf, rs, rt, sh) i_sll(buf, rs, rt, sh)
 # define i_SRA(buf, rs, rt, sh) i_sra(buf, rs, rt, sh)
 # define i_SRL(buf, rs, rt, sh) i_srl(buf, rs, rt, sh)
-# define i_MFC0(buf, rt, rd) i_mfc0(buf, rt, rd)
-# define i_MTC0(buf, rt, rd) i_mtc0(buf, rt, rd)
+# define i_MFC0(buf, rt, rd...) i_mfc0(buf, rt, rd)
+# define i_MTC0(buf, rt, rd...) i_mtc0(buf, rt, rd)
 # define i_ADDIU(buf, rs, rt, val) i_addiu(buf, rs, rt, val)
 # define i_ADDU(buf, rs, rt, rd) i_addu(buf, rs, rt, rd)
 # define i_SUBU(buf, rs, rt, rd) i_subu(buf, rs, rt, rd)
@@ -670,14 +692,15 @@
 #define K1		27
 
 /* Some CP0 registers */
-#define C0_INDEX	0
-#define C0_ENTRYLO0	2
-#define C0_ENTRYLO1	3
-#define C0_CONTEXT	4
-#define C0_BADVADDR	8
-#define C0_ENTRYHI	10
-#define C0_EPC		14
-#define C0_XCONTEXT	20
+#define C0_INDEX	0, 0
+#define C0_ENTRYLO0	2, 0
+#define C0_TCBIND	2, 2
+#define C0_ENTRYLO1	3, 0
+#define C0_CONTEXT	4, 0
+#define C0_BADVADDR	8, 0
+#define C0_ENTRYHI	10, 0
+#define C0_EPC		14, 0
+#define C0_XCONTEXT	20, 0
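+
+/*
+ * The second value in each pair is the CP0 select, carried by the new
+ * SET field; e.g. i_mfc0(p, ptr, C0_TCBIND) emits an mfc0 from $2, sel 2.
+ */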
 
 #ifdef CONFIG_64BIT
 # define GET_CONTEXT(buf, reg) i_MFC0(buf, reg, C0_XCONTEXT)
@@ -951,12 +974,20 @@
 	/* No i_nop needed here, since the next insn doesn't touch TMP. */
 
 #ifdef CONFIG_SMP
+# ifdef  CONFIG_MIPS_MT_SMTC
+	/*
+	 * SMTC uses TCBind value as "CPU" index
+	 */
+	i_mfc0(p, ptr, C0_TCBIND);
+	i_dsrl(p, ptr, ptr, 19);
+# else
 	/*
 	 * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
 	 * stored in CONTEXT.
 	 */
 	i_dmfc0(p, ptr, C0_CONTEXT);
 	i_dsrl(p, ptr, ptr, 23);
+#endif
 	i_LA_mostly(p, tmp, pgdc);
 	i_daddu(p, ptr, ptr, tmp);
 	i_dmfc0(p, tmp, C0_BADVADDR);
@@ -1014,9 +1045,21 @@
 
 	/* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
 #ifdef CONFIG_SMP
+#ifdef  CONFIG_MIPS_MT_SMTC
+	/*
+	 * SMTC uses TCBind value as "CPU" index
+	 */
+	i_mfc0(p, ptr, C0_TCBIND);
+	i_LA_mostly(p, tmp, pgdc);
+	i_srl(p, ptr, ptr, 19);
+#else
+	/*
+	 * smp_processor_id() << 3 is stored in CONTEXT.
+	 */
 	i_mfc0(p, ptr, C0_CONTEXT);
 	i_LA_mostly(p, tmp, pgdc);
 	i_srl(p, ptr, ptr, 23);
+#endif
 	i_addu(p, ptr, tmp, ptr);
 #else
 	i_LA_mostly(p, ptr, pgdc);