Merge branch 'for-2.6.26' of master.kernel.org:/pub/scm/linux/kernel/git/jwboyer/powerpc-4xx into merge
diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c
index 04f74f9..5bf7df1 100644
--- a/arch/powerpc/platforms/cell/interrupt.c
+++ b/arch/powerpc/platforms/cell/interrupt.c
@@ -35,6 +35,7 @@
 #include <linux/percpu.h>
 #include <linux/types.h>
 #include <linux/ioport.h>
+#include <linux/kernel_stat.h>
 
 #include <asm/io.h>
 #include <asm/pgtable.h>
@@ -231,6 +232,59 @@
 				    "IBM,CBEA-Internal-Interrupt-Controller");
 }
 
+extern int noirqdebug;
+
+static void handle_iic_irq(unsigned int irq, struct irq_desc *desc)
+{
+	const unsigned int cpu = smp_processor_id();
+
+	spin_lock(&desc->lock);
+
+	desc->status &= ~(IRQ_REPLAY | IRQ_WAITING);
+
+	/*
+	 * If we're currently running this IRQ, or it's disabled,
+	 * we shouldn't process the IRQ.  Mark it pending, send the
+	 * EOI and exit.
+	 */
+	if (unlikely((desc->status & (IRQ_INPROGRESS | IRQ_DISABLED)) ||
+		    !desc->action)) {
+		desc->status |= IRQ_PENDING;
+		goto out_eoi;
+	}
+
+	kstat_cpu(cpu).irqs[irq]++;
+
+	/* Mark the IRQ currently in progress. */
+	desc->status |= IRQ_INPROGRESS;
+
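+	/*
+	 * Unlike handle_fasteoi_irq(), we don't mask a recursive
+	 * interrupt; instead, replay anything that was marked
+	 * pending while the handlers ran, then send the EOI.
+	 */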
+	do {
+		struct irqaction *action = desc->action;
+		irqreturn_t action_ret;
+
+		if (unlikely(!action))
+			goto out_eoi;
+
+		desc->status &= ~IRQ_PENDING;
+		spin_unlock(&desc->lock);
+		action_ret = handle_IRQ_event(irq, action);
+		if (!noirqdebug)
+			note_interrupt(irq, desc, action_ret);
+		spin_lock(&desc->lock);
+
+	} while ((desc->status & (IRQ_PENDING | IRQ_DISABLED)) == IRQ_PENDING);
+
+	desc->status &= ~IRQ_INPROGRESS;
+out_eoi:
+	desc->chip->eoi(irq);
+	spin_unlock(&desc->lock);
+}
+
 static int iic_host_map(struct irq_host *h, unsigned int virq,
 			irq_hw_number_t hw)
 {
@@ -240,10 +289,10 @@
 		break;
 	case IIC_IRQ_TYPE_IOEXC:
 		set_irq_chip_and_handler(virq, &iic_ioexc_chip,
-					 handle_fasteoi_irq);
+					 handle_iic_irq);
 		break;
 	default:
-		set_irq_chip_and_handler(virq, &iic_chip, handle_fasteoi_irq);
+		set_irq_chip_and_handler(virq, &iic_chip, handle_iic_irq);
 	}
 	return 0;
 }
diff --git a/arch/powerpc/platforms/cell/spu_base.c b/arch/powerpc/platforms/cell/spu_base.c
index 6bab44b..70c6601 100644
--- a/arch/powerpc/platforms/cell/spu_base.c
+++ b/arch/powerpc/platforms/cell/spu_base.c
@@ -141,6 +141,15 @@
 
 	if (!test_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags))
 		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
+	else {
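+		/*
+		 * A context switch is pending, so the DMA may not be
+		 * restarted here.  Record the fault so the switch code
+		 * can add the restart command to the saved MFC state.
+		 */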
+		set_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags);
+		mb();
+	}
 }
 
 static inline void spu_load_slb(struct spu *spu, int slbe, struct spu_slb *slb)
@@ -226,11 +230,13 @@
 		return 0;
 	}
 
-	spu->class_0_pending = 0;
-	spu->dar = ea;
-	spu->dsisr = dsisr;
+	spu->class_1_dar = ea;
+	spu->class_1_dsisr = dsisr;
 
-	spu->stop_callback(spu);
+	spu->stop_callback(spu, 1);
+
+	spu->class_1_dar = 0;
+	spu->class_1_dsisr = 0;
 
 	return 0;
 }
@@ -318,11 +324,15 @@
 	stat = spu_int_stat_get(spu, 0) & mask;
 
 	spu->class_0_pending |= stat;
-	spu->dsisr = spu_mfc_dsisr_get(spu);
-	spu->dar = spu_mfc_dar_get(spu);
+	spu->class_0_dsisr = spu_mfc_dsisr_get(spu);
+	spu->class_0_dar = spu_mfc_dar_get(spu);
 	spin_unlock(&spu->register_lock);
 
-	spu->stop_callback(spu);
+	spu->stop_callback(spu, 0);
+
+	spu->class_0_pending = 0;
+	spu->class_0_dsisr = 0;
+	spu->class_0_dar = 0;
 
 	spu_int_stat_clear(spu, 0, stat);
 
@@ -363,6 +373,9 @@
 	if (stat & CLASS1_LS_COMPARE_SUSPEND_ON_PUT_INTR)
 		;
 
+	spu->class_1_dsisr = 0;
+	spu->class_1_dar = 0;
+
 	return stat ? IRQ_HANDLED : IRQ_NONE;
 }
 
@@ -396,10 +409,10 @@
 		spu->ibox_callback(spu);
 
 	if (stat & CLASS2_SPU_STOP_INTR)
-		spu->stop_callback(spu);
+		spu->stop_callback(spu, 2);
 
 	if (stat & CLASS2_SPU_HALT_INTR)
-		spu->stop_callback(spu);
+		spu->stop_callback(spu, 2);
 
 	if (stat & CLASS2_SPU_DMA_TAG_GROUP_COMPLETE_INTR)
 		spu->mfc_callback(spu);
diff --git a/arch/powerpc/platforms/cell/spu_priv1_mmio.c b/arch/powerpc/platforms/cell/spu_priv1_mmio.c
index 67fa724..906a0a2 100644
--- a/arch/powerpc/platforms/cell/spu_priv1_mmio.c
+++ b/arch/powerpc/platforms/cell/spu_priv1_mmio.c
@@ -28,6 +28,7 @@
 #include <linux/io.h>
 #include <linux/mutex.h>
 #include <linux/device.h>
+#include <linux/sched.h>
 
 #include <asm/spu.h>
 #include <asm/spu_priv1.h>
@@ -75,8 +76,23 @@
 
 static void cpu_affinity_set(struct spu *spu, int cpu)
 {
-	u64 target = iic_get_target_id(cpu);
-	u64 route = target << 48 | target << 32 | target << 16;
+	u64 target;
+	u64 route;
+
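+	/*
+	 * Only re-route interrupts when the target cpu is on the
+	 * same node as the spu; otherwise keep the current route.
+	 */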
+	if (nr_cpus_node(spu->node)) {
+		cpumask_t spumask = node_to_cpumask(spu->node);
+		cpumask_t cpumask = node_to_cpumask(cpu_to_node(cpu));
+
+		if (!cpus_intersects(spumask, cpumask))
+			return;
+	}
+
+	target = iic_get_target_id(cpu);
+	route = target << 48 | target << 32 | target << 16;
 	out_be64(&spu->priv1->int_route_RW, route);
 }
 
diff --git a/arch/powerpc/platforms/cell/spufs/fault.c b/arch/powerpc/platforms/cell/spufs/fault.c
index e46d300..f093a58 100644
--- a/arch/powerpc/platforms/cell/spufs/fault.c
+++ b/arch/powerpc/platforms/cell/spufs/fault.c
@@ -83,13 +83,19 @@
 		return 0;
 
 	if (stat & CLASS0_DMA_ALIGNMENT_INTR)
-		spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_DMA_ALIGNMENT);
+		spufs_handle_event(ctx, ctx->csa.class_0_dar,
+			SPE_EVENT_DMA_ALIGNMENT);
 
 	if (stat & CLASS0_INVALID_DMA_COMMAND_INTR)
-		spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_INVALID_DMA);
+		spufs_handle_event(ctx, ctx->csa.class_0_dar,
+			SPE_EVENT_INVALID_DMA);
 
 	if (stat & CLASS0_SPU_ERROR_INTR)
-		spufs_handle_event(ctx, ctx->csa.dar, SPE_EVENT_SPE_ERROR);
+		spufs_handle_event(ctx, ctx->csa.class_0_dar,
+			SPE_EVENT_SPE_ERROR);
+
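+	/* The recorded class 0 exceptions have now been delivered. */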
+	ctx->csa.class_0_pending = 0;
 
 	return -EIO;
 }
@@ -119,8 +124,8 @@
 	 * in time, we can still expect to get the same fault
 	 * immediately after the context restore.
 	 */
-	ea = ctx->csa.dar;
-	dsisr = ctx->csa.dsisr;
+	ea = ctx->csa.class_1_dar;
+	dsisr = ctx->csa.class_1_dsisr;
 
 	if (!(dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED)))
 		return 0;
@@ -158,7 +163,7 @@
 	 * time slicing will not preempt the context while the page fault
 	 * handler is running. Context switch code removes mappings.
 	 */
-	ctx->csa.dar = ctx->csa.dsisr = 0;
+	ctx->csa.class_1_dar = ctx->csa.class_1_dsisr = 0;
 
 	/*
 	 * If we handled the fault successfully and are in runnable
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 0c32a05..f407b24 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -23,6 +23,7 @@
 
 #include <linux/file.h>
 #include <linux/fs.h>
+#include <linux/fsnotify.h>
 #include <linux/backing-dev.h>
 #include <linux/init.h>
 #include <linux/ioctl.h>
@@ -223,7 +224,7 @@
 	parent = dir->d_parent->d_inode;
 	ctx = SPUFS_I(dir->d_inode)->i_ctx;
 
-	mutex_lock(&parent->i_mutex);
+	mutex_lock_nested(&parent->i_mutex, I_MUTEX_PARENT);
 	ret = spufs_rmdir(parent, dir);
 	mutex_unlock(&parent->i_mutex);
 	WARN_ON(ret);
@@ -618,12 +619,15 @@
 	mode &= ~current->fs->umask;
 
 	if (flags & SPU_CREATE_GANG)
-		return spufs_create_gang(nd->path.dentry->d_inode,
+		ret = spufs_create_gang(nd->path.dentry->d_inode,
 					 dentry, nd->path.mnt, mode);
 	else
-		return spufs_create_context(nd->path.dentry->d_inode,
+		ret = spufs_create_context(nd->path.dentry->d_inode,
 					    dentry, nd->path.mnt, flags, mode,
 					    filp);
+	if (ret >= 0)
+		fsnotify_mkdir(nd->path.dentry->d_inode, dentry);
+	return ret;
 
 out_dput:
 	dput(dentry);
diff --git a/arch/powerpc/platforms/cell/spufs/run.c b/arch/powerpc/platforms/cell/spufs/run.c
index a9c35b7..b7493b8 100644
--- a/arch/powerpc/platforms/cell/spufs/run.c
+++ b/arch/powerpc/platforms/cell/spufs/run.c
@@ -11,7 +11,7 @@
 #include "spufs.h"
 
 /* interrupt-level stop callback function. */
-void spufs_stop_callback(struct spu *spu)
+void spufs_stop_callback(struct spu *spu, int irq)
 {
 	struct spu_context *ctx = spu->ctx;
 
@@ -24,9 +24,24 @@
 	 */
 	if (ctx) {
 		/* Copy exception arguments into module specific structure */
-		ctx->csa.class_0_pending = spu->class_0_pending;
-		ctx->csa.dsisr = spu->dsisr;
-		ctx->csa.dar = spu->dar;
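+		/*
+		 * The exception registers are per interrupt class:
+		 * class 0 carries the error status, class 1 the
+		 * translation fault, and class 2 has none to copy.
+		 */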
+		switch (irq) {
+		case 0:
+			ctx->csa.class_0_pending = spu->class_0_pending;
+			ctx->csa.class_0_dsisr = spu->class_0_dsisr;
+			ctx->csa.class_0_dar = spu->class_0_dar;
+			break;
+		case 1:
+			ctx->csa.class_1_dsisr = spu->class_1_dsisr;
+			ctx->csa.class_1_dar = spu->class_1_dar;
+			break;
+		case 2:
+			break;
+		}
 
 		/* ensure that the exception status has hit memory before a
 		 * thread waiting on the context's stop queue is woken */
@@ -34,11 +44,6 @@
 
 		wake_up_all(&ctx->stop_wq);
 	}
-
-	/* Clear callback arguments from spu structure */
-	spu->class_0_pending = 0;
-	spu->dsisr = 0;
-	spu->dar = 0;
 }
 
 int spu_stopped(struct spu_context *ctx, u32 *stat)
@@ -56,7 +61,12 @@
 	if (!(*stat & SPU_STATUS_RUNNING) && (*stat & stopped))
 		return 1;
 
-	dsisr = ctx->csa.dsisr;
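+	/* A fault recorded for either interrupt class also counts as stopped. */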
+	dsisr = ctx->csa.class_0_dsisr;
+	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))
+		return 1;
+
+	dsisr = ctx->csa.class_1_dsisr;
 	if (dsisr & (MFC_DSISR_PTE_NOT_FOUND | MFC_DSISR_ACCESS_DENIED))
 		return 1;
 
@@ -294,7 +303,7 @@
 	u32 ls_pointer, npc;
 	void __iomem *ls;
 	long spu_ret;
-	int ret, ret2;
+	int ret;
 
 	/* get syscall block from local store */
 	npc = ctx->ops->npc_read(ctx) & ~3;
@@ -316,11 +325,9 @@
 		if (spu_ret <= -ERESTARTSYS) {
 			ret = spu_handle_restartsys(ctx, &spu_ret, &npc);
 		}
-		ret2 = spu_acquire(ctx);
+		mutex_lock(&ctx->state_mutex);
 		if (ret == -ERESTARTSYS)
 			return ret;
-		if (ret2)
-			return -EINTR;
 	}
 
 	/* need to re-get the ls, as it may have changed when we released the
@@ -343,13 +350,14 @@
 	if (mutex_lock_interruptible(&ctx->run_mutex))
 		return -ERESTARTSYS;
 
-	spu_enable_spu(ctx);
 	ctx->event_return = 0;
 
 	ret = spu_acquire(ctx);
 	if (ret)
 		goto out_unlock;
 
+	spu_enable_spu(ctx);
+
 	spu_update_sched_info(ctx);
 
 	ret = spu_run_init(ctx, npc);
diff --git a/arch/powerpc/platforms/cell/spufs/sched.c b/arch/powerpc/platforms/cell/spufs/sched.c
index 7298e7d..2e411f2 100644
--- a/arch/powerpc/platforms/cell/spufs/sched.c
+++ b/arch/powerpc/platforms/cell/spufs/sched.c
@@ -140,6 +140,9 @@
 	 * if it is timesliced or preempted.
 	 */
 	ctx->cpus_allowed = current->cpus_allowed;
+
+	/* Save the current cpu id for spu interrupt routing. */
+	ctx->last_ran = raw_smp_processor_id();
 }
 
 void spu_update_sched_info(struct spu_context *ctx)
@@ -243,7 +246,6 @@
 	spu_switch_log_notify(spu, ctx, SWITCH_LOG_START, 0);
 	spu_restore(&ctx->csa, spu);
 	spu->timestamp = jiffies;
-	spu_cpu_affinity_set(spu, raw_smp_processor_id());
 	spu_switch_notify(spu, ctx);
 	ctx->state = SPU_STATE_RUNNABLE;
 
@@ -657,7 +659,9 @@
 
 			victim->stats.invol_ctx_switch++;
 			spu->stats.invol_ctx_switch++;
-			spu_add_to_rq(victim);
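+			/* Don't requeue a victim that is no longer inside spu_run. */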
+			if (test_bit(SPU_SCHED_SPU_RUN, &victim->sched_flags))
+				spu_add_to_rq(victim);
 
 			mutex_unlock(&victim->state_mutex);
 
diff --git a/arch/powerpc/platforms/cell/spufs/spufs.h b/arch/powerpc/platforms/cell/spufs/spufs.h
index 7312745..454c277 100644
--- a/arch/powerpc/platforms/cell/spufs/spufs.h
+++ b/arch/powerpc/platforms/cell/spufs/spufs.h
@@ -121,6 +121,7 @@
 	cpumask_t cpus_allowed;
 	int policy;
 	int prio;
+	int last_ran;
 
 	/* statistics */
 	struct {
@@ -331,7 +332,7 @@
 /* irq callback funcs. */
 void spufs_ibox_callback(struct spu *spu);
 void spufs_wbox_callback(struct spu *spu);
-void spufs_stop_callback(struct spu *spu);
+void spufs_stop_callback(struct spu *spu, int irq);
 void spufs_mfc_callback(struct spu *spu);
 void spufs_dma_callback(struct spu *spu, int type);
 
diff --git a/arch/powerpc/platforms/cell/spufs/switch.c b/arch/powerpc/platforms/cell/spufs/switch.c
index d2a1249..3df9a36 100644
--- a/arch/powerpc/platforms/cell/spufs/switch.c
+++ b/arch/powerpc/platforms/cell/spufs/switch.c
@@ -132,6 +132,14 @@
 	spu_int_mask_set(spu, 2, 0ul);
 	eieio();
 	spin_unlock_irq(&spu->register_lock);
+
+	/*
+	 * This flag needs to be set before calling synchronize_irq so
+	 * that the update will be visible to the relevant handlers
+	 * via a simple load.
+	 */
+	set_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
+	clear_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags);
 	synchronize_irq(spu->irqs[0]);
 	synchronize_irq(spu->irqs[1]);
 	synchronize_irq(spu->irqs[2]);
@@ -166,9 +174,8 @@
 	/* Save, Step 7:
 	 * Restore, Step 5:
 	 *     Set a software context switch pending flag.
+	 *     Done above in Step 3 - disable_interrupts().
 	 */
-	set_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
-	mb();
 }
 
 static inline void save_mfc_cntl(struct spu_state *csa, struct spu *spu)
@@ -186,20 +193,21 @@
 				 MFC_CNTL_SUSPEND_COMPLETE);
 		/* fall through */
 	case MFC_CNTL_SUSPEND_COMPLETE:
-		if (csa) {
+		if (csa)
 			csa->priv2.mfc_control_RW =
-				MFC_CNTL_SUSPEND_MASK |
+				in_be64(&priv2->mfc_control_RW) |
 				MFC_CNTL_SUSPEND_DMA_QUEUE;
-		}
 		break;
 	case MFC_CNTL_NORMAL_DMA_QUEUE_OPERATION:
 		out_be64(&priv2->mfc_control_RW, MFC_CNTL_SUSPEND_DMA_QUEUE);
 		POLL_WHILE_FALSE((in_be64(&priv2->mfc_control_RW) &
 				  MFC_CNTL_SUSPEND_DMA_STATUS_MASK) ==
 				 MFC_CNTL_SUSPEND_COMPLETE);
-		if (csa) {
-			csa->priv2.mfc_control_RW = 0;
-		}
+		if (csa)
+			csa->priv2.mfc_control_RW =
+				in_be64(&priv2->mfc_control_RW) &
+				~MFC_CNTL_SUSPEND_DMA_QUEUE &
+				~MFC_CNTL_SUSPEND_MASK;
 		break;
 	}
 }
@@ -249,16 +257,21 @@
 	}
 }
 
-static inline void save_mfc_decr(struct spu_state *csa, struct spu *spu)
+static inline void save_mfc_stopped_status(struct spu_state *csa,
+		struct spu *spu)
 {
 	struct spu_priv2 __iomem *priv2 = spu->priv2;
+	const u64 mask = MFC_CNTL_DECREMENTER_RUNNING |
+			MFC_CNTL_DMA_QUEUES_EMPTY;
 
 	/* Save, Step 12:
 	 *     Read MFC_CNTL[Ds].  Update saved copy of
 	 *     CSA.MFC_CNTL[Ds].
+	 *
+	 * update: do the same with MFC_CNTL[Q].
 	 */
-	csa->priv2.mfc_control_RW |=
-		in_be64(&priv2->mfc_control_RW) & MFC_CNTL_DECREMENTER_RUNNING;
+	csa->priv2.mfc_control_RW &= ~mask;
+	csa->priv2.mfc_control_RW |= in_be64(&priv2->mfc_control_RW) & mask;
 }
 
 static inline void halt_mfc_decr(struct spu_state *csa, struct spu *spu)
@@ -462,7 +475,9 @@
 	 * Restore, Step 14.
 	 *     Write MFC_CNTL[Pc]=1 (purge queue).
 	 */
-	out_be64(&priv2->mfc_control_RW, MFC_CNTL_PURGE_DMA_REQUEST);
+	out_be64(&priv2->mfc_control_RW,
+			MFC_CNTL_PURGE_DMA_REQUEST |
+			MFC_CNTL_SUSPEND_MASK);
 	eieio();
 }
 
@@ -725,10 +740,14 @@
 	/* Save, Step 48:
 	 * Restore, Step 23.
 	 *     Change the software context switch pending flag
-	 *     to context switch active.
+	 *     to context switch active.  This implementation does
+	 *     not use a switch active flag.
 	 *
-	 *     This implementation does not uses a switch active flag.
+	 * Now that we have saved the mfc in the csa, we can add in the
+	 * restart command if an exception occurred.
 	 */
+	if (test_bit(SPU_CONTEXT_FAULT_PENDING, &spu->flags))
+		csa->priv2.mfc_control_RW |= MFC_CNTL_RESTART_DMA_COMMAND;
 	clear_bit(SPU_CONTEXT_SWITCH_PENDING, &spu->flags);
 	mb();
 }
@@ -1690,6 +1709,14 @@
 	eieio();
 }
 
+static inline void set_int_route(struct spu_state *csa, struct spu *spu)
+{
+	struct spu_context *ctx = spu->ctx;
+
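+	/* Route this spu's interrupts to the cpu the context last ran on. */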
+	spu_cpu_affinity_set(spu, ctx->last_ran);
+}
+
 static inline void restore_other_spu_access(struct spu_state *csa,
 					    struct spu *spu)
 {
@@ -1721,15 +1747,15 @@
 	 */
 	out_be64(&priv2->mfc_control_RW, csa->priv2.mfc_control_RW);
 	eieio();
+
 	/*
-	 * FIXME: this is to restart a DMA that we were processing
-	 *        before the save. better remember the fault information
-	 *        in the csa instead.
+	 * The queue is put back into the same state that was evident prior to
+	 * the context switch. The suspend flag is added to the saved state in
+	 * the csa, if the operational state was suspending or suspended. In
+	 * this case, the code that suspended the mfc is responsible for
+	 * continuing it. Note that SPE faults do not change the operational
+	 * state of the spu.
 	 */
-	if ((csa->priv2.mfc_control_RW & MFC_CNTL_SUSPEND_DMA_QUEUE_MASK)) {
-		out_be64(&priv2->mfc_control_RW, MFC_CNTL_RESTART_DMA_COMMAND);
-		eieio();
-	}
 }
 
 static inline void enable_user_access(struct spu_state *csa, struct spu *spu)
@@ -1788,7 +1814,7 @@
 	save_spu_runcntl(prev, spu);	        /* Step 9. */
 	save_mfc_sr1(prev, spu);	        /* Step 10. */
 	save_spu_status(prev, spu);	        /* Step 11. */
-	save_mfc_decr(prev, spu);	        /* Step 12. */
+	save_mfc_stopped_status(prev, spu);     /* Step 12. */
 	halt_mfc_decr(prev, spu);	        /* Step 13. */
 	save_timebase(prev, spu);		/* Step 14. */
 	remove_other_spu_access(prev, spu);	/* Step 15. */
@@ -2000,6 +2026,7 @@
 	check_ppuint_mb_stat(next, spu);	/* Step 67. */
 	spu_invalidate_slbs(spu);		/* Modified Step 68. */
 	restore_mfc_sr1(next, spu);	        /* Step 69. */
+	set_int_route(next, spu);		/* NEW      */
 	restore_other_spu_access(next, spu);	/* Step 70. */
 	restore_spu_runcntl(next, spu);	        /* Step 71. */
 	restore_mfc_cntl(next, spu);	        /* Step 72. */
diff --git a/arch/powerpc/xmon/xmon.c b/arch/powerpc/xmon/xmon.c
index 52c7478..1702de9 100644
--- a/arch/powerpc/xmon/xmon.c
+++ b/arch/powerpc/xmon/xmon.c
@@ -2842,9 +2842,11 @@
 	DUMP_FIELD(spu, "0x%lx", ls_size);
 	DUMP_FIELD(spu, "0x%x", node);
 	DUMP_FIELD(spu, "0x%lx", flags);
-	DUMP_FIELD(spu, "0x%lx", dar);
-	DUMP_FIELD(spu, "0x%lx", dsisr);
 	DUMP_FIELD(spu, "%d", class_0_pending);
+	DUMP_FIELD(spu, "0x%lx", class_0_dar);
+	DUMP_FIELD(spu, "0x%lx", class_0_dsisr);
+	DUMP_FIELD(spu, "0x%lx", class_1_dar);
+	DUMP_FIELD(spu, "0x%lx", class_1_dsisr);
 	DUMP_FIELD(spu, "0x%lx", irqs[0]);
 	DUMP_FIELD(spu, "0x%lx", irqs[1]);
 	DUMP_FIELD(spu, "0x%lx", irqs[2]);
diff --git a/include/asm-powerpc/spu.h b/include/asm-powerpc/spu.h
index e3c845b..6abead6 100644
--- a/include/asm-powerpc/spu.h
+++ b/include/asm-powerpc/spu.h
@@ -100,6 +100,7 @@
 
 /* Flag indicating progress during context switch. */
 #define SPU_CONTEXT_SWITCH_PENDING	0UL
+#define SPU_CONTEXT_FAULT_PENDING	1UL
 
 struct spu_context;
 struct spu_runqueue;
@@ -128,9 +129,11 @@
 	unsigned int irqs[3];
 	u32 node;
 	u64 flags;
-	u64 dar;
-	u64 dsisr;
 	u64 class_0_pending;
+	u64 class_0_dar;
+	u64 class_0_dsisr;
+	u64 class_1_dar;
+	u64 class_1_dsisr;
 	size_t ls_size;
 	unsigned int slb_replace;
 	struct mm_struct *mm;
@@ -143,7 +146,7 @@
 
 	void (* wbox_callback)(struct spu *spu);
 	void (* ibox_callback)(struct spu *spu);
-	void (* stop_callback)(struct spu *spu);
+	void (* stop_callback)(struct spu *spu, int irq);
 	void (* mfc_callback)(struct spu *spu);
 
 	char irq_c0[8];
diff --git a/include/asm-powerpc/spu_csa.h b/include/asm-powerpc/spu_csa.h
index 0ab6bff..129ec14 100644
--- a/include/asm-powerpc/spu_csa.h
+++ b/include/asm-powerpc/spu_csa.h
@@ -254,7 +254,8 @@
 	u64 spu_chnldata_RW[32];
 	u32 spu_mailbox_data[4];
 	u32 pu_mailbox_data[1];
-	u64 dar, dsisr, class_0_pending;
+	u64 class_0_dar, class_0_dsisr, class_0_pending;
+	u64 class_1_dar, class_1_dsisr;
 	unsigned long suspend_time;
 	spinlock_t register_lock;
 };