[SPARC64]: Add infrastructure for dynamic TSB sizing.

This also cleans up tsb_context_switch().  The assembler
routine is now __tsb_context_switch(), and the former is
an inline function that picks the relevant fields out of
the mm_struct and passes them to the assembler code as
arguments.
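
For reference, the new wrapper reduces to something like the
following sketch; the mm_context_t field names (tsb_reg_val,
tsb_map_vaddr, tsb_map_pte) are assumptions chosen to match the
%o1/%o2/%o3 arguments documented in the assembler below:

	extern void __tsb_context_switch(unsigned long pgd_pa,
					 unsigned long tsb_reg,
					 unsigned long tsb_vaddr,
					 unsigned long tsb_pte);

	static inline void tsb_context_switch(struct mm_struct *mm)
	{
		/* %o0: page table physical address
		 * %o1: TSB register value
		 * %o2: TSB virtual address
		 * %o3: TSB mapping locked PTE
		 */
		__tsb_context_switch(__pa(mm->pgd),
				     mm->context.tsb_reg_val,
				     mm->context.tsb_map_vaddr,
				     mm->context.tsb_map_pte);
	}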

setup_tsb_params() computes the locked TLB entry used to map
the TSB.  Later, when we support the physical address quad
load instructions of Cheetah+ and newer chips, we'll simply
use the physical address for the TSB register value and set
both the mapping virtual address and PTE to zero.
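
As a rough sketch, that computation amounts to the following for
the 8K case, assuming the mm_context_t fields named above plus a
tsb pointer for the TSB's kernel virtual address; TSBMAP_BASE and
the _PAGE_* bits are taken from the assembler code this patch
removes:

	static void setup_tsb_params(struct mm_struct *mm,
				     unsigned long tsb_bytes)
	{
		unsigned long tsb_paddr = __pa(mm->context.tsb);
		unsigned long tte;

		/* Valid, locked, cacheable, privileged, writable. */
		tte = (_PAGE_VALID | _PAGE_SZBITS | _PAGE_L |
		       _PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W);

		/* Map the TSB at a fixed virtual address; the low
		 * bits of the TSB register encode the TSB size
		 * (0 == 8K).  Only the 8K case is shown here;
		 * tsb_bytes selects the size field once dynamic
		 * sizing kicks in.
		 */
		mm->context.tsb_map_vaddr = TSBMAP_BASE;
		mm->context.tsb_map_pte = tte |
			(tsb_paddr & ~(PAGE_SIZE - 1UL));
		mm->context.tsb_reg_val = TSBMAP_BASE | 0x0UL;
	}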

Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/arch/sparc64/kernel/binfmt_aout32.c b/arch/sparc64/kernel/binfmt_aout32.c
index a57d7f2..181c8cd 100644
--- a/arch/sparc64/kernel/binfmt_aout32.c
+++ b/arch/sparc64/kernel/binfmt_aout32.c
@@ -330,8 +330,7 @@
 
 	current->mm->start_stack =
 		(unsigned long) create_aout32_tables((char __user *)bprm->p, bprm);
-	tsb_context_switch(__pa(current->mm->pgd),
-	                   current->mm->context.sparc64_tsb);
+	tsb_context_switch(mm);
 
 	start_thread32(regs, ex.a_entry, current->mm->start_stack);
 	if (current->ptrace & PT_PTRACED)
diff --git a/arch/sparc64/kernel/process.c b/arch/sparc64/kernel/process.c
index 2784aab..26548fc 100644
--- a/arch/sparc64/kernel/process.c
+++ b/arch/sparc64/kernel/process.c
@@ -441,8 +441,7 @@
 
 	mm = t->task->mm;
 	if (mm)
-		tsb_context_switch(__pa(mm->pgd),
-				   mm->context.sparc64_tsb);
+		tsb_context_switch(mm);
 
 	set_thread_wsaved(0);
 
diff --git a/arch/sparc64/kernel/tsb.S b/arch/sparc64/kernel/tsb.S
index 76f2c0b..fe266ba 100644
--- a/arch/sparc64/kernel/tsb.S
+++ b/arch/sparc64/kernel/tsb.S
@@ -130,48 +130,36 @@
 	 * schedule() time.
 	 *
 	 * %o0: page table physical address
-	 * %o1:	TSB address
+	 * %o1:	TSB register value
+	 * %o2:	TSB virtual address
+	 * %o3:	TSB mapping locked PTE
+	 *
+	 * We have to run this whole thing with interrupts
+	 * disabled so that the current cpu doesn't change
+	 * due to preemption.
 	 */
 	.align	32
-	.globl	tsb_context_switch
-tsb_context_switch:
+	.globl	__tsb_context_switch
+__tsb_context_switch:
 	rdpr	%pstate, %o5
 	wrpr	%o5, PSTATE_IE, %pstate
 
-	ldub	[%g6 + TI_CPU], %o3
-	sethi	%hi(trap_block), %o4
-	sllx	%o3, TRAP_BLOCK_SZ_SHIFT, %o3
-	or	%o4, %lo(trap_block), %o4
-	add	%o4, %o3, %o4
-	stx	%o0, [%o4 + TRAP_PER_CPU_PGD_PADDR]
+	ldub	[%g6 + TI_CPU], %g1
+	sethi	%hi(trap_block), %g2
+	sllx	%g1, TRAP_BLOCK_SZ_SHIFT, %g1
+	or	%g2, %lo(trap_block), %g2
+	add	%g2, %g1, %g2
+	stx	%o0, [%g2 + TRAP_PER_CPU_PGD_PADDR]
 
-	brgez	%o1, 9f
-	 nop
-
-	/* Lock TSB into D-TLB.  */
-	sethi		%hi(PAGE_SIZE), %o3
-	and		%o3, %o1, %o3
-	sethi		%hi(TSBMAP_BASE), %o2
-	add		%o2, %o3, %o2
-
-	/* XXX handle PAGE_SIZE != 8K correctly...  */
 	mov	TSB_REG, %g1
-	stxa	%o2, [%g1] ASI_DMMU
+	stxa	%o1, [%g1] ASI_DMMU
 	membar	#Sync
 
-	stxa	%o2, [%g1] ASI_IMMU
+	stxa	%o1, [%g1] ASI_IMMU
 	membar	#Sync
 
-#define KERN_HIGHBITS	((_PAGE_VALID|_PAGE_SZBITS)^0xfffff80000000000)
-#define KERN_LOWBITS	(_PAGE_CP | _PAGE_CV | _PAGE_P | _PAGE_W | _PAGE_L)
-	sethi		%uhi(KERN_HIGHBITS), %g2
-	or		%g2, %ulo(KERN_HIGHBITS), %g2
-	sllx		%g2, 32, %g2
-	or		%g2, KERN_LOWBITS, %g2
-#undef KERN_HIGHBITS
-#undef KERN_LOWBITS
-
-	xor		%o1, %g2, %o1	
+	brz	%o2, 9f
+	 nop
 
 	/* We use entry 61 for this locked entry.  This is the spitfire
 	 * TLB entry number, and luckily cheetah masks the value with
@@ -184,11 +172,10 @@
 	stxa		%o2, [%g1] ASI_DMMU
 	membar		#Sync
 	mov		(61 << 3), %g1
-	stxa		%o1, [%g1] ASI_DTLB_DATA_ACCESS
+	stxa		%o3, [%g1] ASI_DTLB_DATA_ACCESS
 	membar		#Sync
-
 9:
 	wrpr	%o5, %pstate
 
 	retl
-	 mov	%o2, %o0
+	 nop