[SPARC64]: Add infrastructure for dynamic TSB sizing.

This also cleans up tsb_context_switch().  The assembler
routine is now __tsb_context_switch(), and the former is an
inline function that picks the necessary bits out of the
mm_struct and passes them to the assembler code as arguments.
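
For reference, the inline wrapper relies on the mm's context
carrying the per-mm TSB state.  A minimal sketch of the
mm_context_t fields involved (only the fields referenced in the
diff below are shown; the actual struct layout may differ):

	typedef struct {
		unsigned long	sparc64_ctx_val;
		/* ... other members omitted ... */
		unsigned long	tsb_reg_val;	/* value loaded into the TSB register */
		unsigned long	tsb_map_vaddr;	/* vaddr of the locked TLB entry mapping the TSB */
		unsigned long	tsb_map_pte;	/* PTE used for that locked entry */
	} mm_context_t;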

setup_tsb_params() computes the locked TLB entry used to map the
TSB.  Later, when we support the physical address quad load
instructions of Cheetah+ and later chips, we'll simply use the
physical address for the TSB register value and set both the map
virtual address and the map PTE to zero.
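
A hypothetical sketch of that future selection logic (names and
structure here are illustrative, not the actual setup_tsb_params()
implementation):

	static void setup_tsb_params_sketch(struct mm_struct *mm,
					    unsigned long tsb_vaddr,
					    unsigned long tsb_paddr,
					    unsigned long tsb_pte,
					    int have_pa_quad_loads)
	{
		if (have_pa_quad_loads) {
			/* Cheetah+ and later: the TSB register can hold
			 * a physical address directly, so no locked TLB
			 * entry is needed.
			 */
			mm->context.tsb_reg_val   = tsb_paddr;
			mm->context.tsb_map_vaddr = 0UL;
			mm->context.tsb_map_pte   = 0UL;
		} else {
			/* Otherwise the TSB must be reachable through a
			 * locked TLB entry, so point the register at the
			 * virtual mapping and record the mapping's PTE.
			 */
			mm->context.tsb_reg_val   = tsb_vaddr;
			mm->context.tsb_map_vaddr = tsb_vaddr;
			mm->context.tsb_map_pte   = tsb_pte;
		}
	}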

Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/include/asm-sparc64/mmu_context.h b/include/asm-sparc64/mmu_context.h
index 0dffb4c..0a950f1 100644
--- a/include/asm-sparc64/mmu_context.h
+++ b/include/asm-sparc64/mmu_context.h
@@ -22,7 +22,15 @@
 extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
 extern void destroy_context(struct mm_struct *mm);
 
-extern unsigned long tsb_context_switch(unsigned long pgd_pa, unsigned long *tsb);
+extern void __tsb_context_switch(unsigned long pgd_pa, unsigned long tsb_reg,
+				 unsigned long tsb_vaddr, unsigned long tsb_pte);
+
+static inline void tsb_context_switch(struct mm_struct *mm)
+{
+	__tsb_context_switch(__pa(mm->pgd), mm->context.tsb_reg_val,
+			     mm->context.tsb_map_vaddr,
+			     mm->context.tsb_map_pte);
+}
 
 /* Set MMU context in the actual hardware. */
 #define load_secondary_context(__mm) \
@@ -52,8 +60,7 @@
 
 	if (!ctx_valid || (old_mm != mm)) {
 		load_secondary_context(mm);
-		tsb_context_switch(__pa(mm->pgd),
-				   mm->context.sparc64_tsb);
+		tsb_context_switch(mm);
 	}
 
 	/* Even if (mm == old_mm) we _must_ check
@@ -91,7 +98,7 @@
 
 	load_secondary_context(mm);
 	__flush_tlb_mm(CTX_HWBITS(mm->context), SECONDARY_CONTEXT);
-	tsb_context_switch(__pa(mm->pgd), mm->context.sparc64_tsb);
+	tsb_context_switch(mm);
 }
 
 #endif /* !(__ASSEMBLY__) */