[SPARC64]: Create a separate kernel TSB for 4MB/256MB mappings.

It can map all of the linear kernel mappings with zero TSB hash
conflicts for systems with 16GB or less of RAM.  In such cases, on
SUN4V, once we load up this TSB the first time with all of the
mappings, we never take a linear kernel mapping TLB miss again;
the hypervisor handles them all.
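
As a quick check of the sizing (using the values from this patch;
"vaddr" and "index" below are just illustrative names):

	4096 entries * 4MB per entry = 16GB

	/* Each 4MB-aligned linear address selects its own slot: */
	index = (vaddr >> 22) & (KERNEL_TSB4M_NENTRIES - 1);

so with 16GB or less of RAM every linear mapping gets a unique
TSB entry and none of them collide.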

Signed-off-by: David S. Miller <davem@davemloft.net>
diff --git a/arch/sparc64/kernel/ktlb.S b/arch/sparc64/kernel/ktlb.S
index ae1dac1..efcf38b 100644
--- a/arch/sparc64/kernel/ktlb.S
+++ b/arch/sparc64/kernel/ktlb.S
@@ -121,6 +121,12 @@
 	 nop
 
 	.align		32
+kvmap_dtlb_tsb4m_load:
+	KTSB_LOCK_TAG(%g1, %g2, %g7)
+	KTSB_WRITE(%g1, %g5, %g6)
+	ba,pt		%xcc, kvmap_dtlb_load
+	 nop
+
 kvmap_dtlb:
 	/* %g6: TAG TARGET */
 	mov		TLB_TAG_ACCESS, %g4
@@ -133,6 +139,13 @@
 	brgez,pn	%g4, kvmap_dtlb_nonlinear
 	 nop
 
+	/* Correct TAG_TARGET is already in %g6, check the 4MB TSB.  */
+	KERN_TSB4M_LOOKUP_TL1(%g6, %g5, %g1, %g2, %g3, kvmap_dtlb_load)
+
+	/* TSB entry address left in %g1, look up the linear PTE.
+	 * Must preserve %g1 and %g6 (TAG).
+	 */
+kvmap_dtlb_tsb4m_miss:
 	sethi		%hi(kpte_linear_bitmap), %g2
 	or		%g2, %lo(kpte_linear_bitmap), %g2
 
@@ -163,7 +176,7 @@
 
 	.globl		kvmap_linear_patch
 kvmap_linear_patch:
-	ba,pt		%xcc, kvmap_dtlb_load
+	ba,pt		%xcc, kvmap_dtlb_tsb4m_load
 	 xor		%g2, %g4, %g5
 
 kvmap_dtlb_vmalloc_addr:
diff --git a/arch/sparc64/mm/init.c b/arch/sparc64/mm/init.c
index b5869f0..2a12313 100644
--- a/arch/sparc64/mm/init.c
+++ b/arch/sparc64/mm/init.c
@@ -58,6 +58,9 @@
  */
 unsigned long kpte_linear_bitmap[KPTE_BITMAP_BYTES / sizeof(unsigned long)];
 
+/* A special kernel TSB for 4MB and 256MB linear mappings.  */
+struct tsb swapper_4m_tsb[KERNEL_TSB4M_NENTRIES];
+
 #define MAX_BANKS	32
 
 static struct linux_prom64_registers pavail[MAX_BANKS] __initdata;
@@ -1086,6 +1089,7 @@
 {
 	unsigned long ktsb_pa;
 
+	/* First KTSB for PAGE_SIZE mappings.  */
 	ktsb_pa = kern_base + ((unsigned long)&swapper_tsb[0] - KERNBASE);
 
 	switch (PAGE_SIZE) {
@@ -1117,9 +1121,18 @@
 	ktsb_descr[0].tsb_base = ktsb_pa;
 	ktsb_descr[0].resv = 0;
 
-	/* XXX When we have a kernel large page size TSB, describe
-	 * XXX it in ktsb_descr[1] here.
-	 */
+	/* Second KTSB for 4MB/256MB mappings.  */
+	ktsb_pa = (kern_base +
+		   ((unsigned long)&swapper_4m_tsb[0] - KERNBASE));
+
+	ktsb_descr[1].pgsz_idx = HV_PGSZ_IDX_4MB;
+	ktsb_descr[1].pgsz_mask = (HV_PGSZ_MASK_4MB |
+				   HV_PGSZ_MASK_256MB);
+	ktsb_descr[1].assoc = 1;
+	ktsb_descr[1].num_ttes = KERNEL_TSB4M_NENTRIES;
+	ktsb_descr[1].ctx_idx = 0;
+	ktsb_descr[1].tsb_base = ktsb_pa;
+	ktsb_descr[1].resv = 0;
 }
 
 void __cpuinit sun4v_ktsb_register(void)
@@ -1132,8 +1145,7 @@
 	pa = kern_base + ((unsigned long)&ktsb_descr[0] - KERNBASE);
 
 	func = HV_FAST_MMU_TSB_CTX0;
-	/* XXX set arg0 to 2 when we use ktsb_descr[1], see above XXX */
-	arg0 = 1;
+	arg0 = 2;
 	arg1 = pa;
 	__asm__ __volatile__("ta	%6"
 			     : "=&r" (func), "=&r" (arg0), "=&r" (arg1)
@@ -1160,7 +1172,9 @@
 	kern_base = (prom_boot_mapping_phys_low >> 22UL) << 22UL;
 	kern_size = (unsigned long)&_end - (unsigned long)KERNBASE;
 
+	/* Invalidate both kernel TSBs.  */
 	memset(swapper_tsb, 0x40, sizeof(swapper_tsb));
+	memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
 
 	if (tlb_type == hypervisor)
 		sun4v_pgprot_init();
diff --git a/include/asm-sparc64/tsb.h b/include/asm-sparc64/tsb.h
index 6e67680..e82612c 100644
--- a/include/asm-sparc64/tsb.h
+++ b/include/asm-sparc64/tsb.h
@@ -243,6 +243,7 @@
 #define KERNEL_TSB_SIZE_BYTES	(32 * 1024)
 #define KERNEL_TSB_NENTRIES	\
 	(KERNEL_TSB_SIZE_BYTES / 16)
+#define KERNEL_TSB4M_NENTRIES	4096
 
 	/* Do a kernel TSB lookup at tl>0 on VADDR+TAG, branch to OK_LABEL
 	 * on TSB hit.  REG1, REG2, REG3, and REG4 are used as temporaries
@@ -263,4 +264,18 @@
 	be,a,pt		%xcc, OK_LABEL; \
 	 mov		REG4, REG1;
 
+	/* This version uses a trick: the TAG is already (VADDR >> 22),
+	 * so we can use it directly for the index computation.
+	 */
+#define KERN_TSB4M_LOOKUP_TL1(TAG, REG1, REG2, REG3, REG4, OK_LABEL) \
+	sethi		%hi(swapper_4m_tsb), REG1; \
+	or		REG1, %lo(swapper_4m_tsb), REG1; \
+	and		TAG, (KERNEL_TSB4M_NENTRIES - 1), REG2; \
+	sllx		REG2, 4, REG2; \
+	add		REG1, REG2, REG2; \
+	KTSB_LOAD_QUAD(REG2, REG3); \
+	cmp		REG3, TAG; \
+	be,a,pt		%xcc, OK_LABEL; \
+	 mov		REG4, REG1;
+
 #endif /* !(_SPARC64_TSB_H) */