/*
 * linux/arch/cris/arch-v10/mm/tlb.c
 *
 * Low level TLB handling
 *
 *
 * Copyright (C) 2000-2002 Axis Communications AB
 *
 * Authors: Bjorn Wesen (bjornw@axis.com)
 *
 */

#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/arch/svinto.h>

#define D(x)

/* The TLB can host up to 64 different mm contexts at the same time.
 * The running context is R_MMU_CONTEXT, and each TLB entry contains a
 * page_id that has to match to give a hit. In page_id_map, we keep track
 * of which page_id each mm has been assigned, so that we know when
 * to invalidate TLB entries.
 *
 * The last page_id is never used by a running context - it is reserved
 * as an invalid page_id, so that we can write TLB entries that will
 * never match.
 *
 * Notice that we need to make the flushes atomic, otherwise an interrupt
 * handler that uses vmalloced memory might cause a TLB load in the middle
 * of a flush and leave the TLB in an inconsistent state; the flushes
 * therefore run with interrupts disabled.
 */
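
/* Illustration only: the page_id bookkeeping itself (page_id_map and the
 * allocator behind get_mmu_context()) lives in the shared CRIS mm code,
 * not in this file. The sketch below is a simplified, hypothetical picture
 * of how an id could be handed out and recycled; the example_* names are
 * made up for illustration and are not the actual implementation.
 */
#if 0
static struct mm_struct *example_page_id_map[INVALID_PAGEID];
static int example_next_id;

static void example_alloc_context(struct mm_struct *mm)
{
        struct mm_struct *old = example_page_id_map[example_next_id];

        /* evict whichever mm held this id, so its stale entries cannot hit */
        if (old) {
                flush_tlb_mm(old);
                old->context.page_id = NO_CONTEXT;
        }

        example_page_id_map[example_next_id] = mm;
        mm->context.page_id = example_next_id;

        /* the last page_id is reserved as INVALID_PAGEID, never hand it out */
        if (++example_next_id == INVALID_PAGEID)
                example_next_id = 0;
}
#endif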

/* invalidate all TLB entries */

void
flush_tlb_all(void)
{
        int i;
        unsigned long flags;

        /* the vpn is written as i & 0xf so that we don't put identical
         * TLB entries into the same 4-way entry group
         */

        local_save_flags(flags);
        local_irq_disable();
        for (i = 0; i < NUM_TLB_ENTRIES; i++) {
                *R_TLB_SELECT = ( IO_FIELD(R_TLB_SELECT, index, i) );
                *R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
                              IO_FIELD(R_TLB_HI, vpn,     i & 0xf ) );

                *R_TLB_LO = ( IO_STATE(R_TLB_LO, global, no ) |
                              IO_STATE(R_TLB_LO, valid,  no ) |
                              IO_STATE(R_TLB_LO, kernel, no ) |
                              IO_STATE(R_TLB_LO, we,     no ) |
                              IO_FIELD(R_TLB_LO, pfn,    0  ) );
        }
        local_irq_restore(flags);
        D(printk("tlb: flushed all\n"));
}

/* invalidate the selected mm context only */

void
flush_tlb_mm(struct mm_struct *mm)
{
        int i;
        int page_id = mm->context.page_id;
        unsigned long flags;

        D(printk("tlb: flush mm context %d (%p)\n", page_id, mm));

        if (page_id == NO_CONTEXT)
                return;

        /* mark the TLB entries that match the page_id as invalid.
         * Here we could also check the _PAGE_GLOBAL bit and NOT flush
         * global pages - is it worth the extra I/O? (A sketch of that
         * variant follows after this function.)
         */

        local_save_flags(flags);
        local_irq_disable();
        for (i = 0; i < NUM_TLB_ENTRIES; i++) {
                *R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
                if (IO_EXTRACT(R_TLB_HI, page_id, *R_TLB_HI) == page_id) {
                        *R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
                                      IO_FIELD(R_TLB_HI, vpn,     i & 0xf ) );

                        *R_TLB_LO = ( IO_STATE(R_TLB_LO, global, no ) |
                                      IO_STATE(R_TLB_LO, valid,  no ) |
                                      IO_STATE(R_TLB_LO, kernel, no ) |
                                      IO_STATE(R_TLB_LO, we,     no ) |
                                      IO_FIELD(R_TLB_LO, pfn,    0  ) );
                }
        }
        local_irq_restore(flags);
}
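
/* Illustration only: a sketch of the variant contemplated in the comment
 * inside flush_tlb_mm() above - skip entries whose global bit is set, at
 * the cost of one extra read of R_TLB_LO per entry. This is not the
 * implementation used; the name is made up.
 */
#if 0
static void example_flush_tlb_mm_nonglobal(struct mm_struct *mm)
{
        int i;
        int page_id = mm->context.page_id;
        unsigned long flags;

        if (page_id == NO_CONTEXT)
                return;

        local_save_flags(flags);
        local_irq_disable();
        for (i = 0; i < NUM_TLB_ENTRIES; i++) {
                *R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
                if (IO_EXTRACT(R_TLB_HI, page_id, *R_TLB_HI) != page_id)
                        continue;
                /* the extra I/O: leave global entries alone */
                if (IO_EXTRACT(R_TLB_LO, global, *R_TLB_LO))
                        continue;
                *R_TLB_HI = ( IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
                              IO_FIELD(R_TLB_HI, vpn,     i & 0xf ) );
                *R_TLB_LO = ( IO_STATE(R_TLB_LO, global, no ) |
                              IO_STATE(R_TLB_LO, valid,  no ) |
                              IO_STATE(R_TLB_LO, kernel, no ) |
                              IO_STATE(R_TLB_LO, we,     no ) |
                              IO_FIELD(R_TLB_LO, pfn,    0  ) );
        }
        local_irq_restore(flags);
}
#endif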

/* invalidate a single page */

void
flush_tlb_page(struct vm_area_struct *vma,
               unsigned long addr)
{
        struct mm_struct *mm = vma->vm_mm;
        int page_id = mm->context.page_id;
        int i;
        unsigned long flags;

        D(printk("tlb: flush page 0x%lx in context %d (%p)\n", addr, page_id, mm));

        if (page_id == NO_CONTEXT)
                return;

        addr &= PAGE_MASK; /* perhaps not necessary */

        /* invalidate those TLB entries that match both the mm context
         * and the virtual address requested
         */

        local_save_flags(flags);
        local_irq_disable();
        for (i = 0; i < NUM_TLB_ENTRIES; i++) {
                unsigned long tlb_hi;
                *R_TLB_SELECT = IO_FIELD(R_TLB_SELECT, index, i);
                tlb_hi = *R_TLB_HI;
                if (IO_EXTRACT(R_TLB_HI, page_id, tlb_hi) == page_id &&
                    (tlb_hi & PAGE_MASK) == addr) {
                        *R_TLB_HI = IO_FIELD(R_TLB_HI, page_id, INVALID_PAGEID ) |
                                    addr; /* same addr as before works. */

                        *R_TLB_LO = ( IO_STATE(R_TLB_LO, global, no ) |
                                      IO_STATE(R_TLB_LO, valid,  no ) |
                                      IO_STATE(R_TLB_LO, kernel, no ) |
                                      IO_STATE(R_TLB_LO, we,     no ) |
                                      IO_FIELD(R_TLB_LO, pfn,    0  ) );
                }
        }
        local_irq_restore(flags);
}

/* dump the entire TLB for debug purposes */

#if 0
void
dump_tlb_all(void)
{
        int i;
        unsigned long flags;

        printk("TLB dump. LO is: pfn | reserved | global | valid | kernel | we |\n");

        local_save_flags(flags);
        local_irq_disable();
        for (i = 0; i < NUM_TLB_ENTRIES; i++) {
                *R_TLB_SELECT = ( IO_FIELD(R_TLB_SELECT, index, i) );
                printk("Entry %d: HI 0x%08lx, LO 0x%08lx\n",
                       i, *R_TLB_HI, *R_TLB_LO);
        }
        local_irq_restore(flags);
}
#endif

/*
 * Initialize the context related info for a new mm_struct
 * instance.
 */

int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
        mm->context.page_id = NO_CONTEXT;
        return 0;
}

/* called in schedule() just before actually doing the switch_to */

void
switch_mm(struct mm_struct *prev, struct mm_struct *next,
          struct task_struct *tsk)
{
        /* make sure we have a context */

        get_mmu_context(next);

        /* remember the pgd for the fault handlers.
         * This is similar to the pgd register in some other CPUs.
         * We need our own copy of it because current and active_mm
         * might be invalid at points where we still need to dereference
         * the pgd (see the sketch at the end of this file).
         */

        per_cpu(current_pgd, smp_processor_id()) = next->pgd;

        /* switch context in the MMU */

        D(printk("switching mmu_context to %d (%p)\n", next->context.page_id, next));

        *R_MMU_CONTEXT = IO_FIELD(R_MMU_CONTEXT, page_id, next->context.page_id);
}
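
/* Illustration only: why the fault path wants the per-cpu pgd copy that
 * switch_mm() saves above. The real fault handler lives elsewhere in the
 * CRIS mm code; the sketch below is hypothetical, with a made-up name,
 * and is not the actual implementation.
 */
#if 0
static pgd_t *example_fault_pgd_entry(unsigned long address)
{
        /* start from the per-cpu copy rather than current->active_mm->pgd,
         * since current/active_mm may not be valid when the fault hits */
        pgd_t *pgd = per_cpu(current_pgd, smp_processor_id());

        return pgd + pgd_index(address);
}
#endif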