Chris Zankel | 9a8fd55 | 2005-06-23 22:01:26 -0700 | [diff] [blame] | 1 | /* |
| 2 | * include/asm-xtensa/mmu_context.h |
| 3 | * |
| 4 | * Switch an MMU context. |
| 5 | * |
| 6 | * This file is subject to the terms and conditions of the GNU General Public |
| 7 | * License. See the file "COPYING" in the main directory of this archive |
| 8 | * for more details. |
| 9 | * |
| 10 | * Copyright (C) 2001 - 2005 Tensilica Inc. |
| 11 | */ |
| 12 | |
| 13 | #ifndef _XTENSA_MMU_CONTEXT_H |
| 14 | #define _XTENSA_MMU_CONTEXT_H |
| 15 | |
Chris Zankel | 9a8fd55 | 2005-06-23 22:01:26 -0700 | [diff] [blame] | 16 | #include <linux/stringify.h> |
| 17 | |
| 18 | #include <asm/pgtable.h> |
Chris Zankel | 9a8fd55 | 2005-06-23 22:01:26 -0700 | [diff] [blame] | 19 | #include <asm/cacheflush.h> |
| 20 | #include <asm/tlbflush.h> |
| 21 | |
/* Width of the hardware ASID field; fixed at 8 bits for this port. */
#define XCHAL_MMU_ASID_BITS	8

#if (XCHAL_HAVE_TLBS != 1)
# error "Linux must have an MMU!"
#endif
| 27 | |
/*
 * Global ASID allocation counter: low XCHAL_MMU_ASID_BITS hold the most
 * recently assigned ASID, the upper bits act as a "generation" number
 * that is bumped each time the 8-bit ASID space wraps (see
 * __get_new_mmu_context()).
 */
extern unsigned long asid_cache;

/*
 * NO_CONTEXT is the invalid ASID value that we don't ever assign to
 * any user or kernel context.
 *
 * 0 invalid
 * 1 kernel
 * 2 reserved
 * 3 reserved
 * 4...255 available
 */

#define NO_CONTEXT	0
#define ASID_USER_FIRST	4
#define ASID_MASK	((1 << XCHAL_MMU_ASID_BITS) - 1)
/*
 * Build a RASID register image with one ASID byte per protection ring:
 * the constant 0x03020001 supplies the fixed ASIDs (kernel = 1, plus
 * the reserved values 2 and 3) and the user ASID 'x' is inserted into
 * the second byte.
 * NOTE(review): exact ring-to-byte mapping assumed from the constant's
 * layout — confirm against the Xtensa ISA RASID description.
 */
#define ASID_INSERT(x)	(0x03020001 | (((x) & ASID_MASK) << 8))
Chris Zankel | 9a8fd55 | 2005-06-23 22:01:26 -0700 | [diff] [blame] | 45 | |
/*
 * Write VAL (an ASID_INSERT() image) to the RASID special register.
 * The isync makes the new ring->ASID mapping visible before any
 * subsequent instruction fetch or load/store is translated.
 */
static inline void set_rasid_register (unsigned long val)
{
	__asm__ __volatile__ (" wsr %0, "__stringify(RASID)"\n\t"
			      " isync\n" : : "a" (val));
}
| 51 | |
/* Read back the current contents of the RASID special register. */
static inline unsigned long get_rasid_register (void)
{
	unsigned long tmp;
	__asm__ __volatile__ (" rsr %0,"__stringify(RASID)"\n\t" : "=a" (tmp));
	return tmp;
}
| 58 | |
Adrian Bunk | d99cf71 | 2005-09-03 15:57:53 -0700 | [diff] [blame] | 59 | static inline void |
Chris Zankel | 173d668 | 2006-12-10 02:18:48 -0800 | [diff] [blame] | 60 | __get_new_mmu_context(struct mm_struct *mm) |
Chris Zankel | 9a8fd55 | 2005-06-23 22:01:26 -0700 | [diff] [blame] | 61 | { |
| 62 | extern void flush_tlb_all(void); |
Chris Zankel | 173d668 | 2006-12-10 02:18:48 -0800 | [diff] [blame] | 63 | if (! (++asid_cache & ASID_MASK) ) { |
Chris Zankel | 9a8fd55 | 2005-06-23 22:01:26 -0700 | [diff] [blame] | 64 | flush_tlb_all(); /* start new asid cycle */ |
Chris Zankel | 173d668 | 2006-12-10 02:18:48 -0800 | [diff] [blame] | 65 | asid_cache += ASID_USER_FIRST; |
Chris Zankel | 9a8fd55 | 2005-06-23 22:01:26 -0700 | [diff] [blame] | 66 | } |
Chris Zankel | 173d668 | 2006-12-10 02:18:48 -0800 | [diff] [blame] | 67 | mm->context = asid_cache; |
Chris Zankel | 9a8fd55 | 2005-06-23 22:01:26 -0700 | [diff] [blame] | 68 | } |
| 69 | |
/*
 * Make MM's context current in the MMU: program RASID with the mm's
 * ASID, then drop any cached page-directory translations so the new
 * mapping takes effect.
 */
static inline void
__load_mmu_context(struct mm_struct *mm)
{
	set_rasid_register(ASID_INSERT(mm->context));
	invalidate_page_directory();
}
| 76 | |
/*
 * Initialize the context related info for a new mm_struct
 * instance.  The mm starts with no ASID; one is assigned lazily on
 * the first activate_mm()/switch_mm().  Always succeeds (returns 0).
 */

static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
	mm->context = NO_CONTEXT;
	return 0;
}
| 88 | |
/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 * Unlike switch_mm(), no generation check is done here.
 */
static inline void
activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	/* Unconditionally get a new ASID. */

	__get_new_mmu_context(next);
	__load_mmu_context(next);
}
| 101 | |
| 102 | |
Adrian Bunk | d99cf71 | 2005-09-03 15:57:53 -0700 | [diff] [blame] | 103 | static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, |
Chris Zankel | 9a8fd55 | 2005-06-23 22:01:26 -0700 | [diff] [blame] | 104 | struct task_struct *tsk) |
| 105 | { |
| 106 | unsigned long asid = asid_cache; |
| 107 | |
| 108 | /* Check if our ASID is of an older version and thus invalid */ |
| 109 | |
Chris Zankel | 173d668 | 2006-12-10 02:18:48 -0800 | [diff] [blame] | 110 | if (next->context == NO_CONTEXT || ((next->context^asid) & ~ASID_MASK)) |
| 111 | __get_new_mmu_context(next); |
Chris Zankel | 9a8fd55 | 2005-06-23 22:01:26 -0700 | [diff] [blame] | 112 | |
Chris Zankel | 173d668 | 2006-12-10 02:18:48 -0800 | [diff] [blame] | 113 | __load_mmu_context(next); |
Chris Zankel | 9a8fd55 | 2005-06-23 22:01:26 -0700 | [diff] [blame] | 114 | } |
| 115 | |
/* Nothing to do when a task temporarily stops using an mm. */
#define deactivate_mm(tsk, mm)	do { } while(0)

/*
 * Destroy context related info for an mm_struct that is about
 * to be put to rest.  Drops any cached translations of the dying
 * page directory; the ASID itself is simply left to expire.
 */
static inline void destroy_context(struct mm_struct *mm)
{
	invalidate_page_directory();
}
| 126 | |
| 127 | |
/* Lazy TLB mode is not used on this architecture. */
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
	/* Nothing to do. */

}
| 133 | |
| 134 | #endif /* _XTENSA_MMU_CONTEXT_H */ |