/*
 * PPC64 Huge TLB Page Support for hash based MMUs (POWER4 and later)
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/machdep.h>

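/*
 * hpte_insert_repeating() is shared with the normal hash fault path in
 * hash_utils_64.c: it tries the primary hash group, then the secondary,
 * evicting an existing entry and retrying if both groups are full.  A
 * return value of -2 means the hypervisor refused the insertion.
 */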
extern long hpte_insert_repeating(unsigned long hash, unsigned long vpn,
                                  unsigned long pa, unsigned long rflags,
                                  unsigned long vflags, int psize, int ssize);

int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
                     pte_t *ptep, unsigned long trap, int local, int ssize,
                     unsigned int shift, unsigned int mmu_psize)
{
        unsigned long vpn;
        unsigned long old_pte, new_pte;
        unsigned long rflags, pa, sz;
        long slot;

        BUG_ON(shift != mmu_psize_defs[mmu_psize].shift);

        /* Work out the virtual page number used for hash table lookups */
        vpn = hpt_vpn(ea, vsid, ssize);

        /* At this point, we have a pte (old_pte) which can be used to build
         * or update an HPTE. There are 2 cases:
         *
         * 1. There is a valid (present) pte with no associated HPTE (this is
         *      the most common case)
         * 2. There is a valid (present) pte with an associated HPTE. The
         *      current values of the pp bits in the HPTE prevent access
         *      because we are doing software DIRTY bit management and the
         *      page is currently not DIRTY.
         */

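        /*
         * Atomically mark the PTE busy and set ACCESSED (plus DIRTY for a
         * write access).  _PAGE_BUSY acts as a per-PTE lock; if the cmpxchg
         * loses a race the loop re-reads the PTE and tries again, and the
         * busy bit is dropped again when the PTE is written back below.
         */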
        do {
                old_pte = pte_val(*ptep);
                /* If PTE busy, retry the access */
                if (unlikely(old_pte & _PAGE_BUSY))
                        return 0;
                /* If PTE permissions don't match, take page fault */
                if (unlikely(access & ~old_pte))
                        return 1;
                /* Try to lock the PTE, add ACCESSED and DIRTY if it was
                 * a write access */
                new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
                if (access & _PAGE_RW)
                        new_pte |= _PAGE_DIRTY;
        } while (old_pte != __cmpxchg_u64((unsigned long *)ptep,
                                          old_pte, new_pte));

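        /*
         * Build the HPTE protection (pp) bits: 0x2 makes the page
         * read/write, and OR-ing in 1 when the PTE is not writable
         * downgrades it to read-only.
         */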
        rflags = 0x2 | (!(new_pte & _PAGE_RW));
        /* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
        rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
        sz = ((1UL) << shift);
        if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
                /* No CPU that supports hugepages lacks the no-execute
                 * feature, so we don't need to worry about that case */
                rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);

        /* Check if pte already has an hpte (case 2) */
        if (unlikely(old_pte & _PAGE_HASHPTE)) {
                /* There MIGHT be an HPTE for this pte */
                unsigned long hash, slot;

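                /*
                 * The PTE remembers where the HPTE was placed: _PAGE_F_SECOND
                 * says it went into the secondary hash group (whose hash is
                 * the complement of the primary), and _PAGE_F_GIX holds the
                 * slot index within that group.
                 */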
                hash = hpt_hash(vpn, shift, ssize);
                if (old_pte & _PAGE_F_SECOND)
                        hash = ~hash;
                slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
                slot += (old_pte & _PAGE_F_GIX) >> 12;

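                /*
                 * Try to update the protection bits in place.  If the HPTE is
                 * no longer there (-1), forget the stale slot information so
                 * that a fresh HPTE is inserted below.
                 */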
                if (ppc_md.hpte_updatepp(slot, rflags, vpn, mmu_psize,
                                         ssize, local) == -1)
                        old_pte &= ~_PAGE_HPTEFLAGS;
        }

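        /* No valid HPTE yet (case 1): insert a new one */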
        if (likely(!(old_pte & _PAGE_HASHPTE))) {
                unsigned long hash = hpt_hash(vpn, shift, ssize);

                pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;

                /* Clear the HPTE slot information in the new PTE */
#ifdef CONFIG_PPC_64K_PAGES
                new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HPTE_SUB0;
#else
                new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
#endif
                /* Add in WIMG bits */
                rflags |= (new_pte & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
                                      _PAGE_COHERENT | _PAGE_GUARDED));

                slot = hpte_insert_repeating(hash, vpn, pa, rflags, 0,
                                             mmu_psize, ssize);

                /*
                 * Hypervisor failure. Restore old pte and return -1
                 * similar to __hash_page_*
                 */
                if (unlikely(slot == -2)) {
                        *ptep = __pte(old_pte);
                        hash_failure_debug(ea, access, vsid, trap, ssize,
                                           mmu_psize, mmu_psize, old_pte);
                        return -1;
                }

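                /*
                 * Remember where the HPTE went: bit 3 of the returned slot
                 * selects the secondary hash group and bits 0-2 give the
                 * index within the group, which line up with _PAGE_F_SECOND
                 * and _PAGE_F_GIX after the shift.
                 */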
                new_pte |= (slot << 12) & (_PAGE_F_SECOND | _PAGE_F_GIX);
        }

        /*
         * No need to use ldarx/stdcx here: we still hold _PAGE_BUSY, so no
         * one else can change the PTE under us.  This plain store publishes
         * the updated PTE and drops the busy bit in one step.
         */
        *ptep = __pte(new_pte & ~_PAGE_BUSY);
        return 0;
}