This patch converts various functions to take a virtual page number instead of a virtual address. The virtual page number is the virtual address shifted right by VPN_SHIFT (12) bits. This enables us to support an address range of up to 76 bits.

Reviewed-by: Paul Mackerras <paulus@samba.org>
Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
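For reference, the address arithmetic described above is just a shift: a VPN is the effective address with the low VPN_SHIFT bits dropped, so a 64-bit VPN can describe 64 + 12 = 76 bits of virtual address. The stand-alone sketch below is not part of the patch; the helper name ea_to_vpn is made up for illustration and only mirrors the VPN_SHIFT value quoted above.

#include <stdio.h>

/* Illustrative only: mirrors the VPN_SHIFT value quoted in the commit
 * message; the real definition lives in the powerpc headers. */
#define VPN_SHIFT	12

/* Hypothetical helper: derive a virtual page number from an effective
 * address by dropping the low VPN_SHIFT bits. */
static unsigned long ea_to_vpn(unsigned long ea)
{
	return ea >> VPN_SHIFT;
}

int main(void)
{
	unsigned long ea  = 0x3fffb7f4d000UL;
	unsigned long vpn = ea_to_vpn(ea);

	/* A 64-bit vpn shifted back left by VPN_SHIFT spans 64 + 12 = 76
	 * address bits, which is where "up to 76 bits" comes from. */
	printf("ea  = 0x%lx\nvpn = 0x%lx\n", ea, vpn);
	return 0;
}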
/*
 * PPC64 Huge TLB Page Support for hash based MMUs (POWER4 and later)
 *
 * Copyright (C) 2003 David Gibson, IBM Corporation.
 *
 * Based on the IA-32 version:
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/cacheflush.h>
#include <asm/machdep.h>

int __hash_page_huge(unsigned long ea, unsigned long access, unsigned long vsid,
		     pte_t *ptep, unsigned long trap, int local, int ssize,
		     unsigned int shift, unsigned int mmu_psize)
{
	unsigned long vpn;
	unsigned long old_pte, new_pte;
	unsigned long rflags, pa, sz;
	long slot;

	BUG_ON(shift != mmu_psize_defs[mmu_psize].shift);

	/* Search the Linux page table for a match with va */
	vpn = hpt_vpn(ea, vsid, ssize);

	/* At this point, we have a pte (old_pte) which can be used to build
	 * or update an HPTE. There are 2 cases:
	 *
	 * 1. There is a valid (present) pte with no associated HPTE (this is
	 *	the most common case)
	 * 2. There is a valid (present) pte with an associated HPTE. The
	 *	current values of the pp bits in the HPTE prevent access
	 *	because we are doing software DIRTY bit management and the
	 *	page is currently not DIRTY.
	 */


	do {
		old_pte = pte_val(*ptep);
		/* If PTE busy, retry the access */
		if (unlikely(old_pte & _PAGE_BUSY))
			return 0;
		/* If PTE permissions don't match, take page fault */
		if (unlikely(access & ~old_pte))
			return 1;
		/* Try to lock the PTE, add ACCESSED and DIRTY if it was
		 * a write access */
		new_pte = old_pte | _PAGE_BUSY | _PAGE_ACCESSED;
		if (access & _PAGE_RW)
			new_pte |= _PAGE_DIRTY;
	} while(old_pte != __cmpxchg_u64((unsigned long *)ptep,
					 old_pte, new_pte));

	rflags = 0x2 | (!(new_pte & _PAGE_RW));
	/* _PAGE_EXEC -> HW_NO_EXEC since it's inverted */
	rflags |= ((new_pte & _PAGE_EXEC) ? 0 : HPTE_R_N);
	sz = ((1UL) << shift);
	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE))
		/* No CPU has hugepages but lacks no execute, so we
		 * don't need to worry about that case */
		rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);

	/* Check if pte already has an hpte (case 2) */
	if (unlikely(old_pte & _PAGE_HASHPTE)) {
		/* There MIGHT be an HPTE for this pte */
		unsigned long hash, slot;

		hash = hpt_hash(vpn, shift, ssize);
		if (old_pte & _PAGE_F_SECOND)
			hash = ~hash;
		slot = (hash & htab_hash_mask) * HPTES_PER_GROUP;
		slot += (old_pte & _PAGE_F_GIX) >> 12;

		if (ppc_md.hpte_updatepp(slot, rflags, vpn, mmu_psize,
					 ssize, local) == -1)
			old_pte &= ~_PAGE_HPTEFLAGS;
	}

	if (likely(!(old_pte & _PAGE_HASHPTE))) {
		unsigned long hash = hpt_hash(vpn, shift, ssize);
		unsigned long hpte_group;

		pa = pte_pfn(__pte(old_pte)) << PAGE_SHIFT;

repeat:
		hpte_group = ((hash & htab_hash_mask) *
			      HPTES_PER_GROUP) & ~0x7UL;

		/* clear HPTE slot informations in new PTE */
#ifdef CONFIG_PPC_64K_PAGES
		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HPTE_SUB0;
#else
		new_pte = (new_pte & ~_PAGE_HPTEFLAGS) | _PAGE_HASHPTE;
#endif
		/* Add in WIMG bits */
		rflags |= (new_pte & (_PAGE_WRITETHRU | _PAGE_NO_CACHE |
				      _PAGE_COHERENT | _PAGE_GUARDED));

		/* Insert into the hash table, primary slot */
		slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags, 0,
					  mmu_psize, ssize);

		/* Primary is full, try the secondary */
		if (unlikely(slot == -1)) {
			hpte_group = ((~hash & htab_hash_mask) *
				      HPTES_PER_GROUP) & ~0x7UL;
			slot = ppc_md.hpte_insert(hpte_group, vpn, pa, rflags,
						  HPTE_V_SECONDARY,
						  mmu_psize, ssize);
			if (slot == -1) {
				if (mftb() & 0x1)
					hpte_group = ((hash & htab_hash_mask) *
						      HPTES_PER_GROUP)&~0x7UL;

				ppc_md.hpte_remove(hpte_group);
				goto repeat;
			}
		}

		/*
		 * Hypervisor failure. Restore old pte and return -1
		 * similar to __hash_page_*
		 */
		if (unlikely(slot == -2)) {
			*ptep = __pte(old_pte);
			hash_failure_debug(ea, access, vsid, trap, ssize,
					   mmu_psize, old_pte);
			return -1;
		}

		new_pte |= (slot << 12) & (_PAGE_F_SECOND | _PAGE_F_GIX);
	}

	/*
	 * No need to use ldarx/stdcx here
	 */
	*ptep = __pte(new_pte & ~_PAGE_BUSY);
	return 0;
}
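As a reading aid for the slot bookkeeping in __hash_page_huge(): the slot value returned by ppc_md.hpte_insert() encodes the index within the HPTE group plus a bit for the secondary group, and new_pte |= (slot << 12) & (_PAGE_F_SECOND | _PAGE_F_GIX) stashes that into software bits of the Linux PTE so the update path (case 2 above) can locate the HPTE again later. Below is a minimal sketch of that packing; the EX_* constants are illustrative stand-ins chosen only to mirror the << 12 / >> 12 shifts used above, not the real header definitions.

/* Illustrative stand-ins for the PTE software bits used above. */
#define EX_PAGE_F_GIX		0x7000UL	/* index of the HPTE within its group */
#define EX_PAGE_F_SECOND	0x8000UL	/* HPTE lives in the secondary group */

/* Pack the slot number returned by the insert hook into a PTE (sketch). */
static unsigned long pack_hpte_slot(unsigned long pte, long slot)
{
	return pte | ((slot << 12) & (EX_PAGE_F_SECOND | EX_PAGE_F_GIX));
}

/* Recover the in-group index the way the update path above does. */
static unsigned long unpack_hpte_gix(unsigned long pte)
{
	return (pte & EX_PAGE_F_GIX) >> 12;
}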