pte alloc routines must wait for split_huge_page if the pmd is not present and not null (i.e. pmd_trans_splitting). The additional branches are optimized away at compile time by pmd_trans_splitting if the config option is off. However we must pass the vma down in order to know the anon_vma lock to wait for.

[akpm@linux-foundation.org: coding-style fixes]
Signed-off-by: Andrea Arcangeli <aarcange@redhat.com>
Acked-by: Rik van Riel <riel@redhat.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
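For context, the shape of the check the message describes can be sketched as follows. This is a hypothetical helper, not the mainline diff: pte_alloc_wait_split() is invented here for illustration, and it assumes the THP-era interfaces the message refers to (pmd_trans_splitting(), wait_split_huge_page(), and the four-argument pte_alloc_map() that now takes the vma).

#include <linux/mm.h>	/* pte_alloc_map(), pmd_trans_splitting(), wait_split_huge_page() */

/*
 * Hypothetical sketch of a pte alloc path that honours a splitting pmd.
 * A pmd that is neither none nor pointing to a regular page table is a
 * huge pmd caught mid-split; with CONFIG_TRANSPARENT_HUGEPAGE off,
 * pmd_trans_splitting() is constant 0 and the branch is compiled away.
 */
static pte_t *pte_alloc_wait_split(struct mm_struct *mm,
				   struct vm_area_struct *vma,
				   pmd_t *pmd, unsigned long addr)
{
	if (unlikely(pmd_trans_splitting(*pmd))) {
		/*
		 * split_huge_page takes the anon_vma lock for the whole
		 * split, so waiting on it guarantees the pmd points to a
		 * regular page table afterwards.  This is why the vma has
		 * to be passed down: it is how we find the anon_vma.
		 */
		wait_split_huge_page(vma->anon_vma, pmd);
	}
	return pte_alloc_map(mm, vma, pmd, addr);
}

Callers that can never observe a splitting pmd may pass a NULL vma; that is why the hugetlbfs path below calls pte_alloc_map(mm, NULL, pmd, addr) in huge_pte_alloc().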
/*
 * arch/sh/mm/hugetlbpage.c
 *
 * SuperH HugeTLB page support.
 *
 * Cloned from sparc64 by Paul Mundt.
 *
 * Copyright (C) 2002, 2003 David S. Miller (davem@redhat.com)
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/sysctl.h>

#include <asm/mman.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>

pte_t *huge_pte_alloc(struct mm_struct *mm,
			unsigned long addr, unsigned long sz)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd) {
		pud = pud_alloc(mm, pgd, addr);
		if (pud) {
			pmd = pmd_alloc(mm, pud, addr);
			if (pmd)
				pte = pte_alloc_map(mm, NULL, pmd, addr);
		}
	}

	return pte;
}

pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;

	pgd = pgd_offset(mm, addr);
	if (pgd) {
		pud = pud_offset(pgd, addr);
		if (pud) {
			pmd = pmd_offset(pud, addr);
			if (pmd)
				pte = pte_offset_map(pmd, addr);
		}
	}

	return pte;
}

int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
{
	return 0;
}

struct page *follow_huge_addr(struct mm_struct *mm,
			      unsigned long address, int write)
{
	return ERR_PTR(-EINVAL);
}

int pmd_huge(pmd_t pmd)
{
	return 0;
}

int pud_huge(pud_t pud)
{
	return 0;
}

struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
			     pmd_t *pmd, int write)
{
	return NULL;
}