The problem is that we check nr_ptes/nr_pmds in exit_mmap() which happens *before* pgd_free(). And if an arch does pte/pmd allocation in pgd_alloc() and frees them in pgd_free() we see an offset in the counters by the time of the checks. We tried to work around this by offsetting the expected counter value according to FIRST_USER_ADDRESS for both nr_ptes and nr_pmds in exit_mmap(). But it doesn't work in some cases: 1. ARM with LPAE enabled also has a non-zero USER_PGTABLES_CEILING, but the upper addresses are occupied by huge pmd entries, so the trick with offsetting the expected counter value gets really ugly: we would have to apply it to nr_pmds, but not to nr_ptes. 2. Metag has a non-zero FIRST_USER_ADDRESS, but doesn't do pte/pmd page table allocation in pgd_alloc(); it just sets up a pgd entry which is allocated at boot and shared across all processes. The proposal is to move the check to check_mm(), which happens *after* pgd_free(), and do proper accounting during pgd_alloc() and pgd_free(), which would bring the counters to zero if nothing leaked. Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com> Reported-by: Tyler Baker <tyler.baker@linaro.org> Tested-by: Tyler Baker <tyler.baker@linaro.org> Tested-by: Nishanth Menon <nm@ti.com> Cc: Russell King <linux@arm.linux.org.uk> Cc: James Hogan <james.hogan@imgtec.com> Cc: Guan Xuetao <gxt@mprc.pku.edu.cn> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
		
			
				
	
	
		
			105 lines
		
	
	
	
		
			2.2 KiB
			
		
	
	
	
		
			C
		
	
	
	
	
	
			
		
		
	
	
			105 lines
		
	
	
	
		
			2.2 KiB
			
		
	
	
	
		
			C
		
	
	
	
	
	
/*
 | 
						|
 * linux/arch/unicore32/mm/pgd.c
 | 
						|
 *
 | 
						|
 * Code specific to PKUnity SoC and UniCore ISA
 | 
						|
 *
 | 
						|
 * Copyright (C) 2001-2010 GUAN Xue-tao
 | 
						|
 *
 | 
						|
 * This program is free software; you can redistribute it and/or modify
 | 
						|
 * it under the terms of the GNU General Public License version 2 as
 | 
						|
 * published by the Free Software Foundation.
 | 
						|
 */
 | 
						|
#include <linux/mm.h>
 | 
						|
#include <linux/gfp.h>
 | 
						|
#include <linux/highmem.h>
 | 
						|
 | 
						|
#include <asm/pgalloc.h>
 | 
						|
#include <asm/page.h>
 | 
						|
#include <asm/tlbflush.h>
 | 
						|
 | 
						|
#include "mm.h"
 | 
						|
 | 
						|
#define FIRST_KERNEL_PGD_NR	(FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)
 | 
						|
 | 
						|
/*
 * need to get a 4k page for level 1
 *
 * Allocate and initialize a new first-level page table for @mm.
 * Returns the new pgd, or NULL on allocation failure.
 *
 * The user portion of the table is zeroed; the kernel/IO portion is
 * copied from the init pgd so kernel mappings are shared.  When the
 * machine vectors live at the low end of the address space
 * (!vectors_high()), the pmd/pte covering address 0 are also populated.
 */
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	/* One 4K page (order 0) holds the entire first-level table. */
	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 0);
	if (!new_pgd)
		goto no_pgd;

	/* Clear the user-space entries below the kernel boundary. */
	memset(new_pgd, 0, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
		       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

	/*
	 * Clean the D-cache over the whole new table so the updates are
	 * visible to the hardware page-table walk.
	 */
	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

	if (!vectors_high()) {
		/*
		 * On UniCore, first page must always be allocated since it
		 * contains the machine vectors.
		 */
		new_pmd = pmd_alloc(mm, (pud_t *)new_pgd, 0);
		if (!new_pmd)
			goto no_pmd;

		/*
		 * NOTE: pte_alloc_map() also bumps mm->nr_ptes; the matching
		 * decrement happens in free_pgd_slow().
		 */
		new_pte = pte_alloc_map(mm, NULL, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		/* Copy the vector page mapping from the init page tables. */
		init_pmd = pmd_offset((pud_t *)init_pgd, 0);
		init_pte = pte_offset_map(init_pmd, 0);
		set_pte(new_pte, *init_pte);
		pte_unmap(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

no_pte:
	/* pte_alloc_map() failed: undo the pmd allocation and its accounting. */
	pmd_free(mm, new_pmd);
	mm_dec_nr_pmds(mm);
no_pmd:
	free_pages((unsigned long)new_pgd, 0);
no_pgd:
	return NULL;
}
 | 
						|
 | 
						|
/*
 * Release a first-level page table allocated by get_pgd_slow().
 *
 * If the pmd covering address 0 was populated (the low-vectors case),
 * the vector pte page and the pmd are freed first, and the mm's
 * nr_ptes/nr_pmds counters are decremented to balance the accounting
 * done at allocation time.  Finally the pgd page itself is freed.
 */
void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd)
{
	pmd_t *pmd;
	pgtable_t pte;

	if (!pgd)
		return;

	/* pgd is always present and good */
	pmd = pmd_off(pgd, 0);
	if (pmd_none(*pmd))
		/* Nothing mapped at address 0 — only the pgd page to free. */
		goto free;
	if (pmd_bad(*pmd)) {
		/* Corrupt entry: report it, clear it, and free only the pgd. */
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		goto free;
	}

	/* Unhook the pte page before freeing it, then release the pmd. */
	pte = pmd_pgtable(*pmd);
	pmd_clear(pmd);
	pte_free(mm, pte);
	/* Balance the nr_ptes increment made by pte_alloc_map() in pgd_alloc. */
	atomic_long_dec(&mm->nr_ptes);
	pmd_free(mm, pmd);
	mm_dec_nr_pmds(mm);
free:
	free_pages((unsigned long) pgd, 0);
}
 |