The core mm code will provide a default gate area based on FIXADDR_USER_START and FIXADDR_USER_END if !defined(__HAVE_ARCH_GATE_AREA) && defined(AT_SYSINFO_EHDR).

This default is only useful for ia64.  arm64, ppc, s390, sh, tile, 64-bit UML, and x86_32 have their own code just to disable it.  arm, 32-bit UML, and x86_64 have gate areas, but they have their own implementations.

This gets rid of the default and moves the code into ia64.

This should save some code on architectures without a gate area: it's now possible to inline the gate_area functions in the default case.

Signed-off-by: Andy Lutomirski <luto@amacapital.net>
Acked-by: Nathan Lynch <nathan_lynch@mentor.com>
Acked-by: H. Peter Anvin <hpa@linux.intel.com>
Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org> [in principle]
Acked-by: Richard Weinberger <richard@nod.at> [for um]
Acked-by: Will Deacon <will.deacon@arm.com> [for arm64]
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: Fenghua Yu <fenghua.yu@intel.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: Chris Metcalf <cmetcalf@tilera.com>
Cc: Jeff Dike <jdike@addtoit.com>
Cc: Richard Weinberger <richard@nod.at>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Nathan Lynch <Nathan_Lynch@mentor.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
76 lines · 2.1 KiB · C
#ifndef _ASM_X86_PAGE_H
#define _ASM_X86_PAGE_H

#include <linux/types.h>

#ifdef __KERNEL__

#include <asm/page_types.h>

/* Word-size-specific page helpers (clear_page/copy_page, __phys_addr, ...). */
#ifdef CONFIG_X86_64
#include <asm/page_64.h>
#else
#include <asm/page_32.h>
#endif	/* CONFIG_X86_64 */

#ifndef __ASSEMBLY__

struct page;

#include <linux/range.h>
/* Table of physical memory ranges that the kernel has direct-mapped. */
extern struct range pfn_mapped[];
extern int nr_pfn_mapped;
/*
 * Zero a page that is (or will be) mapped into user space.
 *
 * @vaddr and @pg are unused in this implementation: x86 needs no
 * cache-alias handling here, so a plain clear_page() suffices.
 */
static inline void clear_user_page(void *page, unsigned long vaddr,
				   struct page *pg)
{
	clear_page(page);
}
/*
 * Copy a page destined for user space.
 *
 * @vaddr and @topage are unused in this implementation; a plain
 * copy_page() is sufficient on x86.
 */
static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
				  struct page *topage)
{
	copy_page(to, from);
}
						|
#define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
 | 
						|
	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
 | 
						|
#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
 | 
						|
 | 
						|
#define __pa(x)		__phys_addr((unsigned long)(x))
 | 
						|
#define __pa_nodebug(x)	__phys_addr_nodebug((unsigned long)(x))
 | 
						|
/* __pa_symbol should be used for C visible symbols.
 | 
						|
   This seems to be the official gcc blessed way to do such arithmetic. */
 | 
						|
/*
 | 
						|
 * We need __phys_reloc_hide() here because gcc may assume that there is no
 | 
						|
 * overflow during __pa() calculation and can optimize it unexpectedly.
 | 
						|
 * Newer versions of gcc provide -fno-strict-overflow switch to handle this
 | 
						|
 * case properly. Once all supported versions of gcc understand it, we can
 | 
						|
 * remove this Voodoo magic stuff. (i.e. once gcc3.x is deprecated)
 | 
						|
 */
 | 
						|
#define __pa_symbol(x) \
 | 
						|
	__phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
 | 
						|
 | 
						|
#define __va(x)			((void *)((unsigned long)(x)+PAGE_OFFSET))
 | 
						|
 | 
						|
#define __boot_va(x)		__va(x)
 | 
						|
#define __boot_pa(x)		__pa(x)
 | 
						|
 | 
						|
/*
 | 
						|
 * virt_to_page(kaddr) returns a valid pointer if and only if
 | 
						|
 * virt_addr_valid(kaddr) returns true.
 | 
						|
 */
 | 
						|
#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
 | 
						|
#define pfn_to_kaddr(pfn)      __va((pfn) << PAGE_SHIFT)
 | 
						|
extern bool __virt_addr_valid(unsigned long kaddr);
 | 
						|
#define virt_addr_valid(kaddr)	__virt_addr_valid((unsigned long) (kaddr))
 | 
						|
 | 
						|
#endif	/* __ASSEMBLY__ */
 | 
						|
 | 
						|
#include <asm-generic/memory_model.h>
 | 
						|
#include <asm-generic/getorder.h>
 | 
						|
 | 
						|
#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
 | 
						|
 | 
						|
#endif	/* __KERNEL__ */
 | 
						|
#endif /* _ASM_X86_PAGE_H */
 |