 88ced03149
			
		
	
	
	88ced03149
	
	
	
		
			
			include/asm-ppc/ had #ifdef __KERNEL__ in all header files that are not meant for use by user space; include/asm-powerpc does not have this yet. This patch gets us a lot closer to that. There are a few cases where I was not sure, so I left them out. I have verified that no CONFIG_* symbols are used outside of __KERNEL__ any more and that there are no obvious compile errors when including any of the headers in user space libraries. Signed-off-by: Arnd Bergmann <arnd@arndb.de> Signed-off-by: Paul Mackerras <paulus@samba.org>
		
			
				
	
	
		
			91 lines
		
	
	
	
		
			2.4 KiB
			
		
	
	
	
		
			C
		
	
	
	
	
	
			
		
		
	
	
			91 lines
		
	
	
	
		
			2.4 KiB
			
		
	
	
	
		
			C
		
	
	
	
	
	
| #ifndef __ASM_POWERPC_MMU_CONTEXT_H
 | |
| #define __ASM_POWERPC_MMU_CONTEXT_H
 | |
| #ifdef __KERNEL__
 | |
| 
 | |
| #ifndef CONFIG_PPC64
 | |
| #include <asm-ppc/mmu_context.h>
 | |
| #else
 | |
| 
 | |
| #include <linux/kernel.h>	
 | |
| #include <linux/mm.h>	
 | |
| #include <asm/mmu.h>	
 | |
| #include <asm/cputable.h>
 | |
| 
 | |
| /*
 | |
|  * Copyright (C) 2001 PPC 64 Team, IBM Corp
 | |
|  *
 | |
|  * This program is free software; you can redistribute it and/or
 | |
|  * modify it under the terms of the GNU General Public License
 | |
|  * as published by the Free Software Foundation; either version
 | |
|  * 2 of the License, or (at your option) any later version.
 | |
|  */
 | |
| 
 | |
/*
 * Getting into a kernel thread, there is no valid user segment, mark
 * paca->pgdir NULL so that SLB miss on user addresses will fault.
 *
 * @mm:  the mm being left behind (unused here)
 * @tsk: the task entering lazy TLB mode (unused here)
 *
 * Only CONFIG_PPC_64K_PAGES kernels track a pgdir pointer in the paca
 * (see the matching check in switch_mm()); otherwise this is a no-op.
 */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
#ifdef CONFIG_PPC_64K_PAGES
	/* A NULL pgdir makes SLB misses on user addresses fault. */
	get_paca()->pgdir = NULL;
#endif /* CONFIG_PPC_64K_PAGES */
}
 | |
| 
 | |
/* Sentinel meaning "no context id allocated yet". */
#define NO_CONTEXT	0
/* Highest valid context id: ids occupy a 20-bit space (2^20 - 1). */
#define MAX_CONTEXT	(0x100000-1)

/* Allocate a context id for a new mm / release it again (defined in
 * the powerpc mm code). */
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);

/* Segment-table and SLB flavours of the user-segment switch;
 * switch_mm() below selects one based on CPU_FTR_SLB. */
extern void switch_stab(struct task_struct *tsk, struct mm_struct *mm);
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
 | |
/*
 * switch_mm is the entry point called from the architecture independent
 * code in kernel/sched.c.
 *
 * @prev: mm being switched away from
 * @next: mm being switched to
 * @tsk:  task that will run with @next
 *
 * Records this CPU in @next's cpu_vm_mask, then swaps the user segment
 * mappings (SLB or segment table, depending on CPU features) unless the
 * mm is unchanged.
 */
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	/* Make sure this CPU is flagged as using @next's address space. */
	if (!cpu_isset(smp_processor_id(), next->cpu_vm_mask))
		cpu_set(smp_processor_id(), next->cpu_vm_mask);

	/* No need to flush userspace segments if the mm doesn't change.
	 * With 64K pages, also require that the paca's cached pgdir still
	 * matches (enter_lazy_tlb() may have NULLed it). */
#ifdef CONFIG_PPC_64K_PAGES
	if (prev == next && get_paca()->pgdir == next->pgd)
		return;
#else
	if (prev == next)
		return;
#endif /* CONFIG_PPC_64K_PAGES */

	/* Stop all outstanding AltiVec/VMX data streams before the user
	 * segment mappings change underneath them. */
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		asm volatile ("dssall");
#endif /* CONFIG_ALTIVEC */

	/* Switch the user segments via the SLB when the CPU has one,
	 * otherwise via the segment table. */
	if (cpu_has_feature(CPU_FTR_SLB))
		switch_slb(tsk, next);
	else
		switch_stab(tsk, next);
}
 | |
| 
 | |
/* No work is needed when an mm is deactivated on this architecture. */
#define deactivate_mm(tsk,mm)	do { } while (0)
 | |
| 
 | |
| /*
 | |
|  * After we have set current->mm to a new value, this activates
 | |
|  * the context for the new mm so we see the new mappings.
 | |
|  */
 | |
| static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
 | |
| {
 | |
| 	unsigned long flags;
 | |
| 
 | |
| 	local_irq_save(flags);
 | |
| 	switch_mm(prev, next, current);
 | |
| 	local_irq_restore(flags);
 | |
| }
 | |
| 
 | |
| #endif /* CONFIG_PPC64 */
 | |
| #endif /* __KERNEL__ */
 | |
| #endif /* __ASM_POWERPC_MMU_CONTEXT_H */
 |