2.6.25-rc1 percpu changes broke CONFIG_DEBUG_PREEMPT's per_cpu checking on several architectures. On s390, sparc64 and x86 it's been weakened to not checking at all; whereas on powerpc64 it's become too strict, issuing warnings from __raw_get_cpu_var in io_schedule and init_timer for example. Fix this by weakening powerpc's __my_cpu_offset to use the non-checking local_paca instead of get_paca (which itself contains such a check); and strengthening the generic my_cpu_offset to go the old slow way via smp_processor_id when CONFIG_DEBUG_PREEMPT (debug_smp_processor_id is where all the knowledge of what's correct when lives). Signed-off-by: Hugh Dickins <hugh@veritas.com> Reviewed-by: Mike Travis <travis@sgi.com> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
		
			
				
	
	
		
			24 lines
		
	
	
	
		
			559 B
			
		
	
	
	
		
			C
		
	
	
	
	
	
			
		
		
	
	
			24 lines
		
	
	
	
		
			559 B
			
		
	
	
	
		
			C
		
	
	
	
	
	
#ifndef _ASM_POWERPC_PERCPU_H_
#define _ASM_POWERPC_PERCPU_H_
#ifdef __powerpc64__
#include <linux/compiler.h>

/*
 * Same as asm-generic/percpu.h, except that we store the per cpu offset
 * in the paca. Based on the x86-64 implementation.
 */

#ifdef CONFIG_SMP

#include <asm/paca.h>

/* Per-CPU offset for an arbitrary CPU, read from that CPU's paca. */
#define __per_cpu_offset(cpu) (paca[cpu].data_offset)

/*
 * Deliberately uses local_paca rather than get_paca(): get_paca()
 * contains a preemption-safety check, which would fire spuriously from
 * the non-checking __raw_get_cpu_var users under CONFIG_DEBUG_PREEMPT.
 * The checking path goes through the generic my_cpu_offset instead.
 */
#define __my_cpu_offset local_paca->data_offset

#define per_cpu_offset(x) (__per_cpu_offset(x))

#endif /* CONFIG_SMP */
#endif /* __powerpc64__ */

/* Generic definitions fill in everything not provided above (incl. UP). */
#include <asm-generic/percpu.h>

#endif /* _ASM_POWERPC_PERCPU_H_ */