commit b52e0a7c4e
The following change fixes the x86 implementation of trigger_all_cpu_backtrace(), which was previously (accidentally, as far as I can tell) disabled to always return false as on architectures that do not implement this function.

trigger_all_cpu_backtrace(), as defined in include/linux/nmi.h, should call arch_trigger_all_cpu_backtrace() if available, or return false if the underlying arch doesn't implement this function.

x86 did provide a suitable arch_trigger_all_cpu_backtrace() implementation, but it wasn't actually being used because it was declared in asm/nmi.h, which linux/nmi.h doesn't include. Also, linux/nmi.h couldn't easily be fixed by including asm/nmi.h, because that file is not available on all architectures.

I am proposing to fix this by moving the x86 definition of arch_trigger_all_cpu_backtrace() to asm/irq.h.

Tested via:

	echo l > /proc/sysrq-trigger

Before the change, this uses a fallback implementation which shows backtraces on active CPUs (using smp_call_function_interrupt()).

After the change, this shows NMI backtraces on all CPUs.

Signed-off-by: Michel Lespinasse <walken@google.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Link: http://lkml.kernel.org/r/1370518875-1346-1-git-send-email-walken@google.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
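For reference, here is a minimal sketch of the mechanism described above. It paraphrases the relevant pattern rather than quoting the kernel sources or the patch itself; in particular, the asm/irq.h hunk below is only the assumed shape of the fix. The generic trigger_all_cpu_backtrace() in include/linux/nmi.h picks its fallback purely on whether an arch_trigger_all_cpu_backtrace macro is visible at preprocessing time, which is why a declaration living only in asm/nmi.h (not included by linux/nmi.h) left the fallback in effect, while a declaration in asm/irq.h (which is visible to linux/nmi.h) does not.

#include <linux/types.h>

/*
 * Generic side, roughly as in include/linux/nmi.h: the choice between
 * the real NMI-based implementation and the "return false" fallback is
 * made by this #ifdef, so the arch must make the macro visible here.
 */
#ifdef arch_trigger_all_cpu_backtrace
static inline bool trigger_all_cpu_backtrace(void)
{
	arch_trigger_all_cpu_backtrace();
	return true;
}
#else
static inline bool trigger_all_cpu_backtrace(void)
{
	return false;
}
#endif

/*
 * x86 side after the change (assumed shape of the asm/irq.h hunk):
 * declare the function and define a same-named macro so the #ifdef
 * above sees it.
 */
#ifdef CONFIG_X86_LOCAL_APIC
void arch_trigger_all_cpu_backtrace(void);
#define arch_trigger_all_cpu_backtrace arch_trigger_all_cpu_backtrace
#endif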
arch/x86/include/asm/nmi.h (63 lines, 1.3 KiB, C):
#ifndef _ASM_X86_NMI_H
#define _ASM_X86_NMI_H

#include <linux/pm.h>
#include <asm/irq.h>
#include <asm/io.h>

#ifdef CONFIG_X86_LOCAL_APIC

extern int avail_to_resrv_perfctr_nmi_bit(unsigned int);
extern int reserve_perfctr_nmi(unsigned int);
extern void release_perfctr_nmi(unsigned int);
extern int reserve_evntsel_nmi(unsigned int);
extern void release_evntsel_nmi(unsigned int);

struct ctl_table;
extern int proc_nmi_enabled(struct ctl_table *, int ,
			void __user *, size_t *, loff_t *);
extern int unknown_nmi_panic;

#endif /* CONFIG_X86_LOCAL_APIC */

#define NMI_FLAG_FIRST	1

enum {
	NMI_LOCAL=0,
	NMI_UNKNOWN,
	NMI_SERR,
	NMI_IO_CHECK,
	NMI_MAX
};

#define NMI_DONE	0
#define NMI_HANDLED	1

typedef int (*nmi_handler_t)(unsigned int, struct pt_regs *);

struct nmiaction {
	struct list_head	list;
	nmi_handler_t		handler;
	unsigned long		flags;
	const char		*name;
};

#define register_nmi_handler(t, fn, fg, n, init...)	\
({							\
	static struct nmiaction init fn##_na = {	\
		.handler = (fn),			\
		.name = (n),				\
		.flags = (fg),				\
	};						\
	__register_nmi_handler((t), &fn##_na);		\
})

int __register_nmi_handler(unsigned int, struct nmiaction *);

void unregister_nmi_handler(unsigned int, const char *);

void stop_nmi(void);
void restart_nmi(void);
void local_touch_nmi(void);

#endif /* _ASM_X86_NMI_H */
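As a usage illustration of the NMI handler interface declared above (a hypothetical example, not part of this commit; the handler name and the "example" string are made up): a handler returns NMI_HANDLED when it consumed the NMI and NMI_DONE otherwise, and is registered against one of the NMI_* classes under a name that is later used to unregister it.

#include <linux/init.h>
#include <linux/module.h>
#include <asm/nmi.h>

/* Hypothetical handler: claim the NMI only if our hardware raised it. */
static int example_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
	return NMI_DONE;	/* not ours */
}

static int __init example_init(void)
{
	/* type, handler, NMI_FLAG_* mask, name used for unregistering */
	return register_nmi_handler(NMI_UNKNOWN, example_nmi_handler, 0,
				    "example");
}

static void __exit example_exit(void)
{
	unregister_nmi_handler(NMI_UNKNOWN, "example");
}

module_init(example_init);
module_exit(example_exit);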