Provide a mechanism that allows running code in IRQ context. It is most useful for NMI code that needs to interact with the rest of the system, such as waking up a task to drain buffers.

Perf already has such a mechanism, so extract it and provide it as a generic feature, independent of perf, so that others may also benefit.

The IRQ context callback is generated through self-IPIs where possible; on architectures like powerpc, the decrementer (the built-in timer facility) is instead set to generate an interrupt immediately. Architectures that have neither make do with a callback from the timer tick. These architectures can call irq_work_run() at the tail of any IRQ handlers that might enqueue such work (like the perf IRQ handler) to avoid undue latencies in processing the work.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Kyle McMartin <kyle@mcmartin.ca>
Acked-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
[ various fixes ]
Signed-off-by: Huang Ying <ying.huang@intel.com>
LKML-Reference: <1287036094.7768.291.camel@yhuang-dev>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
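For illustration, a minimal usage sketch (hypothetical driver code, not part of this patch): an NMI handler queues an irq_work item, and the callback, which later runs in hard IRQ context, wakes the task that drains the buffers. The drain_wait queue and buffer_nmi_handler() are invented names for this sketch; only irq_work_queue() and the callback signature come from the new API.

	#include <linux/irq_work.h>
	#include <linux/wait.h>

	static DECLARE_WAIT_QUEUE_HEAD(drain_wait);	/* hypothetical waitqueue */

	/* Runs in hard IRQ context, raised via self-IPI where available. */
	static void drain_func(struct irq_work *work)
	{
		wake_up(&drain_wait);	/* legal here, not from NMI context */
	}

	static struct irq_work drain_work = {
		.func	= drain_func,
	};

	/* Hypothetical NMI path, e.g. a PMU overflow handler. */
	static void buffer_nmi_handler(void)
	{
		/* ... copy samples out of the hardware buffers ... */
		irq_work_queue(&drain_work);	/* defer the wakeup to IRQ context */
	}

The point of the indirection is that wake_up() and most of the scheduler are unsafe from NMI context; irq_work_queue() only links the item into a per-cpu list and raises the (NMI-safe) self-IPI.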
#ifndef _ASM_X86_HARDIRQ_H
#define _ASM_X86_HARDIRQ_H

#include <linux/threads.h>
#include <linux/irq.h>

typedef struct {
	unsigned int __softirq_pending;
	unsigned int __nmi_count;	/* arch dependent */
	unsigned int irq0_irqs;
#ifdef CONFIG_X86_LOCAL_APIC
	unsigned int apic_timer_irqs;	/* arch dependent */
	unsigned int irq_spurious_count;
#endif
	unsigned int x86_platform_ipis;	/* arch dependent */
	unsigned int apic_perf_irqs;
	unsigned int apic_irq_work_irqs;
#ifdef CONFIG_SMP
	unsigned int irq_resched_count;
	unsigned int irq_call_count;
	unsigned int irq_tlb_count;
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
	unsigned int irq_thermal_count;
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	unsigned int irq_threshold_count;
#endif
} ____cacheline_aligned irq_cpustat_t;

DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);

/* We can have at most NR_VECTORS irqs routed to a cpu at a time */
#define MAX_HARDIRQS_PER_CPU NR_VECTORS

#define __ARCH_IRQ_STAT

#define inc_irq_stat(member)	percpu_inc(irq_stat.member)

#define local_softirq_pending()	percpu_read(irq_stat.__softirq_pending)

#define __ARCH_SET_SOFTIRQ_PENDING

#define set_softirq_pending(x)	percpu_write(irq_stat.__softirq_pending, (x))
#define or_softirq_pending(x)	percpu_or(irq_stat.__softirq_pending, (x))

extern void ack_bad_irq(unsigned int irq);

extern u64 arch_irq_stat_cpu(unsigned int cpu);
#define arch_irq_stat_cpu	arch_irq_stat_cpu

extern u64 arch_irq_stat(void);
#define arch_irq_stat		arch_irq_stat

#endif /* _ASM_X86_HARDIRQ_H */
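The new apic_irq_work_irqs counter above is bumped by the x86 IRQ-work vector handler. For context, a sketch of the two arch hooks that pair with this header, reconstructed from the shape of the patch (the raise side sends a self-IPI on the dedicated vector; exact details may differ from the code as merged):

	/* Raise the callback: self-IPI on IRQ_WORK_VECTOR, if we have an APIC. */
	void arch_irq_work_raise(void)
	{
	#ifdef CONFIG_X86_LOCAL_APIC
		if (!cpu_has_apic)
			return;

		apic->send_IPI_self(IRQ_WORK_VECTOR);
		apic_wait_icr_idle();
	#endif
	}

	/* Vector handler: account the interrupt, then run all queued work. */
	void smp_irq_work_interrupt(struct pt_regs *regs)
	{
		irq_enter();
		ack_APIC_irq();
		inc_irq_stat(apic_irq_work_irqs);
		irq_work_run();
		irq_exit();
	}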