genirq: Mirror IRQ_PER_CPU and IRQ_NO_BALANCING in irq_data.state
That's the right data structure to look at for arch code.

Accessor functions are provided.

   irqd_is_per_cpu(irqdata);
   irqd_can_balance(irqdata);

Coders who access them directly will be tracked down and slapped with
stinking trouts.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 1ce6068dac
commit a005677b3d

7 changed files with 84 additions and 15 deletions
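What the message asks of arch code, as a hedged sketch (not part of the patch; the callback name and body are purely illustrative): chip callbacks already receive the struct irq_data, so they query the mirrored state through the new accessors instead of poking desc->status or state_use_accessors directly.

#include <linux/errno.h>
#include <linux/irq.h>

/*
 * Illustrative arch-side irq_chip callback, not from this commit.
 * Either accessor check would suffice on its own; both are shown.
 */
static int example_arch_set_affinity(struct irq_data *d,
				     const struct cpumask *mask, bool force)
{
	/* Per-cpu interrupts and interrupts excluded from balancing stay put. */
	if (irqd_is_per_cpu(d) || !irqd_can_balance(d))
		return -EINVAL;

	/* ... reprogram the routing hardware for the new mask here ... */
	return 0;
}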
				
			
include/linux/irq.h

@@ -132,10 +132,14 @@ struct irq_data {
  * Bit masks for irq_data.state
  *
  * IRQD_SETAFFINITY_PENDING	- Affinity setting is pending
+ * IRQD_NO_BALANCING		- Balancing disabled for this IRQ
+ * IRQD_PER_CPU			- Interrupt is per cpu
  */
 enum {
 	/* Bit 0 - 7 reserved for TYPE will use later */
 	IRQD_SETAFFINITY_PENDING	= (1 <<  8),
+	IRQD_NO_BALANCING		= (1 << 10),
+	IRQD_PER_CPU			= (1 << 11),
 };
 
 static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
@@ -143,6 +147,16 @@ static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
 	return d->state_use_accessors & IRQD_SETAFFINITY_PENDING;
 }
 
+static inline bool irqd_is_per_cpu(struct irq_data *d)
+{
+	return d->state_use_accessors & IRQD_PER_CPU;
+}
+
+static inline bool irqd_can_balance(struct irq_data *d)
+{
+	return !(d->state_use_accessors & (IRQD_PER_CPU | IRQD_NO_BALANCING));
+}
+
 /**
  * struct irq_chip - hardware interrupt chip descriptor
  *
kernel/irq/chip.c

@@ -706,12 +706,15 @@ void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set)
 	if (!desc)
 		return;
 
-	/* Sanitize flags */
-	set &= IRQF_MODIFY_MASK;
-	clr &= IRQF_MODIFY_MASK;
-
 	raw_spin_lock_irqsave(&desc->lock, flags);
-	desc->status &= ~clr;
-	desc->status |= set;
+
+	irq_settings_clr_and_set(desc, clr, set);
+
+	irqd_clear(&desc->irq_data, IRQD_NO_BALANCING | IRQD_PER_CPU);
+	if (irq_settings_has_no_balance_set(desc))
+		irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
+	if (irq_settings_is_per_cpu(desc))
+		irqd_set(&desc->irq_data, IRQD_PER_CPU);
+
 	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
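Since irq_modify_status() is the sync point, anything funnelled through it keeps the descriptor settings and irq_data.state coherent. A minimal sketch, assuming the irq_set_status_flags() and irq_get_irq_data() helpers of this kernel generation; the function below is hypothetical:

#include <linux/irq.h>

/* Hypothetical driver/arch snippet: disable balancing for one interrupt. */
static void example_pin_irq(unsigned int irq)
{
	/* Routed through irq_modify_status(), which mirrors the flag. */
	irq_set_status_flags(irq, IRQ_NO_BALANCING);

	/* Should never trigger: balancing was just disabled. */
	WARN_ON(irqd_can_balance(irq_get_irq_data(irq)));
}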
kernel/irq/internals.h

@@ -139,3 +139,14 @@ static inline void irqd_clr_move_pending(struct irq_data *d)
 	d->state_use_accessors &= ~IRQD_SETAFFINITY_PENDING;
 	irq_compat_clr_move_pending(irq_data_to_desc(d));
 }
+
+static inline void irqd_clear(struct irq_data *d, unsigned int mask)
+{
+	d->state_use_accessors &= ~mask;
+}
+
+static inline void irqd_set(struct irq_data *d, unsigned int mask)
+{
+	d->state_use_accessors |= mask;
+}
+
kernel/irq/manage.c

@@ -73,8 +73,8 @@ int irq_can_set_affinity(unsigned int irq)
 {
 	struct irq_desc *desc = irq_to_desc(irq);
 
-	if ((desc->status & (IRQ_PER_CPU | IRQ_NO_BALANCING)) ||
-	    !desc->irq_data.chip || !desc->irq_data.chip->irq_set_affinity)
+	if (!irqd_can_balance(&desc->irq_data) || !desc->irq_data.chip ||
+	    !desc->irq_data.chip->irq_set_affinity)
 		return 0;
 
 	return 1;
@@ -897,8 +897,10 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 				  IRQS_INPROGRESS | IRQS_ONESHOT | \
 				  IRQS_WAITING);
 
-		if (new->flags & IRQF_PERCPU)
-			desc->status |= IRQ_PER_CPU;
+		if (new->flags & IRQF_PERCPU) {
+			irqd_set(&desc->irq_data, IRQD_PER_CPU);
+			irq_settings_set_per_cpu(desc);
+		}
 
 		if (new->flags & IRQF_ONESHOT)
 			desc->istate |= IRQS_ONESHOT;
@@ -910,8 +912,10 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
 			desc->depth = 1;
 
 		/* Exclude IRQ from balancing if requested */
-		if (new->flags & IRQF_NOBALANCING)
-			desc->status |= IRQ_NO_BALANCING;
+		if (new->flags & IRQF_NOBALANCING) {
+			irq_settings_set_no_balancing(desc);
+			irqd_set(&desc->irq_data, IRQD_NO_BALANCING);
+		}
 
 		/* Set default affinity mask once everything is setup */
 		setup_affinity(irq, desc, mask);
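The two __setup_irq() hunks above are what translate request-time flags into the mirrored state. A hedged driver-side sketch (handler and device name are made up) of the flag that exercises the second hunk:

#include <linux/interrupt.h>

/* Hypothetical interrupt handler, for illustration only. */
static irqreturn_t example_handler(int irq, void *dev_id)
{
	return IRQ_HANDLED;
}

static int example_request(unsigned int irq, void *dev_id)
{
	/*
	 * IRQF_NOBALANCING makes __setup_irq() set the descriptor setting
	 * and IRQD_NO_BALANCING in irq_data.state, so irq_can_set_affinity()
	 * returns 0 and move_masked_irq() refuses to migrate this interrupt.
	 */
	return request_irq(irq, example_handler, IRQF_NOBALANCING,
			   "example-device", dev_id);
}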
kernel/irq/migration.c

@@ -15,7 +15,7 @@ void move_masked_irq(int irq)
 	/*
 	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
 	 */
-	if (desc->status & (IRQ_PER_CPU | IRQ_NO_BALANCING)) {
+	if (!irqd_can_balance(&desc->irq_data)) {
 		WARN_ON(1);
 		return;
 	}
kernel/irq/settings.h

@@ -4,6 +4,9 @@
  */
 enum {
 	_IRQ_DEFAULT_INIT_FLAGS	= IRQ_DEFAULT_INIT_FLAGS,
+	_IRQ_PER_CPU		= IRQ_PER_CPU,
+	_IRQ_NO_BALANCING	= IRQ_NO_BALANCING,
+	_IRQF_MODIFY_MASK	= IRQF_MODIFY_MASK,
 };
 
 #undef IRQ_INPROGRESS
@@ -22,3 +25,36 @@ enum {
 #define IRQ_WAKEUP		GOT_YOU_MORON
 #undef IRQ_MOVE_PENDING
 #define IRQ_MOVE_PENDING	GOT_YOU_MORON
+#undef IRQ_PER_CPU
+#define IRQ_PER_CPU		GOT_YOU_MORON
+#undef IRQ_NO_BALANCING
+#define IRQ_NO_BALANCING	GOT_YOU_MORON
+#undef IRQF_MODIFY_MASK
+#define IRQF_MODIFY_MASK	GOT_YOU_MORON
+
+static inline void
+irq_settings_clr_and_set(struct irq_desc *desc, u32 clr, u32 set)
+{
+	desc->status &= ~(clr & _IRQF_MODIFY_MASK);
+	desc->status |= (set & _IRQF_MODIFY_MASK);
+}
+
+static inline bool irq_settings_is_per_cpu(struct irq_desc *desc)
+{
+	return desc->status & _IRQ_PER_CPU;
+}
+
+static inline void irq_settings_set_per_cpu(struct irq_desc *desc)
+{
+	desc->status |= _IRQ_PER_CPU;
+}
+
+static inline void irq_settings_set_no_balancing(struct irq_desc *desc)
+{
+	desc->status |= _IRQ_NO_BALANCING;
+}
+
+static inline bool irq_settings_has_no_balance_set(struct irq_desc *desc)
+{
+	return desc->status & _IRQ_NO_BALANCING;
+}
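The settings.h trick above is to capture the flag values into private _IRQ_* names first and only then poison the public spellings, so core code that still touches the raw flags no longer compiles and everything has to go through the helpers. A standalone, userspace-compilable sketch of the same pattern (all names hypothetical, not kernel code):

#include <stdio.h>

/* Public flag, as a header would define it. */
#define FLAG_PER_CPU 0x0200

/* Capture the value into a private shadow name... */
enum { _FLAG_PER_CPU = FLAG_PER_CPU };

/* ...then poison the public spelling: any later use expands to an
 * undeclared identifier and fails to compile. */
#undef FLAG_PER_CPU
#define FLAG_PER_CPU GOT_YOU_MORON

static inline int flag_is_per_cpu(unsigned int status)
{
	return (status & _FLAG_PER_CPU) != 0;	/* helpers use the shadow value */
}

int main(void)
{
	unsigned int status = _FLAG_PER_CPU;

	/* status |= FLAG_PER_CPU;  <- would not compile after the poison */
	printf("per-cpu: %d\n", flag_is_per_cpu(status));
	return 0;
}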
kernel/irq/spurious.c

@@ -68,7 +68,8 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
 	raw_spin_lock(&desc->lock);
 
 	/* PER_CPU and nested thread interrupts are never polled */
-	if (desc->status & (IRQ_PER_CPU | IRQ_NESTED_THREAD))
+	if (irq_settings_is_per_cpu(desc) ||
+	    (desc->status & IRQ_NESTED_THREAD))
 		goto out;
 
 	/*