818b0f3bfb
All invocations of chip->irq_set_affinity() are doing the same return
value checks. Let them all use a common function.

[ tglx: removed the silly likely while at it ]

Signed-off-by: Jiang Liu <jiang.liu@huawei.com>
Cc: Jiang Liu <liuj97@gmail.com>
Cc: Keping Chen <chenkeping@huawei.com>
Link: http://lkml.kernel.org/r/1333120296-13563-3-git-send-email-jiang.liu@huawei.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
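For context, the common helper referenced in the file below, irq_do_set_affinity(), centralizes the return value handling that every call site previously duplicated. The following is a sketch reconstructed from the commit message, not necessarily the exact committed code; IRQ_SET_MASK_OK and IRQ_SET_MASK_OK_NOCOPY are the irq_set_affinity chip callback's documented return values, and irq_set_thread_affinity() is the internal helper that propagates a new mask to threaded handlers.

int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask,
			bool force)
{
	struct irq_desc *desc = irq_data_to_desc(data);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	int ret;

	ret = chip->irq_set_affinity(data, mask, force);
	switch (ret) {
	case IRQ_SET_MASK_OK:
		/* The core is expected to track the new mask itself. */
		cpumask_copy(data->affinity, mask);
		/* fall through */
	case IRQ_SET_MASK_OK_NOCOPY:
		/* The chip already updated the mask; just propagate it. */
		irq_set_thread_affinity(desc);
		ret = 0;
	}
	return ret;
}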
		
			
				
	
	
		
72 lines | 1.7 KiB | C
#include <linux/irq.h>
#include <linux/interrupt.h>

#include "internals.h"

void irq_move_masked_irq(struct irq_data *idata)
{
	struct irq_desc *desc = irq_data_to_desc(idata);
	struct irq_chip *chip = idata->chip;

	if (likely(!irqd_is_setaffinity_pending(&desc->irq_data)))
		return;

	/*
	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
	 */
	if (!irqd_can_balance(&desc->irq_data)) {
		WARN_ON(1);
		return;
	}

	irqd_clr_move_pending(&desc->irq_data);

	if (unlikely(cpumask_empty(desc->pending_mask)))
		return;

	if (!chip->irq_set_affinity)
		return;

	assert_raw_spin_locked(&desc->lock);

	/*
	 * If there was a valid mask to work with, please
	 * do the disable, re-program, enable sequence.
	 * This is *not* particularly important for level triggered
	 * but in an edge trigger case, we might be setting rte
	 * when an active trigger is coming in. This could
	 * cause some ioapics to malfunction.
	 * Being paranoid, I guess!
	 *
	 * For correct operation this depends on the caller
	 * masking the irqs.
	 */
	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids)
		irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false);

	cpumask_clear(desc->pending_mask);
}

void irq_move_irq(struct irq_data *idata)
{
	bool masked;

	if (likely(!irqd_is_setaffinity_pending(idata)))
		return;

	if (unlikely(irqd_irq_disabled(idata)))
		return;

	/*
	 * Be careful vs. already masked interrupts. If this is a
	 * threaded interrupt with ONESHOT set, we can end up with an
	 * interrupt storm.
	 */
	masked = irqd_irq_masked(idata);
	if (!masked)
		idata->chip->irq_mask(idata);
	irq_move_masked_irq(idata);
	if (!masked)
		idata->chip->irq_unmask(idata);
}
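As a usage note, irq_move_irq() is intended to run from the interrupt handling path, typically a chip's ack callback with desc->lock held (which is why irq_move_masked_irq() asserts the lock), so that a pending affinity change is applied while the line is quiesced. A hypothetical chip callback, purely for illustration and not part of this commit:

/* example_chip_irq_ack() is a hypothetical callback, for illustration only. */
static void example_chip_irq_ack(struct irq_data *data)
{
	/* Apply any pending affinity change; masks/unmasks as needed. */
	irq_move_irq(data);

	/* ... chip-specific acknowledge sequence would follow here ... */
}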