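/*
 * Deferred ("pending") interrupt affinity changes: when a set-affinity
 * request has been flagged as pending, the new mask recorded in
 * desc->pending_mask is applied here, with the interrupt line masked
 * while the move is performed.
 */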
#include <linux/irq.h>
#include <linux/interrupt.h>

#include "internals.h"

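/*
 * irq_move_masked_irq - perform a pending affinity change for @idata.
 *
 * The caller must hold desc->lock and have the interrupt masked; the
 * pending cpumask is handed to irq_do_set_affinity() and then cleared.
 */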
void irq_move_masked_irq(struct irq_data *idata)
{
	struct irq_desc *desc = irq_data_to_desc(idata);
	struct irq_chip *chip = idata->chip;

	if (likely(!irqd_is_setaffinity_pending(&desc->irq_data)))
		return;

	/*
	 * Paranoia: cpu-local interrupts shouldn't be calling in here anyway.
	 */
	if (!irqd_can_balance(&desc->irq_data)) {
		WARN_ON(1);
		return;
	}

	irqd_clr_move_pending(&desc->irq_data);

	if (unlikely(cpumask_empty(desc->pending_mask)))
		return;

	if (!chip->irq_set_affinity)
		return;

	assert_raw_spin_locked(&desc->lock);

	/*
	 * If there is a valid mask to work with, do the
	 * disable, re-program, enable sequence.
	 * This is *not* particularly important for level-triggered
	 * interrupts, but in an edge-triggered case we might be
	 * reprogramming the RTE while an active trigger is coming in,
	 * which could cause some IOAPICs to malfunction.
	 * Being paranoid, I guess!
	 *
	 * For correct operation this depends on the caller
	 * masking the irqs.
	 */
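	/* Only move if at least one CPU in the pending mask is online. */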
	if (cpumask_any_and(desc->pending_mask, cpu_online_mask) < nr_cpu_ids)
		irq_do_set_affinity(&desc->irq_data, desc->pending_mask, false);

	cpumask_clear(desc->pending_mask);
}

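/*
 * irq_move_irq - perform a pending affinity change, masking the
 * interrupt around the move if it is not already masked. Disabled
 * interrupts are left untouched.
 */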
void irq_move_irq(struct irq_data *idata)
{
	bool masked;

	if (likely(!irqd_is_setaffinity_pending(idata)))
		return;

	if (unlikely(irqd_irq_disabled(idata)))
		return;

	/*
	 * Be careful vs. already masked interrupts. If this is a
	 * threaded interrupt with ONESHOT set, we can end up with an
	 * interrupt storm.
	 */
	masked = irqd_irq_masked(idata);
	if (!masked)
		idata->chip->irq_mask(idata);
	irq_move_masked_irq(idata);
	if (!masked)
		idata->chip->irq_unmask(idata);
}