/*
 * linux/kernel/irq/spurious.c
 *
 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
 *
 * This file contains spurious interrupt handling.
 */

#include <linux/jiffies.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/timer.h>

static int irqfixup __read_mostly;

#define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
static void poll_spurious_irqs(unsigned long dummy);
static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0);

/*
 * Recovery handler for misrouted interrupts.
 */
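/*
 * Poll one IRQ line: invoke every IRQF_SHARED handler on @desc (only
 * shared handlers are safe to call speculatively) and report whether
 * any of them claimed the interrupt.  If a real interrupt arrives on
 * the line while we are polling, it is flagged via IRQ_PENDING and
 * replayed before the flags are restored.
 */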
static int try_one_irq(int irq, struct irq_desc *desc)
{
	struct irqaction *action;
	int ok = 0, work = 0;

	spin_lock(&desc->lock);
	/* Already running on another processor */
	if (desc->status & IRQ_INPROGRESS) {
		/*
		 * Already running: If it is shared get the other
		 * CPU to go looking for our mystery interrupt too
		 */
		if (desc->action && (desc->action->flags & IRQF_SHARED))
			desc->status |= IRQ_PENDING;
		spin_unlock(&desc->lock);
		return ok;
	}
	/* Honour the normal IRQ locking */
	desc->status |= IRQ_INPROGRESS;
	action = desc->action;
	spin_unlock(&desc->lock);

	while (action) {
		/* Only shared IRQ handlers are safe to call */
		if (action->flags & IRQF_SHARED) {
			if (action->handler(irq, action->dev_id) ==
				IRQ_HANDLED)
				ok = 1;
		}
		action = action->next;
	}
	local_irq_disable();
	/* Now clean up the flags */
	spin_lock(&desc->lock);
	action = desc->action;

	/*
	 * While we were looking for a fixup someone queued a real
	 * IRQ clashing with our walk:
	 */
	while ((desc->status & IRQ_PENDING) && action) {
		/*
		 * Perform real IRQ processing for the IRQ we deferred
		 */
		work = 1;
		spin_unlock(&desc->lock);
		handle_IRQ_event(irq, action);
		spin_lock(&desc->lock);
		desc->status &= ~IRQ_PENDING;
	}
	desc->status &= ~IRQ_INPROGRESS;
	/*
	 * If we did actual work for the real IRQ line we must let the
	 * IRQ controller clean up too
	 */
	if (work && desc->chip && desc->chip->end)
		desc->chip->end(irq);
	spin_unlock(&desc->lock);

	return ok;
}

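/*
 * An interrupt was reported on @irq but nobody handled it: walk every
 * other IRQ line (IRQ 0 is skipped) and poll its shared handlers, in
 * case the interrupt was actually misrouted.  Returns 1 if any handler
 * claimed work.
 */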
static int misrouted_irq(int irq)
{
	struct irq_desc *desc;
	int i, ok = 0;

	for_each_irq_desc(i, desc) {
		if (!i)
			continue;

		if (i == irq)	/* Already tried */
			continue;

		if (try_one_irq(i, desc))
			ok = 1;
	}
	/* So the caller can adjust the irq error counts */
	return ok;
}

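/*
 * Poll every IRQ line that the spurious-interrupt detector has shut
 * down (IRQ_SPURIOUS_DISABLED): try_one_irq() invokes its IRQF_SHARED
 * handlers so the devices behind a dead line keep being serviced.
 */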
static void poll_all_shared_irqs(void)
{
	struct irq_desc *desc;
	int i;

	for_each_irq_desc(i, desc) {
		unsigned int status;

		if (!i)
			continue;

		/* Racy but it doesn't matter */
		status = desc->status;
		barrier();
		if (!(status & IRQ_SPURIOUS_DISABLED))
			continue;

		try_one_irq(i, desc);
	}
}

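/*
 * Timer callback: poll the disabled lines, then re-arm the timer so
 * polling repeats every POLL_SPURIOUS_IRQ_INTERVAL (HZ/10) jiffies.
 */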
static void poll_spurious_irqs(unsigned long dummy)
{
	poll_all_shared_irqs();

	mod_timer(&poll_spurious_irq_timer,
		  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
}

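/*
 * Entry point for the CONFIG_DEBUG_SHIRQ shared-interrupt debugging
 * code to trigger a poll of the spuriously-disabled lines on demand.
 */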
#ifdef CONFIG_DEBUG_SHIRQ
void debug_poll_all_shared_irqs(void)
{
	poll_all_shared_irqs();
}
#endif

/*
 * If 99,900 of the previous 100,000 interrupts have not been handled
 * then assume that the IRQ is stuck in some manner. Drop a diagnostic
 * and try to turn the IRQ off.
 *
 * (The other 100-of-100,000 interrupts may have been a correctly
 *  functioning device sharing an IRQ with the failing one)
 *
 * Called under desc->lock
 */

static void
__report_bad_irq(unsigned int irq, struct irq_desc *desc,
		 irqreturn_t action_ret)
{
	struct irqaction *action;

	if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
		printk(KERN_ERR "irq event %d: bogus return value %x\n",
				irq, action_ret);
	} else {
		printk(KERN_ERR "irq %d: nobody cared (try booting with "
				"the \"irqpoll\" option)\n", irq);
	}
	dump_stack();
	printk(KERN_ERR "handlers:\n");

	action = desc->action;
	while (action) {
		printk(KERN_ERR "[<%p>]", action->handler);
		print_symbol(" (%s)",
			(unsigned long)action->handler);
		printk("\n");
		action = action->next;
	}
}

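/*
 * Rate-limited wrapper: only the first 100 bad-IRQ reports are printed
 * so a stuck line cannot flood the log.
 */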
static void
report_bad_irq(unsigned int irq, struct irq_desc *desc, irqreturn_t action_ret)
{
	static int count = 100;

	if (count > 0) {
		count--;
		__report_bad_irq(irq, desc, action_ret);
	}
}

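/*
 * Decide whether this interrupt is worth running through the
 * misrouted-IRQ recovery: with "irqfixup" only unhandled (IRQ_NONE)
 * interrupts qualify; with "irqpoll" (irqfixup == 2) handled
 * interrupts on irq 0 or on lines carrying an IRQF_IRQPOLL action
 * qualify as well.
 */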
static inline int
try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
		  irqreturn_t action_ret)
{
	struct irqaction *action;

	if (!irqfixup)
		return 0;

	/* We didn't actually handle the IRQ - see if it was misrouted? */
	if (action_ret == IRQ_NONE)
		return 1;

	/*
	 * But for 'irqfixup == 2' we also do it for handled interrupts if
	 * they are marked as IRQF_IRQPOLL (or for irq zero, which is the
	 * traditional PC timer interrupt.. Legacy)
	 */
	if (irqfixup < 2)
		return 0;

	if (!irq)
		return 1;

	/*
	 * Since we don't get the descriptor lock, "action" can
	 * change under us. We don't really care, but we don't
	 * want to follow a NULL pointer. So tell the compiler to
	 * just load it once by using a barrier.
	 */
	action = desc->action;
	barrier();
	return action && (action->flags & IRQF_IRQPOLL);
}

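/*
 * note_interrupt - called by the IRQ flow handlers after each interrupt
 * with the handlers' combined return value.  Tracks unhandled
 * interrupts, decaying the count when they arrive more than 100ms
 * apart so occasional spurious hits never accumulate, reports bogus
 * return values, and once 99,900 of 100,000 interrupts went unhandled
 * disables the line and starts the polling timer as a fallback.
 */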
void note_interrupt(unsigned int irq, struct irq_desc *desc,
		    irqreturn_t action_ret)
{
	if (unlikely(action_ret != IRQ_HANDLED)) {
		/*
		 * If we are seeing only the odd spurious IRQ caused by
		 * bus asynchronicity then don't eventually trigger an error,
		 * otherwise the counter becomes a doomsday timer for
		 * otherwise working systems
		 */
		if (time_after(jiffies, desc->last_unhandled + HZ/10))
			desc->irqs_unhandled = 1;
		else
			desc->irqs_unhandled++;
		desc->last_unhandled = jiffies;
		if (unlikely(action_ret != IRQ_NONE))
			report_bad_irq(irq, desc, action_ret);
	}

	if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {
		int ok = misrouted_irq(irq);
		if (action_ret == IRQ_NONE)
			desc->irqs_unhandled -= ok;
	}

	desc->irq_count++;
	if (likely(desc->irq_count < 100000))
		return;

	desc->irq_count = 0;
	if (unlikely(desc->irqs_unhandled > 99900)) {
		/*
		 * The interrupt is stuck
		 */
		__report_bad_irq(irq, desc, action_ret);
		/*
		 * Now kill the IRQ
		 */
		printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
		desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED;
		desc->depth++;
		desc->chip->disable(irq);

		mod_timer(&poll_spurious_irq_timer,
			  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
	}
	desc->irqs_unhandled = 0;
}

int noirqdebug __read_mostly;

int noirqdebug_setup(char *str)
{
	noirqdebug = 1;
	printk(KERN_INFO "IRQ lockup detection disabled\n");

	return 1;
}

__setup("noirqdebug", noirqdebug_setup);
module_param(noirqdebug, bool, 0644);
MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");

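/*
 * Two escalation levels share the irqfixup variable: "irqfixup" (1)
 * polls the other lines only when an interrupt goes unhandled, while
 * "irqpoll" (2) additionally polls on handled timer and IRQF_IRQPOLL
 * interrupts (see try_misrouted_irq() above).
 */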
static int __init irqfixup_setup(char *str)
{
	irqfixup = 1;
	printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
	printk(KERN_WARNING "This may impact system performance.\n");

	return 1;
}

__setup("irqfixup", irqfixup_setup);
module_param(irqfixup, int, 0644);

static int __init irqpoll_setup(char *str)
{
	irqfixup = 2;
	printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
		"enabled\n");
	printk(KERN_WARNING "This may significantly impact system "
		"performance\n");
	return 1;
}

__setup("irqpoll", irqpoll_setup);