// SPDX-License-Identifier: GPL-2.0
/*
 * linux/kernel/irq/spurious.c
 *
 * Copyright (C) 1992, 1998-2004 Linus Torvalds, Ingo Molnar
 *
 * This file contains spurious interrupt handling.
 */

#include <linux/jiffies.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/kallsyms.h>
#include <linux/interrupt.h>
#include <linux/moduleparam.h>
#include <linux/timer.h>

#include "internals.h"

static int irqfixup __read_mostly;

#define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
static void poll_spurious_irqs(struct timer_list *unused);
static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs);
static int irq_poll_cpu;
static atomic_t irq_poll_active;
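
/*
 * Poller coordination (a summary, inferred from the code below):
 * irq_poll_active gates the two pollers, misrouted_irq() and
 * poll_spurious_irqs(), so at most one runs system-wide, while
 * irq_poll_cpu records which CPU it runs on so irq_wait_for_poll()
 * can detect when an interrupt handler would otherwise spin on its
 * own poller.
 */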

/*
 * We wait here for a poller to finish.
 *
 * If the poll runs on this CPU, then we yell loudly and return
 * false. That will leave the interrupt line disabled in the worst
 * case, but it should never happen.
 *
 * We wait until the poller is done and then recheck disabled and
 * action (about to be disabled). Only if it is still active do we
 * return true and let the handler run.
 */
bool irq_wait_for_poll(struct irq_desc *desc)
{
        if (WARN_ONCE(irq_poll_cpu == smp_processor_id(),
                      "irq poll in progress on cpu %d for irq %d\n",
                      smp_processor_id(), desc->irq_data.irq))
                return false;

#ifdef CONFIG_SMP
        do {
                raw_spin_unlock(&desc->lock);
                while (irqd_irq_inprogress(&desc->irq_data))
                        cpu_relax();
                raw_spin_lock(&desc->lock);
        } while (irqd_irq_inprogress(&desc->irq_data));
        /* Might have been disabled in the meantime */
        return !irqd_irq_disabled(&desc->irq_data) && desc->action;
#else
        /* On UP a running poller would be on this CPU, caught above */
        return false;
#endif
}
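
/*
 * Call-site sketch (for orientation; hedged, the caller lives outside
 * this file): the flow handlers reach this path via irq_check_poll()
 * in kernel/irq/chip.c when they find the line marked in progress.
 * The caller holds desc->lock and only runs the handler if we return
 * true:
 *
 *	if (desc->istate & IRQS_POLL_INPROGRESS)
 *		if (irq_wait_for_poll(desc))
 *			... run the handler, the line is still live ...
 */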

/*
 * Recovery handler for misrouted interrupts.
 */
static int try_one_irq(struct irq_desc *desc, bool force)
{
        irqreturn_t ret = IRQ_NONE;
        struct irqaction *action;

        raw_spin_lock(&desc->lock);

        /*
         * PER_CPU, nested thread interrupts and interrupts explicitly
         * marked polled are excluded from polling.
         */
        if (irq_settings_is_per_cpu(desc) ||
            irq_settings_is_nested_thread(desc) ||
            irq_settings_is_polled(desc))
                goto out;

        /*
         * Do not poll disabled interrupts unless the spurious
         * disabled poller asks explicitly.
         */
        if (irqd_irq_disabled(&desc->irq_data) && !force)
                goto out;

        /*
         * All handlers must agree on IRQF_SHARED, so we test just the
         * first.
         */
        action = desc->action;
        if (!action || !(action->flags & IRQF_SHARED) ||
            (action->flags & __IRQF_TIMER))
                goto out;

        /* Already running on another processor */
        if (irqd_irq_inprogress(&desc->irq_data)) {
                /*
                 * Already running: If it is shared get the other
                 * CPU to go looking for our mystery interrupt too
                 */
                desc->istate |= IRQS_PENDING;
                goto out;
        }

        /* Mark it poll in progress */
        desc->istate |= IRQS_POLL_INPROGRESS;
        do {
                if (handle_irq_event(desc) == IRQ_HANDLED)
                        ret = IRQ_HANDLED;
                /* Make sure that there is still a valid action */
                action = desc->action;
        } while ((desc->istate & IRQS_PENDING) && action);
        desc->istate &= ~IRQS_POLL_INPROGRESS;
out:
        raw_spin_unlock(&desc->lock);
        return ret == IRQ_HANDLED;
}
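
/*
 * Note on the two callers below: misrouted_irq() passes force=false,
 * probing every other line right after an unhandled interrupt, while
 * the poll_spurious_irqs() timer passes force=true so that lines
 * already disabled as spurious get polled anyway. The return value
 * says whether any handler claimed the interrupt.
 */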

static int misrouted_irq(int irq)
{
        struct irq_desc *desc;
        int i, ok = 0;

        if (atomic_inc_return(&irq_poll_active) != 1)
                goto out;

        irq_poll_cpu = smp_processor_id();

        for_each_irq_desc(i, desc) {
                if (!i)
                        continue;

                if (i == irq)   /* Already tried */
                        continue;

                if (try_one_irq(desc, false))
                        ok = 1;
        }
out:
        atomic_dec(&irq_poll_active);
        /* So the caller can adjust the irq error counts */
        return ok;
}

static void poll_spurious_irqs(struct timer_list *unused)
{
        struct irq_desc *desc;
        int i;

        if (atomic_inc_return(&irq_poll_active) != 1)
                goto out;
        irq_poll_cpu = smp_processor_id();

        for_each_irq_desc(i, desc) {
                unsigned int state;

                if (!i)
                        continue;

                /* Racy but it doesn't matter */
                state = desc->istate;
                barrier();
                if (!(state & IRQS_SPURIOUS_DISABLED))
                        continue;

                local_irq_disable();
                try_one_irq(desc, true);
                local_irq_enable();
        }
out:
        atomic_dec(&irq_poll_active);
        mod_timer(&poll_spurious_irq_timer,
                  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
}

static inline int bad_action_ret(irqreturn_t action_ret)
{
        unsigned int r = action_ret;

        if (likely(r <= (IRQ_HANDLED | IRQ_WAKE_THREAD)))
                return 0;
        return 1;
}
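
/*
 * For reference, from <linux/irqreturn.h>: IRQ_NONE == 0,
 * IRQ_HANDLED == 1, IRQ_WAKE_THREAD == 2. Every sane return value is
 * therefore <= (IRQ_HANDLED | IRQ_WAKE_THREAD) == 3; anything larger,
 * e.g. a handler mistakenly returning a negative errno, is flagged:
 *
 *	bad_action_ret(IRQ_HANDLED);		-> 0 (ok)
 *	bad_action_ret((irqreturn_t)-EIO);	-> 1 (bogus)
 */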

/*
 * If 99,900 of the previous 100,000 interrupts have not been handled
 * then assume that the IRQ is stuck in some manner. Drop a diagnostic
 * and try to turn the IRQ off.
 *
 * (The other 100 of those 100,000 interrupts may have been a
 *  correctly functioning device sharing an IRQ with the failing one.)
 */
static void __report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret)
{
        unsigned int irq = irq_desc_get_irq(desc);
        struct irqaction *action;
        unsigned long flags;

        if (bad_action_ret(action_ret)) {
                printk(KERN_ERR "irq event %d: bogus return value %x\n",
                                irq, action_ret);
        } else {
                printk(KERN_ERR "irq %d: nobody cared (try booting with "
                                "the \"irqpoll\" option)\n", irq);
        }
        dump_stack();
        printk(KERN_ERR "handlers:\n");

        /*
         * We need to take desc->lock here. note_interrupt() is called
         * w/o desc->lock held, but with IRQD_IRQ_INPROGRESS set. We
         * might race with something else removing an action. It's ok
         * to take desc->lock here. See synchronize_irq().
         */
        raw_spin_lock_irqsave(&desc->lock, flags);
        for_each_action_of_desc(desc, action) {
                printk(KERN_ERR "[<%p>] %pf", action->handler, action->handler);
                if (action->thread_fn)
                        printk(KERN_CONT " threaded [<%p>] %pf",
                                        action->thread_fn, action->thread_fn);
                printk(KERN_CONT "\n");
        }
        raw_spin_unlock_irqrestore(&desc->lock, flags);
}

static void report_bad_irq(struct irq_desc *desc, irqreturn_t action_ret)
{
        static int count = 100;

        if (count > 0) {
                count--;
                __report_bad_irq(desc, action_ret);
        }
}

static inline int
try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
                  irqreturn_t action_ret)
{
        struct irqaction *action;

        if (!irqfixup)
                return 0;

        /* We didn't actually handle the IRQ - see if it was misrouted? */
        if (action_ret == IRQ_NONE)
                return 1;

        /*
         * But for 'irqfixup == 2' we also do it for handled interrupts if
         * they are marked as IRQF_IRQPOLL (or for irq zero, which is the
         * traditional PC timer interrupt. Legacy.)
         */
        if (irqfixup < 2)
                return 0;

        if (!irq)
                return 1;

        /*
         * Since we don't get the descriptor lock, "action" can
         * change under us.  We don't really care, but we don't
         * want to follow a NULL pointer. So tell the compiler to
         * just load it once by using a barrier.
         */
        action = desc->action;
        barrier();
        return action && (action->flags & IRQF_IRQPOLL);
}
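
/*
 * Summary of the irqfixup levels set by the boot options at the
 * bottom of this file:
 *
 *	0 - disabled (default)
 *	1 - "irqfixup": poll the other lines when an interrupt
 *	    was not handled
 *	2 - "irqpoll": additionally poll on handled interrupts
 *	    that carry IRQF_IRQPOLL (or on irq 0)
 */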

#define SPURIOUS_DEFERRED       0x80000000

void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret)
{
        unsigned int irq;

        if (desc->istate & IRQS_POLL_INPROGRESS ||
            irq_settings_is_polled(desc))
                return;

        if (bad_action_ret(action_ret)) {
                report_bad_irq(desc, action_ret);
                return;
        }

        /*
         * We cannot call note_interrupt from the threaded handler
         * because we need to look at the compound of all handlers
         * (primary and threaded). Aside from that, in the threaded
         * shared case we have no serialization against an incoming
         * hardware interrupt while we are dealing with a threaded
         * result.
         *
         * So in case a thread is woken, we just note the fact and
         * defer the analysis to the next hardware interrupt.
         *
         * The threaded handlers store whether they successfully
         * handled an interrupt and we check whether that number
         * changed versus the last invocation.
         *
         * We could handle all interrupts with the delayed-by-one
         * mechanism, but for the non-forced threaded case we'd just
         * add pointless overhead to the straight hardirq interrupts
         * for the sake of a few lines less code.
         */
        if (action_ret & IRQ_WAKE_THREAD) {
                /*
                 * There is a thread woken. Check whether one of the
                 * shared primary handlers returned IRQ_HANDLED. If
                 * not we defer the spurious detection to the next
                 * interrupt.
                 */
                if (action_ret == IRQ_WAKE_THREAD) {
                        int handled;
                        /*
                         * We use bit 31 of threads_handled_last to
                         * denote the deferred spurious detection
                         * being active. No locking necessary as
                         * threads_handled_last is only accessed here
                         * and we have the guarantee that hard
                         * interrupts are not reentrant.
                         */
                        if (!(desc->threads_handled_last & SPURIOUS_DEFERRED)) {
                                desc->threads_handled_last |= SPURIOUS_DEFERRED;
                                return;
                        }
                        /*
                         * Check whether one of the threaded handlers
                         * returned IRQ_HANDLED since the last
                         * interrupt happened.
                         *
                         * For simplicity we just set bit 31, as it is
                         * set in threads_handled_last as well. So we
                         * avoid extra masking. And we really do not
                         * care about the high bits of the handled
                         * count. We just care about the count being
                         * different than the one we saw before.
                         */
                        handled = atomic_read(&desc->threads_handled);
                        handled |= SPURIOUS_DEFERRED;
                        if (handled != desc->threads_handled_last) {
                                action_ret = IRQ_HANDLED;
                                /*
                                 * Note: We keep the SPURIOUS_DEFERRED
                                 * bit set. We are handling the
                                 * previous invocation right now.
                                 * Keep it for the current one, so the
                                 * next hardware interrupt will
                                 * account for it.
                                 */
                                desc->threads_handled_last = handled;
                        } else {
                                /*
                                 * None of the threaded handlers felt
                                 * responsible for the last interrupt.
                                 *
                                 * We keep the SPURIOUS_DEFERRED bit
                                 * set in threads_handled_last as we
                                 * need to account for the current
                                 * interrupt as well.
                                 */
                                action_ret = IRQ_NONE;
                        }
                } else {
                        /*
                         * One of the primary handlers returned
                         * IRQ_HANDLED. So we don't care about the
                         * threaded handlers on the same line. Clear
                         * the deferred detection bit.
                         *
                         * In theory we could/should check whether the
                         * deferred bit is set and take the result of
                         * the previous run into account here as
                         * well. But it's really not worth the
                         * trouble. If every other interrupt is
                         * handled we never trigger the spurious
                         * detector. And if this is just the one out
                         * of 100k unhandled ones which is handled
                         * then we merely delay the spurious detection
                         * by one hard interrupt. Not a real problem.
                         */
                        desc->threads_handled_last &= ~SPURIOUS_DEFERRED;
                }
        }

        if (unlikely(action_ret == IRQ_NONE)) {
                /*
                 * If we are seeing only the odd spurious IRQ caused by
                 * bus asynchronicity then don't eventually trigger an error,
                 * otherwise the counter becomes a doomsday timer for otherwise
                 * working systems.
                 */
                if (time_after(jiffies, desc->last_unhandled + HZ/10))
                        desc->irqs_unhandled = 1;
                else
                        desc->irqs_unhandled++;
                desc->last_unhandled = jiffies;
        }

        irq = irq_desc_get_irq(desc);
        if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {
                int ok = misrouted_irq(irq);
                if (action_ret == IRQ_NONE)
                        desc->irqs_unhandled -= ok;
        }

        desc->irq_count++;
        if (likely(desc->irq_count < 100000))
                return;

        desc->irq_count = 0;
        if (unlikely(desc->irqs_unhandled > 99900)) {
                /*
                 * The interrupt is stuck
                 */
                __report_bad_irq(desc, action_ret);
                /*
                 * Now kill the IRQ
                 */
                printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
                desc->istate |= IRQS_SPURIOUS_DISABLED;
                desc->depth++;
                irq_disable(desc);

                mod_timer(&poll_spurious_irq_timer,
                          jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
        }
        desc->irqs_unhandled = 0;
}
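
/*
 * Illustrative timeline of the deferred detection above (a summary,
 * not additional semantics):
 *
 *	hardirq N:   primary handlers return only IRQ_WAKE_THREAD
 *	             -> SPURIOUS_DEFERRED is set, nothing accounted yet
 *	irq thread:  handles the interrupt, bumps threads_handled
 *	hardirq N+1: threads_handled (with bit 31 or'ed in) now differs
 *	             from threads_handled_last -> interrupt N is
 *	             accounted as IRQ_HANDLED
 *
 * Had the thread not bumped threads_handled, interrupt N would have
 * been accounted as IRQ_NONE and fed into irqs_unhandled.
 */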

bool noirqdebug __read_mostly;

int noirqdebug_setup(char *str)
{
        noirqdebug = 1;
        printk(KERN_INFO "IRQ lockup detection disabled\n");

        return 1;
}

__setup("noirqdebug", noirqdebug_setup);
module_param(noirqdebug, bool, 0644);
MODULE_PARM_DESC(noirqdebug, "Disable irq lockup detection when true");

static int __init irqfixup_setup(char *str)
{
        irqfixup = 1;
        printk(KERN_WARNING "Misrouted IRQ fixup support enabled.\n");
        printk(KERN_WARNING "This may impact system performance.\n");

        return 1;
}

__setup("irqfixup", irqfixup_setup);
module_param(irqfixup, int, 0644);

static int __init irqpoll_setup(char *str)
{
        irqfixup = 2;
        printk(KERN_WARNING "Misrouted IRQ fixup and polling support "
                                "enabled\n");
        printk(KERN_WARNING "This may significantly impact system "
                                "performance\n");
        return 1;
}

__setup("irqpoll", irqpoll_setup);
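
/*
 * Usage note: "noirqdebug", "irqfixup" and "irqpoll" are kernel
 * command line options, e.g. append "irqpoll" to the boot parameters
 * to get the most aggressive recovery mode. noirqdebug and irqfixup
 * are additionally declared as writable module parameters (mode
 * 0644), so they can in principle also be flipped at runtime via the
 * module parameter interface in sysfs.
 */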