x86: add debug info for 32bit sparse_irq
arch/x86/kernel/irq_32.c
/*
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86-specific interrupt
 * entry, irq-stacks and irq statistics code. All the remaining
 * irq logic is done by the generic kernel/irq/ code and
 * by the x86-specific irq controller code. (e.g. i8259.c and
 * io_apic.c.)
 */

#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/notifier.h>
#include <linux/cpu.h>
#include <linux/delay.h>

#include <asm/apic.h>
#include <asm/uaccess.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);

/*
 * 'What should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this itself.
 */
void ack_bad_irq(unsigned int irq)
{
        printk(KERN_ERR "unexpected IRQ trap at vector %02x\n", irq);

#ifdef CONFIG_X86_LOCAL_APIC
        /*
         * Currently unexpected vectors happen only on SMP and APIC.
         * We _must_ ack these because every local APIC has only N
         * irq slots per priority level, and a 'hanging, unacked' IRQ
         * holds up an irq slot - in excessive cases (when multiple
         * unexpected vectors occur) that might lock up the APIC
         * completely.
         * But only ack when the APIC is enabled -AK
         */
        if (cpu_has_apic)
                ack_APIC_irq();
#endif
}

#ifdef CONFIG_DEBUG_STACKOVERFLOW
/* Debugging check for stack overflow: is there less than 1KB free? */
static int check_stack_overflow(void)
{
        long sp;

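        /*
         * Mask the stack pointer with THREAD_SIZE - 1 to get its byte
         * offset within the current (THREAD_SIZE-aligned) thread stack.
         * The stack grows down towards the thread_info at the base, so a
         * small offset means little free space is left.
         */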
        __asm__ __volatile__("andl %%esp,%0" :
                             "=r" (sp) : "0" (THREAD_SIZE - 1));

        return sp < (sizeof(struct thread_info) + STACK_WARN);
}

static void print_stack_overflow(void)
{
        printk(KERN_WARNING "low stack detected by irq handler\n");
        dump_stack();
}

#else
static inline int check_stack_overflow(void) { return 0; }
static inline void print_stack_overflow(void) { }
#endif

#ifdef CONFIG_4KSTACKS
/*
 * per-CPU IRQ handling contexts (thread information and stack)
 */
union irq_ctx {
        struct thread_info      tinfo;
        u32                     stack[THREAD_SIZE/sizeof(u32)];
};

static union irq_ctx *hardirq_ctx[NR_CPUS] __read_mostly;
static union irq_ctx *softirq_ctx[NR_CPUS] __read_mostly;

static char softirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;
static char hardirq_stack[NR_CPUS * THREAD_SIZE] __page_aligned_bss;

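/*
 * Run a (void) function on the given stack: %esp and the new stack
 * pointer are swapped via %ebx, the function (held in %edi) is called,
 * and %esp is switched back afterwards.  Used to run handlers on the
 * per-CPU hardirq/softirq stacks above.
 */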
static void call_on_stack(void *func, void *stack)
{
        asm volatile("xchgl     %%ebx,%%esp     \n"
                     "call      *%%edi          \n"
                     "movl      %%ebx,%%esp     \n"
                     : "=b" (stack)
                     : "0" (stack),
                       "D"(func)
                     : "memory", "cc", "edx", "ecx", "eax");
}

static inline int
execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
{
        union irq_ctx *curctx, *irqctx;
        u32 *isp, arg1, arg2;

        curctx = (union irq_ctx *) current_thread_info();
        irqctx = hardirq_ctx[smp_processor_id()];

        /*
         * this is where we switch to the IRQ stack. However, if we are
         * already using the IRQ stack (because we interrupted a hardirq
         * handler) we can't do that and just have to keep using the
         * current stack (which is the irq stack already after all)
         */
        if (unlikely(curctx == irqctx))
                return 0;

        /* build the stack frame on the IRQ stack */
        isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
        irqctx->tinfo.task = curctx->tinfo.task;
        irqctx->tinfo.previous_esp = current_stack_pointer;

        /*
         * Copy the softirq bits in preempt_count so that the
         * softirq checks work in the hardirq context.
         */
        irqctx->tinfo.preempt_count =
                (irqctx->tinfo.preempt_count & ~SOFTIRQ_MASK) |
                (curctx->tinfo.preempt_count & SOFTIRQ_MASK);

        if (unlikely(overflow))
                call_on_stack(print_stack_overflow, isp);

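        /*
         * Switch to the IRQ stack and call desc->handle_irq(irq, desc).
         * The constraints place irq in %eax and desc in %edx, relying on
         * the kernel's regparm(3) calling convention, so no stack-based
         * argument copy is needed; %ebx carries the new stack pointer
         * across the switch, as in call_on_stack().
         */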
        asm volatile("xchgl     %%ebx,%%esp     \n"
                     "call      *%%edi          \n"
                     "movl      %%ebx,%%esp     \n"
                     : "=a" (arg1), "=d" (arg2), "=b" (isp)
                     :  "0" (irq),   "1" (desc),  "2" (isp),
                        "D" (desc->handle_irq)
                     : "memory", "cc", "ecx");
        return 1;
}

/*
 * allocate per-cpu stacks for hardirq and for softirq processing
 */
void __cpuinit irq_ctx_init(int cpu)
{
        union irq_ctx *irqctx;

        if (hardirq_ctx[cpu])
                return;

        irqctx = (union irq_ctx *) &hardirq_stack[cpu * THREAD_SIZE];
        irqctx->tinfo.task              = NULL;
        irqctx->tinfo.exec_domain       = NULL;
        irqctx->tinfo.cpu               = cpu;
        irqctx->tinfo.preempt_count     = HARDIRQ_OFFSET;
        irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);

        hardirq_ctx[cpu] = irqctx;

        irqctx = (union irq_ctx *) &softirq_stack[cpu * THREAD_SIZE];
        irqctx->tinfo.task              = NULL;
        irqctx->tinfo.exec_domain       = NULL;
        irqctx->tinfo.cpu               = cpu;
        irqctx->tinfo.preempt_count     = 0;
        irqctx->tinfo.addr_limit        = MAKE_MM_SEG(0);

        softirq_ctx[cpu] = irqctx;

        printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
               cpu, hardirq_ctx[cpu], softirq_ctx[cpu]);
}

void irq_ctx_exit(int cpu)
{
        hardirq_ctx[cpu] = NULL;
}

asmlinkage void do_softirq(void)
{
        unsigned long flags;
        struct thread_info *curctx;
        union irq_ctx *irqctx;
        u32 *isp;

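        /*
         * Softirqs must not run nested: if we are already in hard or
         * soft interrupt context, any pending softirqs will be handled
         * later, when that context finishes.
         */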
        if (in_interrupt())
                return;

        local_irq_save(flags);

        if (local_softirq_pending()) {
                curctx = current_thread_info();
                irqctx = softirq_ctx[smp_processor_id()];
                irqctx->tinfo.task = curctx->task;
                irqctx->tinfo.previous_esp = current_stack_pointer;

                /* build the stack frame on the softirq stack */
                isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));

                call_on_stack(__do_softirq, isp);
                /*
                 * Shouldn't happen, we returned above if in_interrupt():
                 */
                WARN_ON_ONCE(softirq_count());
        }

        local_irq_restore(flags);
}

#else
static inline int
execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq) { return 0; }
#endif

/*
 * do_IRQ handles all normal device IRQ's (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
unsigned int do_IRQ(struct pt_regs *regs)
{
        struct pt_regs *old_regs;
        /* high bit used in ret_from_ code */
        int overflow;
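        /*
         * The interrupt entry stubs push the negated vector number into
         * orig_ax, so complementing it here recovers the hardware vector.
         */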
        unsigned vector = ~regs->orig_ax;
        struct irq_desc *desc;
        unsigned irq;

        old_regs = set_irq_regs(regs);
        irq_enter();
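        /*
         * Translate the hardware vector to an IRQ number via this CPU's
         * vector_irq[] table.
         */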
        irq = __get_cpu_var(vector_irq)[vector];

        overflow = check_stack_overflow();

        desc = irq_to_desc(irq);
        if (unlikely(!desc)) {
                printk(KERN_EMERG "%s: cannot handle IRQ %d vector %#x cpu %d\n",
                                        __func__, irq, vector, smp_processor_id());
                BUG();
        }

        if (!execute_on_irq_stack(overflow, desc, irq)) {
                if (unlikely(overflow))
                        print_stack_overflow();
                desc->handle_irq(irq, desc);
        }

        irq_exit();
        set_irq_regs(old_regs);
        return 1;
}

/*
 * Interrupt statistics:
 */

atomic_t irq_err_count;

/*
 * /proc/interrupts printing:
 */

int show_interrupts(struct seq_file *p, void *v)
{
        int i = *(loff_t *) v, j;
        struct irqaction *action;
        unsigned long flags;
        unsigned int entries;
        struct irq_desc *desc;
        int tail = 0;

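        /*
         * With sparse IRQs the seq_file iterator hands us an irq_desc
         * pointer directly, so take the IRQ number from it and print
         * every descriptor; the summary lines are emitted after the last
         * one.  In the dense case 'v' is still a plain index into
         * 0..nr_irqs.
         */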
#ifdef CONFIG_HAVE_SPARSE_IRQ
        desc = (struct irq_desc *)v;
        entries = -1U;
        i = desc->irq;
        if (!desc->next)
                tail = 1;
#else
        entries = nr_irqs - 1;
        i = *(loff_t *) v;
        if (i == nr_irqs)
                tail = 1;
        else
                desc = irq_to_desc(i);
#endif

        if (i == 0) {
                seq_printf(p, "           ");
                for_each_online_cpu(j)
                        seq_printf(p, "CPU%-8d", j);
                seq_putc(p, '\n');
        }

        if (i <= entries) {
                unsigned any_count = 0;

                spin_lock_irqsave(&desc->lock, flags);
#ifndef CONFIG_SMP
                any_count = kstat_irqs(i);
#else
                for_each_online_cpu(j)
                        any_count |= kstat_irqs_cpu(i, j);
#endif
                action = desc->action;
                if (!action && !any_count)
                        goto skip;
                seq_printf(p, "%#x: ", i);
#ifndef CONFIG_SMP
                seq_printf(p, "%10u ", kstat_irqs(i));
#else
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));
#endif
                seq_printf(p, " %8s", desc->chip->name);
                seq_printf(p, "-%-8s", desc->name);

                if (action) {
                        seq_printf(p, "  %s", action->name);
                        while ((action = action->next) != NULL)
                                seq_printf(p, ", %s", action->name);
                }

                seq_putc(p, '\n');
skip:
                spin_unlock_irqrestore(&desc->lock, flags);
        }

        if (tail) {
                seq_printf(p, "NMI: ");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", nmi_count(j));
                seq_printf(p, "  Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
                seq_printf(p, "LOC: ");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ",
                                per_cpu(irq_stat, j).apic_timer_irqs);
                seq_printf(p, "  Local timer interrupts\n");
#endif
#ifdef CONFIG_SMP
                seq_printf(p, "RES: ");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ",
                                per_cpu(irq_stat, j).irq_resched_count);
                seq_printf(p, "  Rescheduling interrupts\n");
                seq_printf(p, "CAL: ");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ",
                                per_cpu(irq_stat, j).irq_call_count);
                seq_printf(p, "  Function call interrupts\n");
                seq_printf(p, "TLB: ");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ",
                                per_cpu(irq_stat, j).irq_tlb_count);
                seq_printf(p, "  TLB shootdowns\n");
#endif
#ifdef CONFIG_X86_MCE
                seq_printf(p, "TRM: ");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ",
                                per_cpu(irq_stat, j).irq_thermal_count);
                seq_printf(p, "  Thermal event interrupts\n");
#endif
#ifdef CONFIG_X86_LOCAL_APIC
                seq_printf(p, "SPU: ");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ",
                                per_cpu(irq_stat, j).irq_spurious_count);
                seq_printf(p, "  Spurious interrupts\n");
#endif
                seq_printf(p, "ERR: %10u\n", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
                seq_printf(p, "MIS: %10u\n", atomic_read(&irq_mis_count));
#endif
        }
        return 0;
}

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
        u64 sum = nmi_count(cpu);

#ifdef CONFIG_X86_LOCAL_APIC
        sum += per_cpu(irq_stat, cpu).apic_timer_irqs;
#endif
#ifdef CONFIG_SMP
        sum += per_cpu(irq_stat, cpu).irq_resched_count;
        sum += per_cpu(irq_stat, cpu).irq_call_count;
        sum += per_cpu(irq_stat, cpu).irq_tlb_count;
#endif
#ifdef CONFIG_X86_MCE
        sum += per_cpu(irq_stat, cpu).irq_thermal_count;
#endif
#ifdef CONFIG_X86_LOCAL_APIC
        sum += per_cpu(irq_stat, cpu).irq_spurious_count;
#endif
        return sum;
}

u64 arch_irq_stat(void)
{
        u64 sum = atomic_read(&irq_err_count);

#ifdef CONFIG_X86_IO_APIC
        sum += atomic_read(&irq_mis_count);
#endif
        return sum;
}

#ifdef CONFIG_HOTPLUG_CPU
#include <mach_apic.h>

void fixup_irqs(cpumask_t map)
{
        unsigned int irq;
        static int warned;
        struct irq_desc *desc;

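        /*
         * Walk all IRQ descriptors and restrict each one's affinity to
         * the given map: if the intersection is empty, break affinity and
         * fall back to the whole map.  IRQ 2 (the i8259 cascade) is
         * skipped.
         */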
        for_each_irq_desc(irq, desc) {
                cpumask_t mask;

                if (irq == 2)
                        continue;

                cpus_and(mask, desc->affinity, map);
                if (any_online_cpu(mask) == NR_CPUS) {
                        printk("Breaking affinity for irq %i\n", irq);
                        mask = map;
                }
                if (desc->chip->set_affinity)
                        desc->chip->set_affinity(irq, mask);
                else if (desc->action && !(warned++))
                        printk("Cannot set affinity for irq %i\n", irq);
        }

#if 0
        barrier();
        /* Ingo Molnar says: "after the IO-APIC masks have been redirected
           [note the nop - the interrupt-enable boundary on x86 is two
           instructions from sti] - to flush out pending hardirqs and
           IPIs. After this point nothing is supposed to reach this CPU." */
        __asm__ __volatile__("sti; nop; cli");
        barrier();
#else
        /* That doesn't seem sufficient.  Give it 1ms. */
        local_irq_enable();
        mdelay(1);
        local_irq_disable();
#endif
}
#endif