/*
 *  Derived from arch/i386/kernel/irq.c
 *    Copyright (C) 1992 Linus Torvalds
 *  Adapted from arch/i386 by Gary Thomas
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Updated and modified by Cort Dougan <cort@fsmlabs.com>
 *    Copyright (C) 1996-2001 Cort Dougan
 *  Adapted for Power Macintosh by Paul Mackerras
 *    Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQ's should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU.  If a bit is set, the
 * interrupt is _enabled_.  As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask.  I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_8xx
 * to reduce code space and undefined function references.
 */

#undef DEBUG

#include <linux/module.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/bootmem.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/perf_event.h>

#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#endif
#define CREATE_TRACE_POINTS
#include <asm/trace.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

int __irq_offset_value;

#ifdef CONFIG_PPC32
EXPORT_SYMBOL(__irq_offset_value);
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
extern int tau_interrupts(int);
#endif
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64

#ifndef CONFIG_SPARSE_IRQ
EXPORT_SYMBOL(irq_desc);
#endif

int distribute_irqs = 1;

static inline notrace unsigned long get_hard_enabled(void)
{
        unsigned long enabled;

        __asm__ __volatile__("lbz %0,%1(13)"
        : "=r" (enabled) : "i" (offsetof(struct paca_struct, hard_enabled)));

        return enabled;
}

static inline notrace void set_soft_enabled(unsigned long enable)
{
        __asm__ __volatile__("stb %0,%1(13)"
        : : "r" (enable), "i" (offsetof(struct paca_struct, soft_enabled)));
}

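/*
 * A note on the 64-bit "lazy" interrupt disabling scheme, as summarized
 * from the code below and the paca fields it touches: local_irq_disable()
 * only clears the paca soft_enabled byte and leaves MSR:EE set.  If an
 * interrupt arrives while soft-disabled, the low-level exception code
 * hard-disables (clearing hard_enabled) and returns without running any
 * handler; the replay then happens here, in raw_local_irq_restore(),
 * when interrupts are soft-enabled again.
 */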
notrace void raw_local_irq_restore(unsigned long en)
{
        /*
         * get_paca()->soft_enabled = en;
         * Is it ever valid to use local_irq_restore(0) when soft_enabled is 1?
         * That was allowed before, and in such a case we do need to take care
         * that gcc will set soft_enabled directly via r13, not choose to use
         * an intermediate register, lest we're preempted to a different cpu.
         */
        set_soft_enabled(en);
        if (!en)
                return;

#ifdef CONFIG_PPC_STD_MMU_64
        if (firmware_has_feature(FW_FEATURE_ISERIES)) {
                /*
                 * Do we need to disable preemption here?  Not really: in the
                 * unlikely event that we're preempted to a different cpu in
                 * between getting r13, loading its lppaca_ptr, and loading
                 * its any_int, we might call iseries_handle_interrupts without
                 * an interrupt pending on the new cpu, but that's no disaster,
                 * is it?  And the business of preempting us off the old cpu
                 * would itself involve a local_irq_restore which handles the
                 * interrupt to that cpu.
                 *
                 * But use "local_paca->lppaca_ptr" instead of "get_lppaca()"
                 * to avoid any preemption checking added into get_paca().
                 */
                if (local_paca->lppaca_ptr->int_dword.any_int)
                        iseries_handle_interrupts();
        }
#endif /* CONFIG_PPC_STD_MMU_64 */

        if (test_perf_event_pending()) {
                clear_perf_event_pending();
                perf_event_do_pending();
        }

        /*
         * if (get_paca()->hard_enabled) return;
         * But again we need to take care that gcc gets hard_enabled directly
         * via r13, not choose to use an intermediate register, lest we're
         * preempted to a different cpu in between the two instructions.
         */
        if (get_hard_enabled())
                return;

        /*
         * Need to hard-enable interrupts here.  Since currently disabled,
         * no need to take further asm precautions against preemption; but
         * use local_paca instead of get_paca() to avoid preemption checking.
         */
        local_paca->hard_enabled = en;
        if ((int)mfspr(SPRN_DEC) < 0)
                mtspr(SPRN_DEC, 1);

        /*
         * Force the delivery of pending soft-disabled interrupts on PS3.
         * Any HV call will have this side effect.
         */
        if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
                u64 tmp;
                lv1_get_version_info(&tmp);
        }

        __hard_irq_enable();
}
EXPORT_SYMBOL(raw_local_irq_restore);
#endif /* CONFIG_PPC64 */

static int show_other_interrupts(struct seq_file *p, int prec)
{
        int j;

#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
        if (tau_initialized) {
                seq_printf(p, "%*s: ", prec, "TAU");
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", tau_interrupts(j));
                seq_puts(p, "  PowerPC             Thermal Assist (cpu temp)\n");
        }
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */

        seq_printf(p, "%*s: ", prec, "LOC");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs);
        seq_printf(p, "  Local timer interrupts\n");

        seq_printf(p, "%*s: ", prec, "SPU");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
        seq_printf(p, "  Spurious interrupts\n");

        seq_printf(p, "%*s: ", prec, "CNT");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
        seq_printf(p, "  Performance monitoring interrupts\n");

        seq_printf(p, "%*s: ", prec, "MCE");
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
        seq_printf(p, "  Machine check exceptions\n");

        return 0;
}

int show_interrupts(struct seq_file *p, void *v)
{
        unsigned long flags, any_count = 0;
        int i = *(loff_t *) v, j, prec;
        struct irqaction *action;
        struct irq_desc *desc;

        if (i > nr_irqs)
                return 0;

        for (prec = 3, j = 1000; prec < 10 && j <= nr_irqs; ++prec)
                j *= 10;

        if (i == nr_irqs)
                return show_other_interrupts(p, prec);

        /* print header */
        if (i == 0) {
                seq_printf(p, "%*s", prec + 8, "");
                for_each_online_cpu(j)
                        seq_printf(p, "CPU%-8d", j);
                seq_putc(p, '\n');
        }

        desc = irq_to_desc(i);
        if (!desc)
                return 0;

        raw_spin_lock_irqsave(&desc->lock, flags);
        for_each_online_cpu(j)
                any_count |= kstat_irqs_cpu(i, j);
        action = desc->action;
        if (!action && !any_count)
                goto out;

        seq_printf(p, "%*d: ", prec, i);
        for_each_online_cpu(j)
                seq_printf(p, "%10u ", kstat_irqs_cpu(i, j));

        if (desc->chip)
                seq_printf(p, "  %-16s", desc->chip->name);
        else
                seq_printf(p, "  %-16s", "None");
        seq_printf(p, " %-8s", (desc->status & IRQ_LEVEL) ? "Level" : "Edge");

        if (action) {
                seq_printf(p, "     %s", action->name);
                while ((action = action->next) != NULL)
                        seq_printf(p, ", %s", action->name);
        }

        seq_putc(p, '\n');
out:
        raw_spin_unlock_irqrestore(&desc->lock, flags);
        return 0;
}

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
        u64 sum = per_cpu(irq_stat, cpu).timer_irqs;

        sum += per_cpu(irq_stat, cpu).pmu_irqs;
        sum += per_cpu(irq_stat, cpu).mce_exceptions;
        sum += per_cpu(irq_stat, cpu).spurious_irqs;

        return sum;
}

#ifdef CONFIG_HOTPLUG_CPU
void fixup_irqs(cpumask_t map)
{
        struct irq_desc *desc;
        unsigned int irq;
        static int warned;

        for_each_irq(irq) {
                cpumask_t mask;

                desc = irq_to_desc(irq);
                if (!desc)
                        continue;

                if (desc->status & IRQ_PER_CPU)
                        continue;

                cpumask_and(&mask, desc->affinity, &map);
                if (any_online_cpu(mask) == NR_CPUS) {
                        printk("Breaking affinity for irq %i\n", irq);
                        mask = map;
                }
                if (desc->chip->set_affinity)
                        desc->chip->set_affinity(irq, &mask);
                else if (desc->action && !(warned++))
                        printk("Cannot set affinity for irq %i\n", irq);
        }

        local_irq_enable();
        mdelay(1);
        local_irq_disable();
}
#endif

#ifdef CONFIG_IRQSTACKS
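/*
 * Dispatch one interrupt on the dedicated per-cpu hardirq stack.  The
 * switch keeps deep handler call chains off the (small) kernel stack of
 * whatever task happened to be running; the thread_info fiddling below
 * mirrors just enough task state onto the irq stack for the softirq and
 * preemption checks to keep working there.
 */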
static inline void handle_one_irq(unsigned int irq)
{
        struct thread_info *curtp, *irqtp;
        unsigned long saved_sp_limit;
        struct irq_desc *desc;

        /* Switch to the irq stack to handle this */
        curtp = current_thread_info();
        irqtp = hardirq_ctx[smp_processor_id()];

        if (curtp == irqtp) {
                /* We're already on the irq stack, just handle it */
                generic_handle_irq(irq);
                return;
        }

        desc = irq_to_desc(irq);
        saved_sp_limit = current->thread.ksp_limit;

        irqtp->task = curtp->task;
        irqtp->flags = 0;

        /* Copy the softirq bits in preempt_count so that the
         * softirq checks work in the hardirq context. */
        irqtp->preempt_count = (irqtp->preempt_count & ~SOFTIRQ_MASK) |
                               (curtp->preempt_count & SOFTIRQ_MASK);

        current->thread.ksp_limit = (unsigned long)irqtp +
                _ALIGN_UP(sizeof(struct thread_info), 16);

        call_handle_irq(irq, desc, irqtp, desc->handle_irq);
        current->thread.ksp_limit = saved_sp_limit;
        irqtp->task = NULL;

        /* Set any flag that may have been set on the
         * alternate stack
         */
        if (irqtp->flags)
                set_bits(irqtp->flags, &curtp->flags);
}
#else
static inline void handle_one_irq(unsigned int irq)
{
        generic_handle_irq(irq);
}
#endif

static inline void check_stack_overflow(void)
{
#ifdef CONFIG_DEBUG_STACKOVERFLOW
        long sp;

        sp = __get_SP() & (THREAD_SIZE-1);

        /* check for stack overflow: is there less than 2KB free? */
        if (unlikely(sp < (sizeof(struct thread_info) + 2048))) {
                printk("do_IRQ: stack overflow: %ld\n",
                        sp - sizeof(struct thread_info));
                dump_stack();
        }
#endif
}

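/*
 * Common interrupt entry point, called from the low-level exception
 * code.  The platform's ppc_md.get_irq() hook tells us which (virtual)
 * irq fired; NO_IRQ_IGNORE lets a PIC driver report "nothing to do"
 * without the event being counted as spurious.
 */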
void do_IRQ(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);
        unsigned int irq;

        trace_irq_entry(regs);

        irq_enter();

        check_stack_overflow();

        irq = ppc_md.get_irq();

        if (irq != NO_IRQ && irq != NO_IRQ_IGNORE)
                handle_one_irq(irq);
        else if (irq != NO_IRQ_IGNORE)
                __get_cpu_var(irq_stat).spurious_irqs++;

        irq_exit();
        set_irq_regs(old_regs);

#ifdef CONFIG_PPC_ISERIES
        if (firmware_has_feature(FW_FEATURE_ISERIES) &&
                        get_lppaca()->int_dword.fields.decr_int) {
                get_lppaca()->int_dword.fields.decr_int = 0;
                /* Signal a fake decrementer interrupt */
                timer_interrupt(regs);
        }
#endif

        trace_irq_exit(regs);
}

void __init init_IRQ(void)
{
        if (ppc_md.init_IRQ)
                ppc_md.init_IRQ();

        exc_lvl_ctx_init();

        irq_ctx_init();
}

#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
struct thread_info   *critirq_ctx[NR_CPUS] __read_mostly;
struct thread_info    *dbgirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *mcheckirq_ctx[NR_CPUS] __read_mostly;

void exc_lvl_ctx_init(void)
{
        struct thread_info *tp;
        int i;

        for_each_possible_cpu(i) {
                memset((void *)critirq_ctx[i], 0, THREAD_SIZE);
                tp = critirq_ctx[i];
                tp->cpu = i;
                tp->preempt_count = 0;

#ifdef CONFIG_BOOKE
                memset((void *)dbgirq_ctx[i], 0, THREAD_SIZE);
                tp = dbgirq_ctx[i];
                tp->cpu = i;
                tp->preempt_count = 0;

                memset((void *)mcheckirq_ctx[i], 0, THREAD_SIZE);
                tp = mcheckirq_ctx[i];
                tp->cpu = i;
                tp->preempt_count = HARDIRQ_OFFSET;
#endif
        }
}
#endif

#ifdef CONFIG_IRQSTACKS
struct thread_info *softirq_ctx[NR_CPUS] __read_mostly;
struct thread_info *hardirq_ctx[NR_CPUS] __read_mostly;

void irq_ctx_init(void)
{
        struct thread_info *tp;
        int i;

        for_each_possible_cpu(i) {
                memset((void *)softirq_ctx[i], 0, THREAD_SIZE);
                tp = softirq_ctx[i];
                tp->cpu = i;
                tp->preempt_count = 0;

                memset((void *)hardirq_ctx[i], 0, THREAD_SIZE);
                tp = hardirq_ctx[i];
                tp->cpu = i;
                tp->preempt_count = HARDIRQ_OFFSET;
        }
}

static inline void do_softirq_onstack(void)
{
        struct thread_info *curtp, *irqtp;
        unsigned long saved_sp_limit = current->thread.ksp_limit;

        curtp = current_thread_info();
        irqtp = softirq_ctx[smp_processor_id()];
        irqtp->task = curtp->task;
        current->thread.ksp_limit = (unsigned long)irqtp +
                                    _ALIGN_UP(sizeof(struct thread_info), 16);
        call_do_softirq(irqtp);
        current->thread.ksp_limit = saved_sp_limit;
        irqtp->task = NULL;
}

#else
#define do_softirq_onstack()    __do_softirq()
#endif /* CONFIG_IRQSTACKS */

void do_softirq(void)
{
        unsigned long flags;

        if (in_interrupt())
                return;

        local_irq_save(flags);

        if (local_softirq_pending())
                do_softirq_onstack();

        local_irq_restore(flags);
}


/*
 * IRQ controller and virtual interrupts
 */

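/*
 * An overview of the scheme below, as reconstructed from the code: the
 * kernel deals in "virtual" irq numbers (virq), while each interrupt
 * controller ("host", struct irq_host) numbers its own hardware sources
 * (hwirq).  The static irq_map[] table maps virq -> (host, hwirq), and
 * each host provides a reverse map of one of four flavours: legacy
 * (fixed ISA numbers 1..15), linear (a simple array), tree (a radix
 * tree, for sparse hwirq spaces) or nomap (no reverse map at all).
 */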
static LIST_HEAD(irq_hosts);
static DEFINE_RAW_SPINLOCK(irq_big_lock);
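/*
 * revmap_trees_allocated is a little state machine synchronizing the
 * lazy radix tree setup (done in irq_late_init() below) with its users:
 * 0 = trees not initialized, 1 = trees initialized (inserts allowed),
 * 2 = pre-existing mappings inserted too (lookups may trust the tree).
 */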
static unsigned int revmap_trees_allocated;
static DEFINE_MUTEX(revmap_trees_mutex);
struct irq_map_entry irq_map[NR_IRQS];
static unsigned int irq_virq_count = NR_IRQS;
static struct irq_host *irq_default_host;

irq_hw_number_t virq_to_hw(unsigned int virq)
{
        return irq_map[virq].hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);

static int default_irq_host_match(struct irq_host *h, struct device_node *np)
{
        return h->of_node != NULL && h->of_node == np;
}

struct irq_host *irq_alloc_host(struct device_node *of_node,
                                unsigned int revmap_type,
                                unsigned int revmap_arg,
                                struct irq_host_ops *ops,
                                irq_hw_number_t inval_irq)
{
        struct irq_host *host;
        unsigned int size = sizeof(struct irq_host);
        unsigned int i;
        unsigned int *rmap;
        unsigned long flags;

        /* Allocate structure and revmap table if using linear mapping */
        if (revmap_type == IRQ_HOST_MAP_LINEAR)
                size += revmap_arg * sizeof(unsigned int);
        host = zalloc_maybe_bootmem(size, GFP_KERNEL);
        if (host == NULL)
                return NULL;

        /* Fill structure */
        host->revmap_type = revmap_type;
        host->inval_irq = inval_irq;
        host->ops = ops;
        host->of_node = of_node_get(of_node);

        if (host->ops->match == NULL)
                host->ops->match = default_irq_host_match;

        raw_spin_lock_irqsave(&irq_big_lock, flags);

        /* If it's a legacy controller, check for duplicates and
         * mark it as allocated (we use the irq 0 host pointer for that)
         */
        if (revmap_type == IRQ_HOST_MAP_LEGACY) {
                if (irq_map[0].host != NULL) {
                        raw_spin_unlock_irqrestore(&irq_big_lock, flags);
                        /* If we are in early boot, we can't free the
                         * structure; too bad.  This will be fixed once
                         * slab is made available early, instead of the
                         * current cruft.
                         */
                        if (mem_init_done)
                                kfree(host);
                        return NULL;
                }
                irq_map[0].host = host;
        }

        list_add(&host->link, &irq_hosts);
        raw_spin_unlock_irqrestore(&irq_big_lock, flags);

        /* Additional setups per revmap type */
        switch (revmap_type) {
        case IRQ_HOST_MAP_LEGACY:
                /* 0 is always the invalid number for legacy */
                host->inval_irq = 0;
                /* setup us as the host for all legacy interrupts */
                for (i = 1; i < NUM_ISA_INTERRUPTS; i++) {
                        irq_map[i].hwirq = i;
                        smp_wmb();
                        irq_map[i].host = host;
                        smp_wmb();

                        /* Clear norequest flags */
                        irq_to_desc(i)->status &= ~IRQ_NOREQUEST;

                        /* Legacy flags are left to default at this point,
                         * one can then use irq_create_mapping() to
                         * explicitly change them
                         */
                        ops->map(host, i, i);
                }
                break;
        case IRQ_HOST_MAP_LINEAR:
                rmap = (unsigned int *)(host + 1);
                for (i = 0; i < revmap_arg; i++)
                        rmap[i] = NO_IRQ;
                host->revmap_data.linear.size = revmap_arg;
                smp_wmb();
                host->revmap_data.linear.revmap = rmap;
                break;
        default:
                break;
        }

        pr_debug("irq: Allocated host of type %d @0x%p\n", revmap_type, host);

        return host;
}
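
/*
 * A sketch of typical usage from a hypothetical PIC driver (np and
 * mypic_host_ops are illustrative names, not symbols defined in this
 * file): a controller with 64 hardware sources using a linear reverse
 * map would do
 *
 *        host = irq_alloc_host(np, IRQ_HOST_MAP_LINEAR, 64,
 *                              &mypic_host_ops, 0);
 *
 * and would then get ->map() callbacks as mappings are created.
 */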

struct irq_host *irq_find_host(struct device_node *node)
{
        struct irq_host *h, *found = NULL;
        unsigned long flags;

        /* We might want to match the legacy controller last since
         * it might potentially be set to match all interrupts in
         * the absence of a device node.  This isn't a problem yet
         * though...
         */
        raw_spin_lock_irqsave(&irq_big_lock, flags);
        list_for_each_entry(h, &irq_hosts, link)
                if (h->ops->match(h, node)) {
                        found = h;
                        break;
                }
        raw_spin_unlock_irqrestore(&irq_big_lock, flags);
        return found;
}
EXPORT_SYMBOL_GPL(irq_find_host);

void irq_set_default_host(struct irq_host *host)
{
        pr_debug("irq: Default host set to @0x%p\n", host);

        irq_default_host = host;
}

void irq_set_virq_count(unsigned int count)
{
        pr_debug("irq: Trying to set virq count to %d\n", count);

        BUG_ON(count < NUM_ISA_INTERRUPTS);
        if (count < NR_IRQS)
                irq_virq_count = count;
}

static int irq_setup_virq(struct irq_host *host, unsigned int virq,
                            irq_hw_number_t hwirq)
{
        struct irq_desc *desc;

        desc = irq_to_desc_alloc_node(virq, 0);
        if (!desc) {
                pr_debug("irq: -> allocating desc failed\n");
                goto error;
        }

        /* Clear IRQ_NOREQUEST flag */
        desc->status &= ~IRQ_NOREQUEST;

        /* map it */
        smp_wmb();
        irq_map[virq].hwirq = hwirq;
        smp_mb();

        if (host->ops->map(host, virq, hwirq)) {
                pr_debug("irq: -> mapping failed, freeing\n");
                goto error;
        }

        return 0;

error:
        irq_free_virt(virq, 1);
        return -1;
}

unsigned int irq_create_direct_mapping(struct irq_host *host)
{
        unsigned int virq;

        if (host == NULL)
                host = irq_default_host;

        BUG_ON(host == NULL);
        WARN_ON(host->revmap_type != IRQ_HOST_MAP_NOMAP);

        virq = irq_alloc_virt(host, 1, 0);
        if (virq == NO_IRQ) {
                pr_debug("irq: create_direct virq allocation failed\n");
                return NO_IRQ;
        }

        pr_debug("irq: create_direct obtained virq %d\n", virq);

        if (irq_setup_virq(host, virq, virq))
                return NO_IRQ;

        return virq;
}

unsigned int irq_create_mapping(struct irq_host *host,
                                irq_hw_number_t hwirq)
{
        unsigned int virq, hint;

        pr_debug("irq: irq_create_mapping(0x%p, 0x%lx)\n", host, hwirq);

        /* Look for default host if necessary */
        if (host == NULL)
                host = irq_default_host;
        if (host == NULL) {
                printk(KERN_WARNING "irq_create_mapping called for"
                       " NULL host, hwirq=%lx\n", hwirq);
                WARN_ON(1);
                return NO_IRQ;
        }
        pr_debug("irq: -> using host @%p\n", host);

        /* Check if a mapping already exists; if it does, call
         * host->ops->map() to update the flags
         */
        virq = irq_find_mapping(host, hwirq);
        if (virq != NO_IRQ) {
                if (host->ops->remap)
                        host->ops->remap(host, virq, hwirq);
                pr_debug("irq: -> existing mapping on virq %d\n", virq);
                return virq;
        }

        /* Get a virtual interrupt number */
        if (host->revmap_type == IRQ_HOST_MAP_LEGACY) {
                /* Handle legacy */
                virq = (unsigned int)hwirq;
                if (virq == 0 || virq >= NUM_ISA_INTERRUPTS)
                        return NO_IRQ;
                return virq;
        } else {
                /* Allocate a virtual interrupt number */
                hint = hwirq % irq_virq_count;
                virq = irq_alloc_virt(host, 1, hint);
                if (virq == NO_IRQ) {
                        pr_debug("irq: -> virq allocation failed\n");
                        return NO_IRQ;
                }
        }

        if (irq_setup_virq(host, virq, hwirq))
                return NO_IRQ;

        printk(KERN_DEBUG "irq: irq %lu on host %s mapped to virtual irq %u\n",
                hwirq, host->of_node ? host->of_node->full_name : "null", virq);

        return virq;
}
EXPORT_SYMBOL_GPL(irq_create_mapping);
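
/*
 * A minimal caller sketch (hypothetical; my_handler and dev are not
 * defined in this file): a PIC driver that wants a Linux interrupt
 * number for hardware source 5 on its host would do
 *
 *        virq = irq_create_mapping(host, 5);
 *        if (virq != NO_IRQ)
 *                err = request_irq(virq, my_handler, 0, "mydev", dev);
 */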

unsigned int irq_create_of_mapping(struct device_node *controller,
                                   const u32 *intspec, unsigned int intsize)
{
        struct irq_host *host;
        irq_hw_number_t hwirq;
        unsigned int type = IRQ_TYPE_NONE;
        unsigned int virq;

        if (controller == NULL)
                host = irq_default_host;
        else
                host = irq_find_host(controller);
        if (host == NULL) {
                printk(KERN_WARNING "irq: no irq host found for %s !\n",
                       controller ? controller->full_name : "null");
                return NO_IRQ;
        }

        /* If host has no translation, then we assume interrupt line */
        if (host->ops->xlate == NULL)
                hwirq = intspec[0];
        else {
                if (host->ops->xlate(host, controller, intspec, intsize,
                                     &hwirq, &type))
                        return NO_IRQ;
        }

        /* Create mapping */
        virq = irq_create_mapping(host, hwirq);
        if (virq == NO_IRQ)
                return virq;

        /* Set type if specified and different from the current one */
        if (type != IRQ_TYPE_NONE &&
            type != (irq_to_desc(virq)->status & IRQF_TRIGGER_MASK))
                set_irq_type(virq, type);
        return virq;
}
EXPORT_SYMBOL_GPL(irq_create_of_mapping);

unsigned int irq_of_parse_and_map(struct device_node *dev, int index)
{
        struct of_irq oirq;

        if (of_irq_map_one(dev, index, &oirq))
                return NO_IRQ;

        return irq_create_of_mapping(oirq.controller, oirq.specifier,
                                     oirq.size);
}
EXPORT_SYMBOL_GPL(irq_of_parse_and_map);
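
/*
 * Typical device-driver usage (a sketch; np, my_isr and dev are
 * illustrative names): translate the first interrupt specifier of a
 * device node and hook a handler onto it:
 *
 *        int virq = irq_of_parse_and_map(np, 0);
 *
 *        if (virq == NO_IRQ)
 *                return -ENODEV;
 *        err = request_irq(virq, my_isr, 0, "mydev", dev);
 */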

void irq_dispose_mapping(unsigned int virq)
{
        struct irq_host *host;
        irq_hw_number_t hwirq;

        if (virq == NO_IRQ)
                return;

        host = irq_map[virq].host;
        WARN_ON(host == NULL);
        if (host == NULL)
                return;

        /* Never unmap legacy interrupts */
        if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
                return;

        /* remove chip and handler */
        set_irq_chip_and_handler(virq, NULL, NULL);

        /* Make sure it's completed */
        synchronize_irq(virq);

        /* Tell the PIC about it */
        if (host->ops->unmap)
                host->ops->unmap(host, virq);
        smp_mb();

        /* Clear reverse map */
        hwirq = irq_map[virq].hwirq;
        switch (host->revmap_type) {
        case IRQ_HOST_MAP_LINEAR:
                if (hwirq < host->revmap_data.linear.size)
                        host->revmap_data.linear.revmap[hwirq] = NO_IRQ;
                break;
        case IRQ_HOST_MAP_TREE:
                /*
                 * Check if radix tree allocated yet, if not then nothing to
                 * remove.
                 */
                smp_rmb();
                if (revmap_trees_allocated < 1)
                        break;
                mutex_lock(&revmap_trees_mutex);
                radix_tree_delete(&host->revmap_data.tree, hwirq);
                mutex_unlock(&revmap_trees_mutex);
                break;
        }

        /* Destroy map */
        smp_mb();
        irq_map[virq].hwirq = host->inval_irq;

        /* Set some flags */
        irq_to_desc(virq)->status |= IRQ_NOREQUEST;

        /* Free it */
        irq_free_virt(virq, 1);
}
EXPORT_SYMBOL_GPL(irq_dispose_mapping);

unsigned int irq_find_mapping(struct irq_host *host,
                              irq_hw_number_t hwirq)
{
        unsigned int i;
        unsigned int hint = hwirq % irq_virq_count;

        /* Look for default host if necessary */
        if (host == NULL)
                host = irq_default_host;
        if (host == NULL)
                return NO_IRQ;

        /* legacy -> bail early */
        if (host->revmap_type == IRQ_HOST_MAP_LEGACY)
                return hwirq;

        /* Slow path does a linear search of the map */
        if (hint < NUM_ISA_INTERRUPTS)
                hint = NUM_ISA_INTERRUPTS;
        i = hint;
        do {
                if (irq_map[i].host == host &&
                    irq_map[i].hwirq == hwirq)
                        return i;
                i++;
                if (i >= irq_virq_count)
                        i = NUM_ISA_INTERRUPTS;
        } while (i != hint);
        return NO_IRQ;
}
EXPORT_SYMBOL_GPL(irq_find_mapping);


unsigned int irq_radix_revmap_lookup(struct irq_host *host,
                                     irq_hw_number_t hwirq)
{
        struct irq_map_entry *ptr;
        unsigned int virq;

        WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);

        /*
         * Check if the radix tree exists and has been initialized.
         * If not, we fall back to slow mode.
         */
        if (revmap_trees_allocated < 2)
                return irq_find_mapping(host, hwirq);

        /* Now try to resolve */
        /*
         * No rcu_read_lock(ing) needed, the ptr returned can't go under us
         * as it's referencing an entry in the static irq_map table.
         */
        ptr = radix_tree_lookup(&host->revmap_data.tree, hwirq);

        /*
         * If found in the radix tree, then fine.
         * Else fall back to a linear lookup - this should not happen in
         * practice as it means that we failed to insert the node in the
         * radix tree.
         */
        if (ptr)
                virq = ptr - irq_map;
        else
                virq = irq_find_mapping(host, hwirq);

        return virq;
}

void irq_radix_revmap_insert(struct irq_host *host, unsigned int virq,
                             irq_hw_number_t hwirq)
{
        WARN_ON(host->revmap_type != IRQ_HOST_MAP_TREE);

        /*
         * Check if the radix tree exists yet.
         * If not, then the irq will be inserted into the tree when it gets
         * initialized.
         */
        smp_rmb();
        if (revmap_trees_allocated < 1)
                return;

        if (virq != NO_IRQ) {
                mutex_lock(&revmap_trees_mutex);
                radix_tree_insert(&host->revmap_data.tree, hwirq,
                                  &irq_map[virq]);
                mutex_unlock(&revmap_trees_mutex);
        }
}

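/*
 * Note: irq_linear_revmap() is the fast path, intended to be usable
 * from a PIC driver's cascade or get_irq hook; all it does is a bounds
 * check plus an array load, with irq_find_mapping() only used to
 * lazily fill slots that have not been looked up before.
 */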
unsigned int irq_linear_revmap(struct irq_host *host,
                               irq_hw_number_t hwirq)
{
        unsigned int *revmap;

        WARN_ON(host->revmap_type != IRQ_HOST_MAP_LINEAR);

        /* Check revmap bounds */
        if (unlikely(hwirq >= host->revmap_data.linear.size))
                return irq_find_mapping(host, hwirq);

        /* Check if revmap was allocated */
        revmap = host->revmap_data.linear.revmap;
        if (unlikely(revmap == NULL))
                return irq_find_mapping(host, hwirq);

        /* Fill up revmap with slow path if no mapping found */
        if (unlikely(revmap[hwirq] == NO_IRQ))
                revmap[hwirq] = irq_find_mapping(host, hwirq);

        return revmap[hwirq];
}

unsigned int irq_alloc_virt(struct irq_host *host,
                            unsigned int count,
                            unsigned int hint)
{
        unsigned long flags;
        unsigned int i, j, found = NO_IRQ;

        if (count == 0 || count > (irq_virq_count - NUM_ISA_INTERRUPTS))
                return NO_IRQ;

        raw_spin_lock_irqsave(&irq_big_lock, flags);

        /* Use hint for 1 interrupt if any */
        if (count == 1 && hint >= NUM_ISA_INTERRUPTS &&
            hint < irq_virq_count && irq_map[hint].host == NULL) {
                found = hint;
                goto hint_found;
        }

        /* Look for count consecutive numbers in the allocatable
         * (non-legacy) space
         */
        for (i = NUM_ISA_INTERRUPTS, j = 0; i < irq_virq_count; i++) {
                if (irq_map[i].host != NULL)
                        j = 0;
                else
                        j++;

                if (j == count) {
                        found = i - count + 1;
                        break;
                }
        }
        if (found == NO_IRQ) {
                raw_spin_unlock_irqrestore(&irq_big_lock, flags);
                return NO_IRQ;
        }
 hint_found:
        for (i = found; i < (found + count); i++) {
                irq_map[i].hwirq = host->inval_irq;
                smp_wmb();
                irq_map[i].host = host;
        }
        raw_spin_unlock_irqrestore(&irq_big_lock, flags);
        return found;
}

void irq_free_virt(unsigned int virq, unsigned int count)
{
        unsigned long flags;
        unsigned int i;

        WARN_ON(virq < NUM_ISA_INTERRUPTS);
        WARN_ON(count == 0 || (virq + count) > irq_virq_count);

        raw_spin_lock_irqsave(&irq_big_lock, flags);
        for (i = virq; i < (virq + count); i++) {
                struct irq_host *host;

                if (i < NUM_ISA_INTERRUPTS ||
                    (virq + count) > irq_virq_count)
                        continue;

                host = irq_map[i].host;
                irq_map[i].hwirq = host->inval_irq;
                smp_wmb();
                irq_map[i].host = NULL;
        }
        raw_spin_unlock_irqrestore(&irq_big_lock, flags);
}

int arch_early_irq_init(void)
{
        struct irq_desc *desc;
        int i;

        for (i = 0; i < NR_IRQS; i++) {
                desc = irq_to_desc(i);
                if (desc)
                        desc->status |= IRQ_NOREQUEST;
        }

        return 0;
}

int arch_init_chip_data(struct irq_desc *desc, int node)
{
        desc->status |= IRQ_NOREQUEST;
        return 0;
}

/* We need to create the radix trees late */
static int irq_late_init(void)
{
        struct irq_host *h;
        unsigned int i;

        /*
         * No mutual exclusion with respect to accessors of the tree is needed
         * here as the synchronization is done via the state variable
         * revmap_trees_allocated.
         */
        list_for_each_entry(h, &irq_hosts, link) {
                if (h->revmap_type == IRQ_HOST_MAP_TREE)
                        INIT_RADIX_TREE(&h->revmap_data.tree, GFP_KERNEL);
        }

        /*
         * Make sure the radix tree inits are visible before setting
         * the flag
         */
        smp_wmb();
        revmap_trees_allocated = 1;

        /*
         * Insert the reverse mapping for those interrupts already present
         * in irq_map[].
         */
        mutex_lock(&revmap_trees_mutex);
        for (i = 0; i < irq_virq_count; i++) {
                if (irq_map[i].host &&
                    (irq_map[i].host->revmap_type == IRQ_HOST_MAP_TREE))
                        radix_tree_insert(&irq_map[i].host->revmap_data.tree,
                                          irq_map[i].hwirq, &irq_map[i]);
        }
        mutex_unlock(&revmap_trees_mutex);

        /*
         * Make sure the radix tree insertions are visible before setting
         * the flag
         */
        smp_wmb();
        revmap_trees_allocated = 2;

        return 0;
}
arch_initcall(irq_late_init);

#ifdef CONFIG_VIRQ_DEBUG
static int virq_debug_show(struct seq_file *m, void *private)
{
        unsigned long flags;
        struct irq_desc *desc;
        const char *p;
        char none[] = "none";
        int i;

        seq_printf(m, "%-5s  %-7s  %-15s  %s\n", "virq", "hwirq",
                      "chip name", "host name");

        for (i = 1; i < nr_irqs; i++) {
                desc = irq_to_desc(i);
                if (!desc)
                        continue;

                raw_spin_lock_irqsave(&desc->lock, flags);

                if (desc->action && desc->action->handler) {
                        seq_printf(m, "%5d  ", i);
                        seq_printf(m, "0x%05lx  ", virq_to_hw(i));

                        if (desc->chip && desc->chip->name)
                                p = desc->chip->name;
                        else
                                p = none;
                        seq_printf(m, "%-15s  ", p);

                        if (irq_map[i].host && irq_map[i].host->of_node)
                                p = irq_map[i].host->of_node->full_name;
                        else
                                p = none;
                        seq_printf(m, "%s\n", p);
                }

                raw_spin_unlock_irqrestore(&desc->lock, flags);
        }

        return 0;
}

static int virq_debug_open(struct inode *inode, struct file *file)
{
        return single_open(file, virq_debug_show, inode->i_private);
}

static const struct file_operations virq_debug_fops = {
        .open = virq_debug_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int __init irq_debugfs_init(void)
{
        if (debugfs_create_file("virq_mapping", S_IRUGO, powerpc_debugfs_root,
                                 NULL, &virq_debug_fops) == NULL)
                return -ENOMEM;

        return 0;
}
__initcall(irq_debugfs_init);
#endif /* CONFIG_VIRQ_DEBUG */

#ifdef CONFIG_PPC64
static int __init setup_noirqdistrib(char *str)
{
        distribute_irqs = 0;
        return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */