arch/sparc64/kernel/irq.c
/* irq.c: UltraSparc IRQ handling/init/registry.
 *
 * Copyright (C) 1997, 2007  David S. Miller  (davem@davemloft.net)
 * Copyright (C) 1998  Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1998  Jakub Jelinek    (jj@ultra.linux.cz)
 */

#include <linux/module.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/errno.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/random.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/bootmem.h>
#include <linux/irq.h>
#include <linux/msi.h>

#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/atomic.h>
#include <asm/system.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/sbus.h>
#include <asm/iommu.h>
#include <asm/upa.h>
#include <asm/oplib.h>
#include <asm/prom.h>
#include <asm/timer.h>
#include <asm/smp.h>
#include <asm/starfire.h>
#include <asm/uaccess.h>
#include <asm/cache.h>
#include <asm/cpudata.h>
#include <asm/auxio.h>
#include <asm/head.h>
#include <asm/hypervisor.h>

/* UPA nodes send an interrupt packet to the UltraSparc with the first
 * data reg's low 5 bits (7 on Starfire) holding the IRQ identifier
 * being delivered.  We must translate this into a non-vector IRQ so
 * that we can set the softint on this cpu.
 *
 * To make processing these packets efficient and race free we use
 * an array of irq buckets below.  The interrupt vector handler in
 * entry.S feeds incoming packets into per-cpu pil-indexed lists.
 * The IVEC handler does not need to act atomically, the PIL dispatch
 * code uses CAS to get an atomic snapshot of the list and clear it
 * at the same time.
 *
 * If you make changes to ino_bucket, please update the hand-coded
 * assembler of the vectored interrupt trap handler(s) in entry.S and
 * sun4v_ivec.S.
 */
struct ino_bucket {
        /* Next handler in per-CPU IRQ worklist.  We know that
         * bucket pointers have the high 32-bits clear, so to
         * save space we only store the bits we need.
         */
/*0x00*/unsigned int irq_chain;

        /* Virtual interrupt number assigned to this INO.  */
/*0x04*/unsigned int virt_irq;
};

#define NUM_IVECS       (IMAP_INR + 1)
struct ino_bucket ivector_table[NUM_IVECS] __attribute__ ((aligned (SMP_CACHE_BYTES)));

#define __irq_ino(irq) \
        (((struct ino_bucket *)(unsigned long)(irq)) - &ivector_table[0])
#define __bucket(irq) ((struct ino_bucket *)(unsigned long)(irq))
#define __irq(bucket) ((unsigned int)(unsigned long)(bucket))
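
/* A "real irq" is just the 32-bit truncation of the address of an
 * ino_bucket (as the struct comment above notes, bucket pointers
 * always have the high 32 bits clear).  Illustrative round trip for
 * some INO value 'ino':
 *
 *      unsigned int irq = __irq(&ivector_table[ino]);
 *      struct ino_bucket *b = __bucket(irq);   // back to the bucket
 *      long i = __irq_ino(irq);                // back to 'ino'
 */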

/* This has to be in the main kernel image, it cannot be
 * turned into per-cpu data.  The reason is that the main
 * kernel image is locked into the TLB and this structure
 * is accessed from the vectored interrupt trap handler.  If
 * access to this structure takes a TLB miss it could cause
 * the 5-level sparc v9 trap stack to overflow.
 */
#define irq_work(__cpu) &(trap_block[(__cpu)].irq_worklist)

static unsigned int virt_to_real_irq_table[NR_IRQS];

static unsigned char virt_irq_alloc(unsigned int real_irq)
{
        unsigned char ent;

        /* Virtual IRQ numbers are handed out as unsigned chars,
         * so every one of them must fit in 8 bits.
         */
        BUILD_BUG_ON(NR_IRQS >= 256);

        for (ent = 1; ent < NR_IRQS; ent++) {
                if (!virt_to_real_irq_table[ent])
                        break;
        }
        if (ent >= NR_IRQS) {
                printk(KERN_ERR "IRQ: Out of virtual IRQs.\n");
                return 0;
        }

        virt_to_real_irq_table[ent] = real_irq;

        return ent;
}
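
/* Note that entry zero of virt_to_real_irq_table is never handed out:
 * virt_irq 0 doubles as "no virtual IRQ assigned", which is why the
 * scan above starts at 1 and why 0 is returned on exhaustion.  The
 * allocator is a simple linear search, cheap given that NR_IRQS is
 * capped below 256.
 */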

#ifdef CONFIG_PCI_MSI
static void virt_irq_free(unsigned int virt_irq)
{
        unsigned int real_irq;

        if (virt_irq >= NR_IRQS)
                return;

        real_irq = virt_to_real_irq_table[virt_irq];
        virt_to_real_irq_table[virt_irq] = 0;

        __bucket(real_irq)->virt_irq = 0;
}
#endif

static unsigned int virt_to_real_irq(unsigned char virt_irq)
{
        return virt_to_real_irq_table[virt_irq];
}

/*
 * /proc/interrupts printing:
 */

int show_interrupts(struct seq_file *p, void *v)
{
        int i = *(loff_t *) v, j;
        struct irqaction * action;
        unsigned long flags;

        if (i == 0) {
                seq_printf(p, "           ");
                for_each_online_cpu(j)
                        seq_printf(p, "CPU%d       ",j);
                seq_putc(p, '\n');
        }

        if (i < NR_IRQS) {
                spin_lock_irqsave(&irq_desc[i].lock, flags);
                action = irq_desc[i].action;
                if (!action)
                        goto skip;
                seq_printf(p, "%3d: ",i);
#ifndef CONFIG_SMP
                seq_printf(p, "%10u ", kstat_irqs(i));
#else
                for_each_online_cpu(j)
                        seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
#endif
                seq_printf(p, " %9s", irq_desc[i].chip->typename);
                seq_printf(p, "  %s", action->name);

                for (action=action->next; action; action = action->next)
                        seq_printf(p, ", %s", action->name);

                seq_putc(p, '\n');
skip:
                spin_unlock_irqrestore(&irq_desc[i].lock, flags);
        }
        return 0;
}
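
/* The layout printed above follows other architectures'
 * /proc/interrupts.  Illustrative output on a 2-cpu sun4u box (all
 * numbers and names made up):
 *
 *            CPU0       CPU1
 *   1:       5310          0      sun4u  serial
 *   2:      10422       9817      sun4u  eth0
 *
 * i.e. virtual IRQ, per-cpu kstat counts, irq_chip typename, then
 * the action name(s) of the attached handler(s).
 */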

static unsigned int sun4u_compute_tid(unsigned long imap, unsigned long cpuid)
{
        unsigned int tid;

        if (this_is_starfire) {
                tid = starfire_translate(imap, cpuid);
                tid <<= IMAP_TID_SHIFT;
                tid &= IMAP_TID_UPA;
        } else {
                if (tlb_type == cheetah || tlb_type == cheetah_plus) {
                        unsigned long ver;

                        __asm__ ("rdpr %%ver, %0" : "=r" (ver));
                        if ((ver >> 32UL) == __JALAPENO_ID ||
                            (ver >> 32UL) == __SERRANO_ID) {
                                tid = cpuid << IMAP_TID_SHIFT;
                                tid &= IMAP_TID_JBUS;
                        } else {
                                unsigned int a = cpuid & 0x1f;
                                unsigned int n = (cpuid >> 5) & 0x1f;

                                tid = ((a << IMAP_AID_SHIFT) |
                                       (n << IMAP_NID_SHIFT));
                                tid &= (IMAP_AID_SAFARI |
                                        IMAP_NID_SAFARI);
                        }
                } else {
                        tid = cpuid << IMAP_TID_SHIFT;
                        tid &= IMAP_TID_UPA;
                }
        }

        return tid;
}
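
/* The target ID format depends on the system bus.  Starfire has its
 * own translation, JBUS (Jalapeno/Serrano) takes the cpuid directly,
 * and Safari (Cheetah) splits the cpuid into a 5-bit agent id plus a
 * 5-bit node id.  Worked Safari example: cpuid 0x47 yields
 * a = 0x47 & 0x1f = 0x07 and n = (0x47 >> 5) & 0x1f = 0x02, which
 * land at IMAP_AID_SHIFT and IMAP_NID_SHIFT respectively.
 */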

struct irq_handler_data {
        unsigned long   iclr;
        unsigned long   imap;

        void            (*pre_handler)(unsigned int, void *, void *);
        void            *pre_handler_arg1;
        void            *pre_handler_arg2;
};

static inline struct ino_bucket *virt_irq_to_bucket(unsigned int virt_irq)
{
        unsigned int real_irq = virt_to_real_irq(virt_irq);
        struct ino_bucket *bucket = NULL;

        if (likely(real_irq))
                bucket = __bucket(real_irq);

        return bucket;
}

#ifdef CONFIG_SMP
static int irq_choose_cpu(unsigned int virt_irq)
{
        cpumask_t mask = irq_desc[virt_irq].affinity;
        int cpuid;

        if (cpus_equal(mask, CPU_MASK_ALL)) {
                static int irq_rover;
                static DEFINE_SPINLOCK(irq_rover_lock);
                unsigned long flags;

                /* Round-robin distribution...  The affinity case below
                 * jumps back here when none of the requested cpus are
                 * online.
                 */
        do_round_robin:
                spin_lock_irqsave(&irq_rover_lock, flags);

                while (!cpu_online(irq_rover)) {
                        if (++irq_rover >= NR_CPUS)
                                irq_rover = 0;
                }
                cpuid = irq_rover;
                do {
                        if (++irq_rover >= NR_CPUS)
                                irq_rover = 0;
                } while (!cpu_online(irq_rover));

                spin_unlock_irqrestore(&irq_rover_lock, flags);
        } else {
                cpumask_t tmp;

                cpus_and(tmp, cpu_online_map, mask);

                if (cpus_empty(tmp))
                        goto do_round_robin;

                cpuid = first_cpu(tmp);
        }

        return cpuid;
}
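
/* Note the two-step dance above: cpuid takes the current online rover
 * value, then the rover advances to the next online cpu, so repeated
 * enables spread unbound IRQs across the machine.  The goto into the
 * block from the affinity arm is legal C since none of the skipped
 * declarations carry initializers.
 */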
#else
static int irq_choose_cpu(unsigned int virt_irq)
{
        return real_hard_smp_processor_id();
}
#endif

static void sun4u_irq_enable(unsigned int virt_irq)
{
        struct irq_handler_data *data = get_irq_chip_data(virt_irq);

        if (likely(data)) {
                unsigned long cpuid, imap, val;
                unsigned int tid;

                cpuid = irq_choose_cpu(virt_irq);
                imap = data->imap;

                tid = sun4u_compute_tid(imap, cpuid);

                val = upa_readq(imap);
                val &= ~(IMAP_TID_UPA | IMAP_TID_JBUS |
                         IMAP_AID_SAFARI | IMAP_NID_SAFARI);
                val |= tid | IMAP_VALID;
                upa_writeq(val, imap);
        }
}
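
/* Enabling a sun4u IRQ is a read-modify-write of the IMAP register:
 * every possible TID field layout is masked out, then the freshly
 * computed target and the valid bit are OR-ed back in.  Masking all
 * of the layouts, not just the current bus's, keeps this routine
 * bus-agnostic.
 */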

static void sun4u_irq_disable(unsigned int virt_irq)
{
        struct irq_handler_data *data = get_irq_chip_data(virt_irq);

        if (likely(data)) {
                unsigned long imap = data->imap;
                /* upa_readq() returns a 64-bit value; keep it in an
                 * unsigned long so the upper IMAP bits are not
                 * truncated before being written back.
                 */
                unsigned long tmp = upa_readq(imap);

                tmp &= ~IMAP_VALID;
                upa_writeq(tmp, imap);
        }
}

static void sun4u_irq_end(unsigned int virt_irq)
{
        struct irq_handler_data *data = get_irq_chip_data(virt_irq);

        if (likely(data))
                upa_writeq(ICLR_IDLE, data->iclr);
}

static void sun4v_irq_enable(unsigned int virt_irq)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
        unsigned int ino = bucket - &ivector_table[0];

        if (likely(bucket)) {
                unsigned long cpuid;
                int err;

                cpuid = irq_choose_cpu(virt_irq);

                err = sun4v_intr_settarget(ino, cpuid);
                if (err != HV_EOK)
                        printk("sun4v_intr_settarget(%x,%lu): err(%d)\n",
                               ino, cpuid, err);
                err = sun4v_intr_setenabled(ino, HV_INTR_ENABLED);
                if (err != HV_EOK)
                        printk("sun4v_intr_setenabled(%x): err(%d)\n",
                               ino, err);
        }
}
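
/* On sun4v there are no IMAP/ICLR registers to poke; interrupt
 * routing is requested from the hypervisor instead.  Enabling is a
 * two-step conversation: bind the INO to a target cpu, then flip it
 * to the enabled state.  Each hypervisor call returns an HV_* status,
 * and anything other than HV_EOK is logged above.
 */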

static void sun4v_irq_disable(unsigned int virt_irq)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
        unsigned int ino = bucket - &ivector_table[0];

        if (likely(bucket)) {
                int err;

                err = sun4v_intr_setenabled(ino, HV_INTR_DISABLED);
                if (err != HV_EOK)
                        printk("sun4v_intr_setenabled(%x): "
                               "err(%d)\n", ino, err);
        }
}

#ifdef CONFIG_PCI_MSI
static void sun4v_msi_enable(unsigned int virt_irq)
{
        sun4v_irq_enable(virt_irq);
        unmask_msi_irq(virt_irq);
}

static void sun4v_msi_disable(unsigned int virt_irq)
{
        mask_msi_irq(virt_irq);
        sun4v_irq_disable(virt_irq);
}
#endif

static void sun4v_irq_end(unsigned int virt_irq)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
        unsigned int ino = bucket - &ivector_table[0];

        if (likely(bucket)) {
                int err;

                err = sun4v_intr_setstate(ino, HV_INTR_STATE_IDLE);
                if (err != HV_EOK)
                        printk("sun4v_intr_setstate(%x): "
                               "err(%d)\n", ino, err);
        }
}

static void sun4v_virq_enable(unsigned int virt_irq)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
        unsigned int ino = bucket - &ivector_table[0];

        if (likely(bucket)) {
                unsigned long cpuid, dev_handle, dev_ino;
                int err;

                cpuid = irq_choose_cpu(virt_irq);

                dev_handle = ino & IMAP_IGN;
                dev_ino = ino & IMAP_INO;

                err = sun4v_vintr_set_target(dev_handle, dev_ino, cpuid);
                if (err != HV_EOK)
                        printk("sun4v_vintr_set_target(%lx,%lx,%lu): "
                               "err(%d)\n",
                               dev_handle, dev_ino, cpuid, err);
                err = sun4v_vintr_set_state(dev_handle, dev_ino,
                                            HV_INTR_STATE_IDLE);
                if (err != HV_EOK)
                        printk("sun4v_vintr_set_state(%lx,%lx,"
                               "HV_INTR_STATE_IDLE): err(%d)\n",
                               dev_handle, dev_ino, err);
                err = sun4v_vintr_set_valid(dev_handle, dev_ino,
                                            HV_INTR_ENABLED);
                if (err != HV_EOK)
                        printk("sun4v_vintr_set_valid(%lx,%lx,"
                               "HV_INTR_ENABLED): err(%d)\n",
                               dev_handle, dev_ino, err);
        }
}

static void sun4v_virq_disable(unsigned int virt_irq)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
        unsigned int ino = bucket - &ivector_table[0];

        if (likely(bucket)) {
                unsigned long dev_handle, dev_ino;
                int err;

                dev_handle = ino & IMAP_IGN;
                dev_ino = ino & IMAP_INO;

                err = sun4v_vintr_set_valid(dev_handle, dev_ino,
                                            HV_INTR_DISABLED);
                if (err != HV_EOK)
                        printk("sun4v_vintr_set_valid(%lx,%lx,"
                               "HV_INTR_DISABLED): err(%d)\n",
                               dev_handle, dev_ino, err);
        }
}

static void sun4v_virq_end(unsigned int virt_irq)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
        unsigned int ino = bucket - &ivector_table[0];

        if (likely(bucket)) {
                unsigned long dev_handle, dev_ino;
                int err;

                dev_handle = ino & IMAP_IGN;
                dev_ino = ino & IMAP_INO;

                err = sun4v_vintr_set_state(dev_handle, dev_ino,
                                            HV_INTR_STATE_IDLE);
                if (err != HV_EOK)
                        printk("sun4v_vintr_set_state(%lx,%lx,"
                               "HV_INTR_STATE_IDLE): err(%d)\n",
                               dev_handle, dev_ino, err);
        }
}
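
/* For the cookie based virtual interrupts the hypervisor is addressed
 * by (dev_handle, dev_ino) pairs rather than by a flat sysino.  The
 * pair is recovered by masking the bucket index: the IGN bits hold
 * the device handle and the INO bits the device ino, matching how
 * sun4v_build_virq() composes the sysino below.
 */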

static void run_pre_handler(unsigned int virt_irq)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
        struct irq_handler_data *data = get_irq_chip_data(virt_irq);

        if (likely(data->pre_handler)) {
                data->pre_handler(__irq_ino(__irq(bucket)),
                                  data->pre_handler_arg1,
                                  data->pre_handler_arg2);
        }
}

static struct irq_chip sun4u_irq = {
        .typename       = "sun4u",
        .enable         = sun4u_irq_enable,
        .disable        = sun4u_irq_disable,
        .end            = sun4u_irq_end,
};

static struct irq_chip sun4u_irq_ack = {
        .typename       = "sun4u+ack",
        .enable         = sun4u_irq_enable,
        .disable        = sun4u_irq_disable,
        .ack            = run_pre_handler,
        .end            = sun4u_irq_end,
};

static struct irq_chip sun4v_irq = {
        .typename       = "sun4v",
        .enable         = sun4v_irq_enable,
        .disable        = sun4v_irq_disable,
        .end            = sun4v_irq_end,
};

static struct irq_chip sun4v_irq_ack = {
        .typename       = "sun4v+ack",
        .enable         = sun4v_irq_enable,
        .disable        = sun4v_irq_disable,
        .ack            = run_pre_handler,
        .end            = sun4v_irq_end,
};

#ifdef CONFIG_PCI_MSI
static struct irq_chip sun4v_msi = {
        .typename       = "sun4v+msi",
        .mask           = mask_msi_irq,
        .unmask         = unmask_msi_irq,
        .enable         = sun4v_msi_enable,
        .disable        = sun4v_msi_disable,
        .ack            = run_pre_handler,
        .end            = sun4v_irq_end,
};
#endif

static struct irq_chip sun4v_virq = {
        .typename       = "vsun4v",
        .enable         = sun4v_virq_enable,
        .disable        = sun4v_virq_disable,
        .end            = sun4v_virq_end,
};

static struct irq_chip sun4v_virq_ack = {
        .typename       = "vsun4v+ack",
        .enable         = sun4v_virq_enable,
        .disable        = sun4v_virq_disable,
        .ack            = run_pre_handler,
        .end            = sun4v_virq_end,
};

void irq_install_pre_handler(int virt_irq,
                             void (*func)(unsigned int, void *, void *),
                             void *arg1, void *arg2)
{
        struct irq_handler_data *data = get_irq_chip_data(virt_irq);
        struct irq_chip *chip;

        data->pre_handler = func;
        data->pre_handler_arg1 = arg1;
        data->pre_handler_arg2 = arg2;

        chip = get_irq_chip(virt_irq);
        if (chip == &sun4u_irq_ack ||
            chip == &sun4v_irq_ack ||
            chip == &sun4v_virq_ack
#ifdef CONFIG_PCI_MSI
            || chip == &sun4v_msi
#endif
            )
                return;

        chip = (chip == &sun4u_irq ?
                &sun4u_irq_ack :
                (chip == &sun4v_irq ?
                 &sun4v_irq_ack : &sun4v_virq_ack));
        set_irq_chip(virt_irq, chip);
}
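
/* Each chip comes in a plain and a "+ack" flavour; the only
 * difference is that the latter runs the installed pre-handler from
 * the generic IRQ layer's ->ack hook.  irq_install_pre_handler()
 * upgrades a plain chip to its +ack twin on demand, bailing out early
 * if the virt_irq already carries an acking chip (including the MSI
 * chip, which always acks).
 */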

unsigned int build_irq(int inofixup, unsigned long iclr, unsigned long imap)
{
        struct ino_bucket *bucket;
        struct irq_handler_data *data;
        int ino;

        BUG_ON(tlb_type == hypervisor);

        ino = (upa_readq(imap) & (IMAP_IGN | IMAP_INO)) + inofixup;
        bucket = &ivector_table[ino];
        if (!bucket->virt_irq) {
                bucket->virt_irq = virt_irq_alloc(__irq(bucket));
                set_irq_chip(bucket->virt_irq, &sun4u_irq);
        }

        data = get_irq_chip_data(bucket->virt_irq);
        if (unlikely(data))
                goto out;

        data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
        if (unlikely(!data)) {
                prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
                prom_halt();
        }
        set_irq_chip_data(bucket->virt_irq, data);

        data->imap  = imap;
        data->iclr  = iclr;

out:
        return bucket->virt_irq;
}
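
/* A sun4u bus driver hands build_irq() the physical addresses of a
 * device's ICLR/IMAP register pair and gets back a virtual IRQ for
 * request_irq().  A minimal sketch (register addresses and handler
 * are made up):
 *
 *      unsigned int irq = build_irq(0, iclr_pa, imap_pa);
 *      if (request_irq(irq, my_handler, 0, "mydev", dev))
 *              ...
 *
 * Calling build_irq() twice for the same INO is safe: the second
 * call finds virt_irq and chip data already set up and just returns
 * the existing virtual IRQ.
 */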

static unsigned int sun4v_build_common(unsigned long sysino,
                                       struct irq_chip *chip)
{
        struct ino_bucket *bucket;
        struct irq_handler_data *data;

        BUG_ON(tlb_type != hypervisor);

        bucket = &ivector_table[sysino];
        if (!bucket->virt_irq) {
                bucket->virt_irq = virt_irq_alloc(__irq(bucket));
                set_irq_chip(bucket->virt_irq, chip);
        }

        data = get_irq_chip_data(bucket->virt_irq);
        if (unlikely(data))
                goto out;

        data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
        if (unlikely(!data)) {
                prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
                prom_halt();
        }
        set_irq_chip_data(bucket->virt_irq, data);

        /* Catch accidental accesses to these things.  IMAP/ICLR handling
         * is done by hypervisor calls on sun4v platforms, not by direct
         * register accesses.
         */
        data->imap = ~0UL;
        data->iclr = ~0UL;

out:
        return bucket->virt_irq;
}

unsigned int sun4v_build_irq(u32 devhandle, unsigned int devino)
{
        unsigned long sysino = sun4v_devino_to_sysino(devhandle, devino);

        return sun4v_build_common(sysino, &sun4v_irq);
}

unsigned int sun4v_build_virq(u32 devhandle, unsigned int devino)
{
        unsigned long sysino, hv_err;

        BUG_ON(devhandle & ~IMAP_IGN);
        BUG_ON(devino & ~IMAP_INO);

        sysino = devhandle | devino;

        hv_err = sun4v_vintr_set_cookie(devhandle, devino, sysino);
        if (hv_err) {
                prom_printf("IRQ: Fatal, cannot set cookie for [%x:%x] "
                            "err=%lu\n", devhandle, devino, hv_err);
                prom_halt();
        }

        return sun4v_build_common(sysino, &sun4v_virq);
}
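
/* Two sysino flavours exist on sun4v.  For "real" device interrupts
 * the hypervisor computes the sysino from (devhandle, devino) via
 * sun4v_devino_to_sysino().  For cookie based virtual interrupts we
 * pick the sysino ourselves: the BUG_ONs above guarantee devhandle
 * and devino occupy disjoint bit ranges (IGN vs INO), so
 * "devhandle | devino" cannot collide, and that value is registered
 * with the hypervisor as the delivery cookie.
 */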

#ifdef CONFIG_PCI_MSI
unsigned int sun4v_build_msi(u32 devhandle, unsigned int *virt_irq_p,
                             unsigned int msi_start, unsigned int msi_end)
{
        struct ino_bucket *bucket;
        struct irq_handler_data *data;
        unsigned long sysino;
        unsigned int devino;

        BUG_ON(tlb_type != hypervisor);

        /* Find a free devino in the given range.  */
        for (devino = msi_start; devino < msi_end; devino++) {
                sysino = sun4v_devino_to_sysino(devhandle, devino);
                bucket = &ivector_table[sysino];
                if (!bucket->virt_irq)
                        break;
        }
        if (devino >= msi_end)
                return 0;

        sysino = sun4v_devino_to_sysino(devhandle, devino);
        bucket = &ivector_table[sysino];
        bucket->virt_irq = virt_irq_alloc(__irq(bucket));
        *virt_irq_p = bucket->virt_irq;
        set_irq_chip(bucket->virt_irq, &sun4v_msi);

        data = get_irq_chip_data(bucket->virt_irq);
        if (unlikely(data))
                return devino;

        data = kzalloc(sizeof(struct irq_handler_data), GFP_ATOMIC);
        if (unlikely(!data)) {
                prom_printf("IRQ: kzalloc(irq_handler_data) failed.\n");
                prom_halt();
        }
        set_irq_chip_data(bucket->virt_irq, data);

        data->imap = ~0UL;
        data->iclr = ~0UL;

        return devino;
}

void sun4v_destroy_msi(unsigned int virt_irq)
{
        virt_irq_free(virt_irq);
}
#endif

void ack_bad_irq(unsigned int virt_irq)
{
        struct ino_bucket *bucket = virt_irq_to_bucket(virt_irq);
        unsigned int ino = 0xdeadbeef;

        if (bucket)
                ino = bucket - &ivector_table[0];

        printk(KERN_CRIT "Unexpected IRQ from ino[%x] virt_irq[%u]\n",
               ino, virt_irq);
}

void handler_irq(int irq, struct pt_regs *regs)
{
        struct ino_bucket *bucket;
        struct pt_regs *old_regs;

        clear_softint(1 << irq);

        old_regs = set_irq_regs(regs);
        irq_enter();

        /* Sliiiick... */
        bucket = __bucket(xchg32(irq_work(smp_processor_id()), 0));
        while (bucket) {
                struct ino_bucket *next = __bucket(bucket->irq_chain);

                bucket->irq_chain = 0;
                __do_IRQ(bucket->virt_irq);

                bucket = next;
        }

        irq_exit();
        set_irq_regs(old_regs);
}
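
/* This is the race-free snapshot described at the top of the file:
 * the vector trap code in entry.S pushes buckets onto the per-cpu
 * irq_worklist head, and the single atomic xchg32() above both grabs
 * the whole chain and resets the list to empty.  Any vector trap
 * arriving after the xchg starts a fresh chain, so nothing is lost
 * while the detached chain is walked and dispatched.
 */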

struct sun5_timer {
        u64     count0;
        u64     limit0;
        u64     count1;
        u64     limit1;
};

static struct sun5_timer *prom_timers;
static u64 prom_limit0, prom_limit1;

static void map_prom_timers(void)
{
        struct device_node *dp;
        const unsigned int *addr;

        /* PROM timer node hangs out in the top level of device siblings... */
        dp = of_find_node_by_path("/");
        dp = dp->child;
        while (dp) {
                if (!strcmp(dp->name, "counter-timer"))
                        break;
                dp = dp->sibling;
        }

        /* Assume that if the node is not present, the PROM uses a
         * different tick mechanism which we need not care about.
         */
        if (!dp) {
                prom_timers = (struct sun5_timer *) 0;
                return;
        }

        /* If the PROM is really using this, it must be mapped by it. */
        addr = of_get_property(dp, "address", NULL);
        if (!addr) {
                prom_printf("PROM does not have timer mapped, trying to continue.\n");
                prom_timers = (struct sun5_timer *) 0;
                return;
        }
        prom_timers = (struct sun5_timer *) ((unsigned long)addr[0]);
}

static void kill_prom_timer(void)
{
        if (!prom_timers)
                return;

        /* Save them away for later. */
        prom_limit0 = prom_timers->limit0;
        prom_limit1 = prom_timers->limit1;

        /* Just as on sun4c/sun4m, the PROM uses a timer which ticks
         * at IRQ 14.  We turn both timers off here just to be paranoid.
         */
        prom_timers->limit0 = 0;
        prom_timers->limit1 = 0;

        /* Wheee, eat the interrupt packet too... */
        __asm__ __volatile__(
"       mov     0x40, %%g2\n"
"       ldxa    [%%g0] %0, %%g1\n"
"       ldxa    [%%g2] %1, %%g1\n"
"       stxa    %%g0, [%%g0] %0\n"
"       membar  #Sync\n"
        : /* no outputs */
        : "i" (ASI_INTR_RECEIVE), "i" (ASI_INTR_R)
        : "g1", "g2");
}

void init_irqwork_curcpu(void)
{
        int cpu = hard_smp_processor_id();

        trap_block[cpu].irq_worklist = 0;
}

/* Please be very careful with register_one_mondo() and
 * sun4v_register_mondo_queues().
 *
 * On SMP this gets invoked from the CPU trampoline before
 * the cpu has fully taken over the trap table from OBP,
 * and its kernel stack + %g6 thread register state is
 * not fully cooked yet.
 *
 * Therefore you cannot make any OBP calls, not even prom_printf,
 * from these two routines.
 */
static void __cpuinit register_one_mondo(unsigned long paddr, unsigned long type, unsigned long qmask)
{
        unsigned long num_entries = (qmask + 1) / 64;
        unsigned long status;

        status = sun4v_cpu_qconf(type, paddr, num_entries);
        if (status != HV_EOK) {
                prom_printf("SUN4V: sun4v_cpu_qconf(%lu:%lx:%lu) failed, "
                            "err %lu\n", type, paddr, num_entries, status);
                prom_halt();
        }
}

static void __cpuinit sun4v_register_mondo_queues(int this_cpu)
{
        struct trap_per_cpu *tb = &trap_block[this_cpu];

        register_one_mondo(tb->cpu_mondo_pa, HV_CPU_QUEUE_CPU_MONDO,
                           tb->cpu_mondo_qmask);
        register_one_mondo(tb->dev_mondo_pa, HV_CPU_QUEUE_DEVICE_MONDO,
                           tb->dev_mondo_qmask);
        register_one_mondo(tb->resum_mondo_pa, HV_CPU_QUEUE_RES_ERROR,
                           tb->resum_qmask);
        register_one_mondo(tb->nonresum_mondo_pa, HV_CPU_QUEUE_NONRES_ERROR,
                           tb->nonresum_qmask);
}
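
/* A queue's qmask is its size in bytes minus one, and each mondo
 * queue entry is 64 bytes, hence num_entries = (qmask + 1) / 64 in
 * register_one_mondo().  E.g. an 8KB cpu mondo queue would have
 * qmask 0x1fff and hold 128 entries.
 */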

static void __cpuinit alloc_one_mondo(unsigned long *pa_ptr, unsigned long qmask, int use_bootmem)
{
        unsigned long size = PAGE_ALIGN(qmask + 1);
        unsigned long order = get_order(size);
        void *p = NULL;

        if (use_bootmem) {
                p = __alloc_bootmem_low(size, size, 0);
        } else {
                struct page *page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, order);
                if (page)
                        p = page_address(page);
        }

        if (!p) {
                prom_printf("SUN4V: Error, cannot allocate mondo queue.\n");
                prom_halt();
        }

        *pa_ptr = __pa(p);
}

static void __cpuinit alloc_one_kbuf(unsigned long *pa_ptr, unsigned long qmask, int use_bootmem)
{
        unsigned long size = PAGE_ALIGN(qmask + 1);
        unsigned long order = get_order(size);
        void *p = NULL;

        if (use_bootmem) {
                p = __alloc_bootmem_low(size, size, 0);
        } else {
                struct page *page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, order);
                if (page)
                        p = page_address(page);
        }

        if (!p) {
                prom_printf("SUN4V: Error, cannot allocate kbuf page.\n");
                prom_halt();
        }

        *pa_ptr = __pa(p);
}

static void __cpuinit init_cpu_send_mondo_info(struct trap_per_cpu *tb, int use_bootmem)
{
#ifdef CONFIG_SMP
        void *page;

        BUILD_BUG_ON((NR_CPUS * sizeof(u16)) > (PAGE_SIZE - 64));

        if (use_bootmem)
                page = alloc_bootmem_low_pages(PAGE_SIZE);
        else
                page = (void *) get_zeroed_page(GFP_ATOMIC);

        if (!page) {
                prom_printf("SUN4V: Error, cannot allocate cpu mondo page.\n");
                prom_halt();
        }

        tb->cpu_mondo_block_pa = __pa(page);
        tb->cpu_list_pa = __pa(page + 64);
#endif
}
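
/* The single page allocated above is split in two: the first 64
 * bytes hold the outgoing cpu mondo data block, and the cpu list
 * (one u16 per target cpu) starts at offset 64.  The BUILD_BUG_ON
 * enforces that NR_CPUS u16 entries still fit in the rest of the
 * page.
 */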

/* Allocate and register the mondo and error queues for this cpu.  */
void __cpuinit sun4v_init_mondo_queues(int use_bootmem, int cpu, int alloc, int load)
{
        struct trap_per_cpu *tb = &trap_block[cpu];

        if (alloc) {
                alloc_one_mondo(&tb->cpu_mondo_pa, tb->cpu_mondo_qmask, use_bootmem);
                alloc_one_mondo(&tb->dev_mondo_pa, tb->dev_mondo_qmask, use_bootmem);
                alloc_one_mondo(&tb->resum_mondo_pa, tb->resum_qmask, use_bootmem);
                alloc_one_kbuf(&tb->resum_kernel_buf_pa, tb->resum_qmask, use_bootmem);
                alloc_one_mondo(&tb->nonresum_mondo_pa, tb->nonresum_qmask, use_bootmem);
                alloc_one_kbuf(&tb->nonresum_kernel_buf_pa, tb->nonresum_qmask, use_bootmem);

                init_cpu_send_mondo_info(tb, use_bootmem);
        }

        if (load) {
                if (cpu != hard_smp_processor_id()) {
                        prom_printf("SUN4V: init mondo on cpu %d not %d\n",
                                    cpu, hard_smp_processor_id());
                        prom_halt();
                }
                sun4v_register_mondo_queues(cpu);
        }
}

static struct irqaction timer_irq_action = {
        .name = "timer",
};

/* Only invoked on the boot processor. */
void __init init_IRQ(void)
{
        map_prom_timers();
        kill_prom_timer();
        memset(&ivector_table[0], 0, sizeof(ivector_table));

        if (tlb_type == hypervisor)
                sun4v_init_mondo_queues(1, hard_smp_processor_id(), 1, 1);

        /* We need to clear any IRQs pending in the soft interrupt
         * registers, since a spurious one could be left around from
         * the PROM timer which we just disabled.
         */
        clear_softint(get_softint());

        /* Now that the ivector table is initialized, it is safe
         * to receive IRQ vector traps.  We will normally take
         * one or two right now, in case some device PROM used
         * to boot us wants to speak to us.  We just ignore them.
         */
        __asm__ __volatile__("rdpr      %%pstate, %%g1\n\t"
                             "or        %%g1, %0, %%g1\n\t"
                             "wrpr      %%g1, 0x0, %%pstate"
                             : /* No outputs */
                             : "i" (PSTATE_IE)
                             : "g1");

        irq_desc[0].action = &timer_irq_action;
}