/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQS is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a Xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. PIRQs - Hardware interrupts.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/irqnr.h>
#include <linux/pci.h>

#include <asm/desc.h>
#include <asm/ptrace.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/io_apic.h>
#include <asm/sync_bitops.h>
#include <asm/xen/pci.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>

#include <xen/xen.h>
#include <xen/hvm.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_SPINLOCK(irq_mapping_update_lock);

static LIST_HEAD(xen_irq_list_head);

/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

/* Interrupt types. */
enum xen_irq_type {
        IRQT_UNBOUND = 0,
        IRQT_PIRQ,
        IRQT_VIRQ,
        IRQT_IPI,
        IRQT_EVTCHN
};

/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM
 *           guest, or GSI (real passthrough IRQ) of the device.
 *    VIRQ - virq number
 *    IPI - IPI vector
 *    EVTCHN -
 */
struct irq_info {
        struct list_head list;
        enum xen_irq_type type; /* type */
        unsigned irq;
        unsigned short evtchn;  /* event channel */
        unsigned short cpu;     /* cpu bound */

        union {
                unsigned short virq;
                enum ipi_vector ipi;
                struct {
                        unsigned short pirq;
                        unsigned short gsi;
                        unsigned char vector;
                        unsigned char flags;
                        uint16_t domid;
                } pirq;
        } u;
};
#define PIRQ_NEEDS_EOI  (1 << 0)
#define PIRQ_SHAREABLE  (1 << 1)
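/*
 * PIRQ_NEEDS_EOI means the hypervisor must be told (via PHYSDEVOP_eoi,
 * see pirq_unmask_notify()) when the interrupt has been handled;
 * PIRQ_SHAREABLE means the pirq may be bound BIND_PIRQ__WILL_SHARE
 * (see __startup_pirq()).
 */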

static int *evtchn_to_irq;

static DEFINE_PER_CPU(unsigned long [NR_EVENT_CHANNELS/BITS_PER_LONG],
                      cpu_evtchn_mask);

/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)       ((chn) != 0)

static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;

/* Get info for IRQ */
static struct irq_info *info_for_irq(unsigned irq)
{
        return irq_get_handler_data(irq);
}

/* Constructors for packed IRQ information. */
static void xen_irq_info_common_init(struct irq_info *info,
                                     unsigned irq,
                                     enum xen_irq_type type,
                                     unsigned short evtchn,
                                     unsigned short cpu)
{
        BUG_ON(info->type != IRQT_UNBOUND && info->type != type);

        info->type = type;
        info->irq = irq;
        info->evtchn = evtchn;
        info->cpu = cpu;

        evtchn_to_irq[evtchn] = irq;
}

static void xen_irq_info_evtchn_init(unsigned irq,
                                     unsigned short evtchn)
{
        struct irq_info *info = info_for_irq(irq);

        xen_irq_info_common_init(info, irq, IRQT_EVTCHN, evtchn, 0);
}

static void xen_irq_info_ipi_init(unsigned cpu,
                                  unsigned irq,
                                  unsigned short evtchn,
                                  enum ipi_vector ipi)
{
        struct irq_info *info = info_for_irq(irq);

        xen_irq_info_common_init(info, irq, IRQT_IPI, evtchn, 0);

        info->u.ipi = ipi;

        per_cpu(ipi_to_irq, cpu)[ipi] = irq;
}

static void xen_irq_info_virq_init(unsigned cpu,
                                   unsigned irq,
                                   unsigned short evtchn,
                                   unsigned short virq)
{
        struct irq_info *info = info_for_irq(irq);

        xen_irq_info_common_init(info, irq, IRQT_VIRQ, evtchn, 0);

        info->u.virq = virq;

        per_cpu(virq_to_irq, cpu)[virq] = irq;
}

static void xen_irq_info_pirq_init(unsigned irq,
                                   unsigned short evtchn,
                                   unsigned short pirq,
                                   unsigned short gsi,
                                   unsigned short vector,
                                   uint16_t domid,
                                   unsigned char flags)
{
        struct irq_info *info = info_for_irq(irq);

        xen_irq_info_common_init(info, irq, IRQT_PIRQ, evtchn, 0);

        info->u.pirq.pirq = pirq;
        info->u.pirq.gsi = gsi;
        info->u.pirq.vector = vector;
        info->u.pirq.domid = domid;
        info->u.pirq.flags = flags;
}

/*
 * Accessors for packed IRQ information.
 */
static unsigned int evtchn_from_irq(unsigned irq)
{
        /* irq is unsigned, so only the upper bound needs checking */
        if (unlikely(WARN(irq >= nr_irqs, "Invalid irq %d!\n", irq)))
                return 0;

        return info_for_irq(irq)->evtchn;
}

unsigned irq_from_evtchn(unsigned int evtchn)
{
        return evtchn_to_irq[evtchn];
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);

static enum ipi_vector ipi_from_irq(unsigned irq)
{
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(info == NULL);
        BUG_ON(info->type != IRQT_IPI);

        return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(info == NULL);
        BUG_ON(info->type != IRQT_VIRQ);

        return info->u.virq;
}

static unsigned pirq_from_irq(unsigned irq)
{
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(info == NULL);
        BUG_ON(info->type != IRQT_PIRQ);

        return info->u.pirq.pirq;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
        return info_for_irq(irq)->type;
}

static unsigned cpu_from_irq(unsigned irq)
{
        return info_for_irq(irq)->cpu;
}

static unsigned int cpu_from_evtchn(unsigned int evtchn)
{
        int irq = evtchn_to_irq[evtchn];
        unsigned ret = 0;

        if (irq != -1)
                ret = cpu_from_irq(irq);

        return ret;
}

static bool pirq_needs_eoi(unsigned irq)
{
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(info->type != IRQT_PIRQ);

        return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}

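/*
 * A port fires on this CPU only if it is pending, not globally masked
 * and bound to this CPU; the helper below ANDs one word of each of the
 * three bitmaps accordingly.
 */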
static inline unsigned long active_evtchns(unsigned int cpu,
                                           struct shared_info *sh,
                                           unsigned int idx)
{
        return (sh->evtchn_pending[idx] &
                per_cpu(cpu_evtchn_mask, cpu)[idx] &
                ~sh->evtchn_mask[idx]);
}

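/*
 * Record that a port is bound to a CPU: update the irq affinity, move
 * the port between the per-cpu filter bitmaps, and remember the CPU in
 * the irq_info.
 */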
static void bind_evtchn_to_cpu(unsigned int chn, unsigned int cpu)
{
        int irq = evtchn_to_irq[chn];

        BUG_ON(irq == -1);
#ifdef CONFIG_SMP
        cpumask_copy(irq_to_desc(irq)->irq_data.affinity, cpumask_of(cpu));
#endif

        clear_bit(chn, per_cpu(cpu_evtchn_mask, cpu_from_irq(irq)));
        set_bit(chn, per_cpu(cpu_evtchn_mask, cpu));

        info_for_irq(irq)->cpu = cpu;
}

static void init_evtchn_cpu_bindings(void)
{
        int i;
#ifdef CONFIG_SMP
        struct irq_info *info;

        /* By default all event channels notify CPU#0. */
        list_for_each_entry(info, &xen_irq_list_head, list) {
                struct irq_desc *desc = irq_to_desc(info->irq);
                cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
        }
#endif

        for_each_possible_cpu(i)
                memset(per_cpu(cpu_evtchn_mask, i),
                       (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i)));
}

static inline void clear_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        sync_clear_bit(port, &s->evtchn_pending[0]);
}

static inline void set_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        sync_set_bit(port, &s->evtchn_pending[0]);
}

static inline int test_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        return sync_test_bit(port, &s->evtchn_pending[0]);
}

/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);

static void mask_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        sync_set_bit(port, &s->evtchn_mask[0]);
}

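/*
 * Unmask a port. A foreign port (bound to another vcpu) must be
 * unmasked via hypercall (the slow path); a local one is unmasked
 * directly in the shared info page, re-raising the upcall if the port
 * became pending while it was masked.
 */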
static void unmask_evtchn(int port)
{
        struct shared_info *s = HYPERVISOR_shared_info;
        unsigned int cpu = get_cpu();

        BUG_ON(!irqs_disabled());

        /* Slow path (hypercall) if this is a non-local port. */
        if (unlikely(cpu != cpu_from_evtchn(port))) {
                struct evtchn_unmask unmask = { .port = port };
                (void)HYPERVISOR_event_channel_op(EVTCHNOP_unmask, &unmask);
        } else {
                struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);

                sync_clear_bit(port, &s->evtchn_mask[0]);

                /*
                 * The following is basically the equivalent of
                 * 'hw_resend_irq'. Just like a real IO-APIC we 'lose
                 * the interrupt edge' if the channel is masked.
                 */
                if (sync_test_bit(port, &s->evtchn_pending[0]) &&
                    !sync_test_and_set_bit(port / BITS_PER_LONG,
                                           &vcpu_info->evtchn_pending_sel))
                        vcpu_info->evtchn_upcall_pending = 1;
        }

        put_cpu();
}

static void xen_irq_init(unsigned irq)
{
        struct irq_info *info;
        struct irq_desc *desc = irq_to_desc(irq);

#ifdef CONFIG_SMP
        /* By default all event channels notify CPU#0. */
        cpumask_copy(desc->irq_data.affinity, cpumask_of(0));
#endif

        info = kzalloc(sizeof(*info), GFP_KERNEL);
        if (info == NULL)
                panic("Unable to allocate metadata for IRQ%d\n", irq);

        info->type = IRQT_UNBOUND;

        irq_set_handler_data(irq, info);

        list_add_tail(&info->list, &xen_irq_list_head);
}

static int __must_check xen_allocate_irq_dynamic(void)
{
        int first = 0;
        int irq;

#ifdef CONFIG_X86_IO_APIC
        /*
         * For an HVM guest or domain 0, which see "real" (emulated or
         * actual, respectively) GSIs, we allocate dynamic IRQs
         * (e.g. those corresponding to event channels or MSIs)
         * from the range above those "real" GSIs to avoid
         * collisions.
         */
        if (xen_initial_domain() || xen_hvm_domain())
                first = get_nr_irqs_gsi();
#endif

        irq = irq_alloc_desc_from(first, -1);

        xen_irq_init(irq);

        return irq;
}

static int __must_check xen_allocate_irq_gsi(unsigned gsi)
{
        int irq;

        /*
         * A PV guest has no concept of a GSI (since it has no ACPI
         * nor access to/knowledge of the physical APICs). Therefore
         * all IRQs are dynamically allocated from the entire IRQ
         * space.
         */
        if (xen_pv_domain() && !xen_initial_domain())
                return xen_allocate_irq_dynamic();

        /* Legacy IRQ descriptors are already allocated by the arch. */
        if (gsi < NR_IRQS_LEGACY)
                irq = gsi;
        else
                irq = irq_alloc_desc_at(gsi, -1);

        xen_irq_init(irq);

        return irq;
}

static void xen_free_irq(unsigned irq)
{
        struct irq_info *info = irq_get_handler_data(irq);

        list_del(&info->list);

        irq_set_handler_data(irq, NULL);

        kfree(info);

        /* Legacy IRQ descriptors are managed by the arch. */
        if (irq < NR_IRQS_LEGACY)
                return;

        irq_free_desc(irq);
}

static void pirq_unmask_notify(int irq)
{
        struct physdev_eoi eoi = { .irq = pirq_from_irq(irq) };

        if (unlikely(pirq_needs_eoi(irq))) {
                int rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
                WARN_ON(rc);
        }
}

static void pirq_query_unmask(int irq)
{
        struct physdev_irq_status_query irq_status;
        struct irq_info *info = info_for_irq(irq);

        BUG_ON(info->type != IRQT_PIRQ);

        irq_status.irq = pirq_from_irq(irq);
        if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
                irq_status.flags = 0;

        info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
        if (irq_status.flags & XENIRQSTAT_needs_eoi)
                info->u.pirq.flags |= PIRQ_NEEDS_EOI;
}

static bool probing_irq(int irq)
{
        struct irq_desc *desc = irq_to_desc(irq);

        return desc && desc->action == NULL;
}

static unsigned int __startup_pirq(unsigned int irq)
{
        struct evtchn_bind_pirq bind_pirq;
        struct irq_info *info = info_for_irq(irq);
        int evtchn = evtchn_from_irq(irq);
        int rc;

        BUG_ON(info->type != IRQT_PIRQ);

        if (VALID_EVTCHN(evtchn))
                goto out;

        bind_pirq.pirq = pirq_from_irq(irq);
        /* NB. We are happy to share unless we are probing. */
        bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
                                        BIND_PIRQ__WILL_SHARE : 0;
        rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
        if (rc != 0) {
                if (!probing_irq(irq))
                        printk(KERN_INFO "Failed to obtain physical IRQ %d\n",
                               irq);
                return 0;
        }
        evtchn = bind_pirq.port;

        pirq_query_unmask(irq);

        evtchn_to_irq[evtchn] = irq;
        bind_evtchn_to_cpu(evtchn, 0);
        info->evtchn = evtchn;

out:
        unmask_evtchn(evtchn);
        pirq_unmask_notify(irq);

        return 0;
}

static unsigned int startup_pirq(struct irq_data *data)
{
        return __startup_pirq(data->irq);
}

static void shutdown_pirq(struct irq_data *data)
{
        struct evtchn_close close;
        unsigned int irq = data->irq;
        struct irq_info *info = info_for_irq(irq);
        int evtchn = evtchn_from_irq(irq);

        BUG_ON(info->type != IRQT_PIRQ);

        if (!VALID_EVTCHN(evtchn))
                return;

        mask_evtchn(evtchn);

        close.port = evtchn;
        if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
                BUG();

        bind_evtchn_to_cpu(evtchn, 0);
        evtchn_to_irq[evtchn] = -1;
        info->evtchn = 0;
}

static void enable_pirq(struct irq_data *data)
{
        startup_pirq(data);
}

static void disable_pirq(struct irq_data *data)
{
}

static void ack_pirq(struct irq_data *data)
{
        int evtchn = evtchn_from_irq(data->irq);

        irq_move_irq(data);

        if (VALID_EVTCHN(evtchn)) {
                mask_evtchn(evtchn);
                clear_evtchn(evtchn);
        }
}

static int find_irq_by_gsi(unsigned gsi)
{
        struct irq_info *info;

        list_for_each_entry(info, &xen_irq_list_head, list) {
                if (info->type != IRQT_PIRQ)
                        continue;

                if (info->u.pirq.gsi == gsi)
                        return info->irq;
        }

        return -1;
}

int xen_allocate_pirq_gsi(unsigned gsi)
{
        return gsi;
}

/*
 * Do not make any assumptions regarding the relationship between the
 * IRQ number returned here and the Xen pirq argument.
 *
 * Note: We don't assign an event channel until the irq has actually
 * been started up.  Return an existing irq if we've already got one
 * for the gsi.
 */
int xen_bind_pirq_gsi_to_irq(unsigned gsi,
                             unsigned pirq, int shareable, char *name)
{
        int irq = -1;
        struct physdev_irq irq_op;

        spin_lock(&irq_mapping_update_lock);

        irq = find_irq_by_gsi(gsi);
        if (irq != -1) {
                printk(KERN_INFO "%s: returning irq %d for gsi %u\n",
                       __func__, irq, gsi);
                goto out;       /* XXX need refcount? */
        }

        irq = xen_allocate_irq_gsi(gsi);
        if (irq < 0)
                goto out;

        irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_level_irq,
                                      name);

        irq_op.irq = irq;
        irq_op.vector = 0;

        /* Only the privileged domain can do this. For non-priv, the pcifront
         * driver provides a PCI bus that does the call to do exactly
         * this in the priv domain. */
        if (xen_initial_domain() &&
            HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
                xen_free_irq(irq);
                irq = -ENOSPC;
                goto out;
        }

        xen_irq_info_pirq_init(irq, 0, pirq, gsi, irq_op.vector, DOMID_SELF,
                               shareable ? PIRQ_SHAREABLE : 0);

out:
        spin_unlock(&irq_mapping_update_lock);

        return irq;
}

#ifdef CONFIG_PCI_MSI
int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
{
        int rc;
        struct physdev_get_free_pirq op_get_free_pirq;

        op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI;
        rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);

        WARN_ONCE(rc == -ENOSYS,
                  "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n");

        return rc ? -1 : op_get_free_pirq.pirq;
}

int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
                             int pirq, int vector, const char *name,
                             domid_t domid)
{
        int irq, ret;

        spin_lock(&irq_mapping_update_lock);

        irq = xen_allocate_irq_dynamic();
        if (irq == -1)
                goto out;

        irq_set_chip_and_handler_name(irq, &xen_pirq_chip, handle_level_irq,
                                      name);

        xen_irq_info_pirq_init(irq, 0, pirq, 0, vector, domid, 0);
        ret = irq_set_msi_desc(irq, msidesc);
        if (ret < 0)
                goto error_irq;
out:
        spin_unlock(&irq_mapping_update_lock);
        return irq;
error_irq:
        spin_unlock(&irq_mapping_update_lock);
        xen_free_irq(irq);
        return -1;
}
#endif

int xen_destroy_irq(int irq)
{
        struct irq_desc *desc;
        struct physdev_unmap_pirq unmap_irq;
        struct irq_info *info = info_for_irq(irq);
        int rc = -ENOENT;

        spin_lock(&irq_mapping_update_lock);

        desc = irq_to_desc(irq);
        if (!desc)
                goto out;

        if (xen_initial_domain()) {
                unmap_irq.pirq = info->u.pirq.pirq;
                unmap_irq.domid = info->u.pirq.domid;
                rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
                /* If another domain quits without making the pci_disable_msix
                 * call, the Xen hypervisor takes care of freeing the PIRQs
                 * (free_domain_pirqs).
                 */
                if (rc == -ESRCH && info->u.pirq.domid != DOMID_SELF)
                        printk(KERN_INFO "domain %d does not have pirq %d anymore\n",
                                info->u.pirq.domid, info->u.pirq.pirq);
                else if (rc) {
                        printk(KERN_WARNING "unmap irq failed %d\n", rc);
                        goto out;
                }
        }

        xen_free_irq(irq);

out:
        spin_unlock(&irq_mapping_update_lock);
        return rc;
}

int xen_irq_from_pirq(unsigned pirq)
{
        int irq;
        struct irq_info *info;

        spin_lock(&irq_mapping_update_lock);

        list_for_each_entry(info, &xen_irq_list_head, list) {
                if (info->type != IRQT_PIRQ)
                        continue;
                irq = info->irq;
                if (info->u.pirq.pirq == pirq)
                        goto out;
        }
        irq = -1;
out:
        spin_unlock(&irq_mapping_update_lock);

        return irq;
}

int xen_pirq_from_irq(unsigned irq)
{
        return pirq_from_irq(irq);
}
EXPORT_SYMBOL_GPL(xen_pirq_from_irq);

int bind_evtchn_to_irq(unsigned int evtchn)
{
        int irq;

        spin_lock(&irq_mapping_update_lock);

        irq = evtchn_to_irq[evtchn];

        if (irq == -1) {
                irq = xen_allocate_irq_dynamic();
                if (irq == -1)
                        goto out;

                irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
                                              handle_fasteoi_irq, "event");

                xen_irq_info_evtchn_init(irq, evtchn);
        }

out:
        spin_unlock(&irq_mapping_update_lock);

        return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);

static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
        struct evtchn_bind_ipi bind_ipi;
        int evtchn, irq;

        spin_lock(&irq_mapping_update_lock);

        irq = per_cpu(ipi_to_irq, cpu)[ipi];

        if (irq == -1) {
                irq = xen_allocate_irq_dynamic();
                if (irq < 0)
                        goto out;

                irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
                                              handle_percpu_irq, "ipi");

                bind_ipi.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
                                                &bind_ipi) != 0)
                        BUG();
                evtchn = bind_ipi.port;

                xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);

                bind_evtchn_to_cpu(evtchn, cpu);
        }

out:
        spin_unlock(&irq_mapping_update_lock);
        return irq;
}

static int bind_interdomain_evtchn_to_irq(unsigned int remote_domain,
                                          unsigned int remote_port)
{
        struct evtchn_bind_interdomain bind_interdomain;
        int err;

        bind_interdomain.remote_dom  = remote_domain;
        bind_interdomain.remote_port = remote_port;

        err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
                                          &bind_interdomain);

        return err ? : bind_evtchn_to_irq(bind_interdomain.local_port);
}

int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
{
        struct evtchn_bind_virq bind_virq;
        int evtchn, irq;

        spin_lock(&irq_mapping_update_lock);

        irq = per_cpu(virq_to_irq, cpu)[virq];

        if (irq == -1) {
                irq = xen_allocate_irq_dynamic();
                if (irq == -1)
                        goto out;

                irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
                                              handle_percpu_irq, "virq");

                bind_virq.virq = virq;
                bind_virq.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
                                                &bind_virq) != 0)
                        BUG();
                evtchn = bind_virq.port;

                xen_irq_info_virq_init(cpu, irq, evtchn, virq);

                bind_evtchn_to_cpu(evtchn, cpu);
        }

out:
        spin_unlock(&irq_mapping_update_lock);

        return irq;
}

static void unbind_from_irq(unsigned int irq)
{
        struct evtchn_close close;
        int evtchn = evtchn_from_irq(irq);

        spin_lock(&irq_mapping_update_lock);

        if (VALID_EVTCHN(evtchn)) {
                close.port = evtchn;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
                        BUG();

                switch (type_from_irq(irq)) {
                case IRQT_VIRQ:
                        per_cpu(virq_to_irq, cpu_from_evtchn(evtchn))
                                [virq_from_irq(irq)] = -1;
                        break;
                case IRQT_IPI:
                        per_cpu(ipi_to_irq, cpu_from_evtchn(evtchn))
                                [ipi_from_irq(irq)] = -1;
                        break;
                default:
                        break;
                }

                /* Closed ports are implicitly re-bound to VCPU0. */
                bind_evtchn_to_cpu(evtchn, 0);

                evtchn_to_irq[evtchn] = -1;
        }

        BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);

        xen_free_irq(irq);

        spin_unlock(&irq_mapping_update_lock);
}

int bind_evtchn_to_irqhandler(unsigned int evtchn,
                              irq_handler_t handler,
                              unsigned long irqflags,
                              const char *devname, void *dev_id)
{
        int irq, retval;

        irq = bind_evtchn_to_irq(evtchn);
        if (irq < 0)
                return irq;
        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
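
/*
 * Illustrative use (a sketch, not code from elsewhere in the tree):
 * a frontend that has been told its backend's event-channel port can
 * bind it and install a handler in one step; my_handler and my_dev
 * are placeholder names:
 *
 *	irq = bind_evtchn_to_irqhandler(evtchn, my_handler, 0,
 *					"my-frontend", my_dev);
 *	if (irq < 0)
 *		return irq;
 *	...
 *	unbind_from_irqhandler(irq, my_dev);
 */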

int bind_interdomain_evtchn_to_irqhandler(unsigned int remote_domain,
                                          unsigned int remote_port,
                                          irq_handler_t handler,
                                          unsigned long irqflags,
                                          const char *devname,
                                          void *dev_id)
{
        int irq, retval;

        irq = bind_interdomain_evtchn_to_irq(remote_domain, remote_port);
        if (irq < 0)
                return irq;

        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler);

int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
                            irq_handler_t handler,
                            unsigned long irqflags, const char *devname, void *dev_id)
{
        int irq, retval;

        irq = bind_virq_to_irq(virq, cpu);
        if (irq < 0)
                return irq;
        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);

int bind_ipi_to_irqhandler(enum ipi_vector ipi,
                           unsigned int cpu,
                           irq_handler_t handler,
                           unsigned long irqflags,
                           const char *devname,
                           void *dev_id)
{
        int irq, retval;

        irq = bind_ipi_to_irq(ipi, cpu);
        if (irq < 0)
                return irq;

        irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME;
        retval = request_irq(irq, handler, irqflags, devname, dev_id);
        if (retval != 0) {
                unbind_from_irq(irq);
                return retval;
        }

        return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
        free_irq(irq, dev_id);
        unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);

void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
        int irq = per_cpu(ipi_to_irq, cpu)[vector];
        BUG_ON(irq < 0);
        notify_remote_via_irq(irq);
}

irqreturn_t xen_debug_interrupt(int irq, void *dev_id)
{
        struct shared_info *sh = HYPERVISOR_shared_info;
        int cpu = smp_processor_id();
        unsigned long *cpu_evtchn = per_cpu(cpu_evtchn_mask, cpu);
        int i;
        unsigned long flags;
        static DEFINE_SPINLOCK(debug_lock);
        struct vcpu_info *v;

        spin_lock_irqsave(&debug_lock, flags);

        printk("\nvcpu %d\n  ", cpu);

        for_each_online_cpu(i) {
                int pending;
                v = per_cpu(xen_vcpu, i);
                pending = (get_irq_regs() && i == cpu)
                        ? xen_irqs_disabled(get_irq_regs())
                        : v->evtchn_upcall_mask;
                printk("%d: masked=%d pending=%d event_sel %0*lx\n  ", i,
                       pending, v->evtchn_upcall_pending,
                       (int)(sizeof(v->evtchn_pending_sel)*2),
                       v->evtchn_pending_sel);
        }
        v = per_cpu(xen_vcpu, cpu);

        printk("\npending:\n   ");
        for (i = ARRAY_SIZE(sh->evtchn_pending)-1; i >= 0; i--)
                printk("%0*lx%s", (int)sizeof(sh->evtchn_pending[0])*2,
                       sh->evtchn_pending[i],
                       i % 8 == 0 ? "\n   " : " ");
        printk("\nglobal mask:\n   ");
        for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
                printk("%0*lx%s",
                       (int)(sizeof(sh->evtchn_mask[0])*2),
                       sh->evtchn_mask[i],
                       i % 8 == 0 ? "\n   " : " ");

        printk("\nglobally unmasked:\n   ");
        for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--)
                printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
                       sh->evtchn_pending[i] & ~sh->evtchn_mask[i],
                       i % 8 == 0 ? "\n   " : " ");

        printk("\nlocal cpu%d mask:\n   ", cpu);
        for (i = (NR_EVENT_CHANNELS/BITS_PER_LONG)-1; i >= 0; i--)
                printk("%0*lx%s", (int)(sizeof(cpu_evtchn[0])*2),
                       cpu_evtchn[i],
                       i % 8 == 0 ? "\n   " : " ");

        printk("\nlocally unmasked:\n   ");
        for (i = ARRAY_SIZE(sh->evtchn_mask)-1; i >= 0; i--) {
                unsigned long pending = sh->evtchn_pending[i]
                        & ~sh->evtchn_mask[i]
                        & cpu_evtchn[i];
                printk("%0*lx%s", (int)(sizeof(sh->evtchn_mask[0])*2),
                       pending, i % 8 == 0 ? "\n   " : " ");
        }

        printk("\npending list:\n");
        for (i = 0; i < NR_EVENT_CHANNELS; i++) {
                if (sync_test_bit(i, sh->evtchn_pending)) {
                        int word_idx = i / BITS_PER_LONG;
                        printk("  %d: event %d -> irq %d%s%s%s\n",
                               cpu_from_evtchn(i), i,
                               evtchn_to_irq[i],
                               sync_test_bit(word_idx, &v->evtchn_pending_sel)
                                             ? "" : " l2-clear",
                               !sync_test_bit(i, sh->evtchn_mask)
                                             ? "" : " globally-masked",
                               sync_test_bit(i, cpu_evtchn)
                                             ? "" : " locally-masked");
                }
        }

        spin_unlock_irqrestore(&debug_lock, flags);

        return IRQ_HANDLED;
}

static DEFINE_PER_CPU(unsigned, xed_nesting_count);
static DEFINE_PER_CPU(unsigned int, current_word_idx);
static DEFINE_PER_CPU(unsigned int, current_bit_idx);
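
/*
 * current_word_idx/current_bit_idx record where the previous scan
 * stopped; __xen_evtchn_do_upcall() resumes just after that position
 * (see "Next caller starts at last processed + 1" below) so that
 * low-numbered ports cannot starve higher-numbered ones.
 */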

/*
 * Mask out the i least significant bits of w
 */
#define MASK_LSBS(w, i) ((w) & ((~0UL) << (i)))
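/*
 * E.g. MASK_LSBS(0xff, 4) == 0xf0: bits 0-3 are cleared, the rest are
 * preserved.
 */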

/*
 * Search the CPU's pending-event bitmasks.  For each one found, map
 * the event number to an irq, and feed it into do_IRQ() for
 * handling.
 *
 * Xen uses a two-level bitmap to speed searching.  The first level is
 * a bitset of words which contain pending event bits.  The second
 * level is a bitset of pending events themselves.
 */
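/*
 * Worked example (64-bit): port 130 = 2*BITS_PER_LONG + 2, so it is
 * bit 2 of evtchn_pending[2], and bit 2 of evtchn_pending_sel says
 * whether word 2 contains any pending ports at all.
 */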
static void __xen_evtchn_do_upcall(void)
{
        int start_word_idx, start_bit_idx;
        int word_idx, bit_idx;
        int i;
        int cpu = get_cpu();
        struct shared_info *s = HYPERVISOR_shared_info;
        struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
        unsigned count;

        do {
                unsigned long pending_words;

                vcpu_info->evtchn_upcall_pending = 0;

                if (__this_cpu_inc_return(xed_nesting_count) - 1)
                        goto out;

#ifndef CONFIG_X86 /* No need for a barrier -- XCHG is a barrier on x86. */
                /* Clear master flag /before/ clearing selector flag. */
                wmb();
#endif
                pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0);

                start_word_idx = __this_cpu_read(current_word_idx);
                start_bit_idx = __this_cpu_read(current_bit_idx);

                word_idx = start_word_idx;

                for (i = 0; pending_words != 0; i++) {
                        unsigned long pending_bits;
                        unsigned long words;

                        words = MASK_LSBS(pending_words, word_idx);

                        /*
                         * If we masked out all events, wrap to beginning.
                         */
                        if (words == 0) {
                                word_idx = 0;
                                bit_idx = 0;
                                continue;
                        }
                        word_idx = __ffs(words);

                        pending_bits = active_evtchns(cpu, s, word_idx);
                        bit_idx = 0; /* usually scan entire word from start */
                        if (word_idx == start_word_idx) {
                                /* We scan the starting word in two parts */
                                if (i == 0)
                                        /* 1st time: start in the middle */
                                        bit_idx = start_bit_idx;
                                else
                                        /* 2nd time: mask bits done already */
                                        bit_idx &= (1UL << start_bit_idx) - 1;
                        }

                        do {
                                unsigned long bits;
                                int port, irq;
                                struct irq_desc *desc;

                                bits = MASK_LSBS(pending_bits, bit_idx);

                                /* If we masked out all events, move on. */
                                if (bits == 0)
                                        break;

                                bit_idx = __ffs(bits);

                                /* Process port. */
                                port = (word_idx * BITS_PER_LONG) + bit_idx;
                                irq = evtchn_to_irq[port];

                                mask_evtchn(port);
                                clear_evtchn(port);

                                if (irq != -1) {
                                        desc = irq_to_desc(irq);
                                        if (desc)
                                                generic_handle_irq_desc(irq, desc);
                                }

                                bit_idx = (bit_idx + 1) % BITS_PER_LONG;

                                /* Next caller starts at last processed + 1 */
                                __this_cpu_write(current_word_idx,
                                                 bit_idx ? word_idx :
                                                 (word_idx+1) % BITS_PER_LONG);
                                __this_cpu_write(current_bit_idx, bit_idx);
                        } while (bit_idx != 0);

                        /* Scan start_word_idx twice; all others once. */
                        if ((word_idx != start_word_idx) || (i != 0))
                                pending_words &= ~(1UL << word_idx);

                        word_idx = (word_idx + 1) % BITS_PER_LONG;
                }

                BUG_ON(!irqs_disabled());

                count = __this_cpu_read(xed_nesting_count);
                __this_cpu_write(xed_nesting_count, 0);
        } while (count != 1 || vcpu_info->evtchn_upcall_pending);

out:

        put_cpu();
}

void xen_evtchn_do_upcall(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        exit_idle();
        irq_enter();

        __xen_evtchn_do_upcall();

        irq_exit();
        set_irq_regs(old_regs);
}

void xen_hvm_evtchn_do_upcall(void)
{
        __xen_evtchn_do_upcall();
}
EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);

/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(int evtchn, int irq)
{
        struct irq_info *info = info_for_irq(irq);

        /* Make sure the irq is masked, since the new event channel
           will also be masked. */
        disable_irq(irq);

        spin_lock(&irq_mapping_update_lock);

        /* After resume the irq<->evtchn mappings are all cleared out */
        BUG_ON(evtchn_to_irq[evtchn] != -1);
        /* Expect irq to have been bound before,
           so there should be a proper type */
        BUG_ON(info->type == IRQT_UNBOUND);

        xen_irq_info_evtchn_init(irq, evtchn);

        spin_unlock(&irq_mapping_update_lock);

        /* new event channels are always bound to cpu 0 */
        irq_set_affinity(irq, cpumask_of(0));

        /* Unmask the event channel. */
        enable_irq(irq);
}

/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int rebind_irq_to_cpu(unsigned irq, unsigned tcpu)
{
        struct evtchn_bind_vcpu bind_vcpu;
        int evtchn = evtchn_from_irq(irq);

        if (!VALID_EVTCHN(evtchn))
                return -1;

        /*
         * Events delivered via platform PCI interrupts are always
         * routed to vcpu 0 and hence cannot be rebound.
         */
        if (xen_hvm_domain() && !xen_have_vector_callback)
                return -1;

        /* Send future instances of this interrupt to other vcpu. */
        bind_vcpu.port = evtchn;
        bind_vcpu.vcpu = tcpu;

        /*
         * If this fails, it usually just indicates that we're dealing with a
         * virq or IPI channel, which don't actually need to be rebound. Ignore
         * it, but don't do the xenlinux-level rebind in that case.
         */
        if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
                bind_evtchn_to_cpu(evtchn, tcpu);

        return 0;
}

static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
                            bool force)
{
        unsigned tcpu = cpumask_first(dest);

        return rebind_irq_to_cpu(data->irq, tcpu);
}

int resend_irq_on_evtchn(unsigned int irq)
{
        int masked, evtchn = evtchn_from_irq(irq);
        struct shared_info *s = HYPERVISOR_shared_info;

        if (!VALID_EVTCHN(evtchn))
                return 1;

        masked = sync_test_and_set_bit(evtchn, s->evtchn_mask);
        sync_set_bit(evtchn, s->evtchn_pending);
        if (!masked)
                unmask_evtchn(evtchn);

        return 1;
}

static void enable_dynirq(struct irq_data *data)
{
        int evtchn = evtchn_from_irq(data->irq);

        if (VALID_EVTCHN(evtchn))
                unmask_evtchn(evtchn);
}

static void disable_dynirq(struct irq_data *data)
{
        int evtchn = evtchn_from_irq(data->irq);

        if (VALID_EVTCHN(evtchn))
                mask_evtchn(evtchn);
}

static void ack_dynirq(struct irq_data *data)
{
        int evtchn = evtchn_from_irq(data->irq);

        irq_move_masked_irq(data);

        if (VALID_EVTCHN(evtchn))
                unmask_evtchn(evtchn);
}

static int retrigger_dynirq(struct irq_data *data)
{
        int evtchn = evtchn_from_irq(data->irq);
        struct shared_info *sh = HYPERVISOR_shared_info;
        int ret = 0;

        if (VALID_EVTCHN(evtchn)) {
                int masked;

                masked = sync_test_and_set_bit(evtchn, sh->evtchn_mask);
                sync_set_bit(evtchn, sh->evtchn_pending);
                if (!masked)
                        unmask_evtchn(evtchn);
                ret = 1;
        }

        return ret;
}

static void restore_pirqs(void)
{
        int pirq, rc, irq, gsi;
        struct physdev_map_pirq map_irq;
        struct irq_info *info;

        list_for_each_entry(info, &xen_irq_list_head, list) {
                if (info->type != IRQT_PIRQ)
                        continue;

                pirq = info->u.pirq.pirq;
                gsi = info->u.pirq.gsi;
                irq = info->irq;

                /* save/restore of PT devices doesn't work, so at this point the
                 * only devices present are GSI based emulated devices */
                if (!gsi)
                        continue;

                map_irq.domid = DOMID_SELF;
                map_irq.type = MAP_PIRQ_TYPE_GSI;
                map_irq.index = gsi;
                map_irq.pirq = pirq;

                rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
                if (rc) {
                        printk(KERN_WARNING "xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
                                        gsi, irq, pirq, rc);
                        xen_free_irq(irq);
                        continue;
                }

                printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);

                __startup_pirq(irq);
        }
}

static void restore_cpu_virqs(unsigned int cpu)
{
        struct evtchn_bind_virq bind_virq;
        int virq, irq, evtchn;

        for (virq = 0; virq < NR_VIRQS; virq++) {
                if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
                        continue;

                BUG_ON(virq_from_irq(irq) != virq);

                /* Get a new binding from Xen. */
                bind_virq.virq = virq;
                bind_virq.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
                                                &bind_virq) != 0)
                        BUG();
                evtchn = bind_virq.port;

                /* Record the new mapping. */
                xen_irq_info_virq_init(cpu, irq, evtchn, virq);
                bind_evtchn_to_cpu(evtchn, cpu);
        }
}

static void restore_cpu_ipis(unsigned int cpu)
{
        struct evtchn_bind_ipi bind_ipi;
        int ipi, irq, evtchn;

        for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
                if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
                        continue;

                BUG_ON(ipi_from_irq(irq) != ipi);

                /* Get a new binding from Xen. */
                bind_ipi.vcpu = cpu;
                if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
                                                &bind_ipi) != 0)
                        BUG();
                evtchn = bind_ipi.port;

                /* Record the new mapping. */
                xen_irq_info_ipi_init(cpu, irq, evtchn, ipi);
                bind_evtchn_to_cpu(evtchn, cpu);
        }
}

/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                clear_evtchn(evtchn);
}
EXPORT_SYMBOL(xen_clear_irq_pending);

void xen_set_irq_pending(int irq)
{
        int evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn))
                set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
        int evtchn = evtchn_from_irq(irq);
        bool ret = false;

        if (VALID_EVTCHN(evtchn))
                ret = test_evtchn(evtchn);

        return ret;
}

/* Poll waiting for an irq to become pending with timeout.  In the usual case,
 * the irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq_timeout(int irq, u64 timeout)
{
        evtchn_port_t evtchn = evtchn_from_irq(irq);

        if (VALID_EVTCHN(evtchn)) {
                struct sched_poll poll;

                poll.nr_ports = 1;
                poll.timeout = timeout;
                set_xen_guest_handle(poll.ports, &evtchn);

                if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
                        BUG();
        }
}
EXPORT_SYMBOL(xen_poll_irq_timeout);

/* Poll waiting for an irq to become pending.  In the usual case, the
 * irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
        xen_poll_irq_timeout(irq, 0 /* no timeout */);
}
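
/*
 * Illustrative polling sequence (a sketch with a placeholder
 * condition, as a pv-spinlock style slow path might use): clearing
 * the pending state before re-checking the condition ensures that a
 * wakeup arriving in between is not lost:
 *
 *	xen_clear_irq_pending(irq);
 *	if (!condition_is_true())
 *		xen_poll_irq(irq);	(blocks until the evtchn pends)
 */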

/* Check whether the IRQ line is shared with other guests. */
int xen_test_irq_shared(int irq)
{
        struct irq_info *info = info_for_irq(irq);
        struct physdev_irq_status_query irq_status = { .irq = info->u.pirq.pirq };

        if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
                return 0;
        return !(irq_status.flags & XENIRQSTAT_shared);
}
EXPORT_SYMBOL_GPL(xen_test_irq_shared);

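/*
 * Called after save/restore. Event-channel bindings do not survive
 * suspend, so mask everything, forget the old irq<->evtchn mappings,
 * then rebind VIRQs and IPIs and restart PIRQs.
 */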
void xen_irq_resume(void)
{
        unsigned int cpu, evtchn;
        struct irq_info *info;

        init_evtchn_cpu_bindings();

        /* New event-channel space is not 'live' yet. */
        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
                mask_evtchn(evtchn);

        /* No IRQ <-> event-channel mappings. */
        list_for_each_entry(info, &xen_irq_list_head, list)
                info->evtchn = 0; /* zap event-channel binding */

        for (evtchn = 0; evtchn < NR_EVENT_CHANNELS; evtchn++)
                evtchn_to_irq[evtchn] = -1;

        for_each_possible_cpu(cpu) {
                restore_cpu_virqs(cpu);
                restore_cpu_ipis(cpu);
        }

        restore_pirqs();
}

static struct irq_chip xen_dynamic_chip __read_mostly = {
        .name                   = "xen-dyn",

        .irq_disable            = disable_dynirq,
        .irq_mask               = disable_dynirq,
        .irq_unmask             = enable_dynirq,

        .irq_eoi                = ack_dynirq,
        .irq_set_affinity       = set_affinity_irq,
        .irq_retrigger          = retrigger_dynirq,
};

static struct irq_chip xen_pirq_chip __read_mostly = {
        .name                   = "xen-pirq",

        .irq_startup            = startup_pirq,
        .irq_shutdown           = shutdown_pirq,

        .irq_enable             = enable_pirq,
        .irq_unmask             = enable_pirq,

        .irq_disable            = disable_pirq,
        .irq_mask               = disable_pirq,

        .irq_ack                = ack_pirq,

        .irq_set_affinity       = set_affinity_irq,

        .irq_retrigger          = retrigger_dynirq,
};

static struct irq_chip xen_percpu_chip __read_mostly = {
        .name                   = "xen-percpu",

        .irq_disable            = disable_dynirq,
        .irq_mask               = disable_dynirq,
        .irq_unmask             = enable_dynirq,

        .irq_ack                = ack_dynirq,
};

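/*
 * Tell the hypervisor how to deliver event-channel upcalls to an HVM
 * guest; "via" is an encoded callback descriptor, e.g. the
 * HVM_CALLBACK_VECTOR() encoding used in xen_callback_vector() below.
 */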
int xen_set_callback_via(uint64_t via)
{
        struct xen_hvm_param a;
        a.domid = DOMID_SELF;
        a.index = HVM_PARAM_CALLBACK_IRQ;
        a.value = via;
        return HYPERVISOR_hvm_op(HVMOP_set_param, &a);
}
EXPORT_SYMBOL_GPL(xen_set_callback_via);

#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
 * channel notifications because we can receive vector callbacks on any
 * vcpu and we don't need PCI support or APIC interactions. */
void xen_callback_vector(void)
{
        int rc;
        uint64_t callback_via;
        if (xen_have_vector_callback) {
                callback_via = HVM_CALLBACK_VECTOR(XEN_HVM_EVTCHN_CALLBACK);
                rc = xen_set_callback_via(callback_via);
                if (rc) {
                        printk(KERN_ERR "Request for Xen HVM callback vector"
                                        " failed.\n");
                        xen_have_vector_callback = 0;
                        return;
                }
                printk(KERN_INFO "Xen HVM callback vector for event delivery is "
                                "enabled\n");
                /* in the restore case the vector has already been allocated */
                if (!test_bit(XEN_HVM_EVTCHN_CALLBACK, used_vectors))
                        alloc_intr_gate(XEN_HVM_EVTCHN_CALLBACK, xen_hvm_callback_vector);
        }
}
#else
void xen_callback_vector(void) {}
#endif

void __init xen_init_IRQ(void)
{
        int i;

        evtchn_to_irq = kcalloc(NR_EVENT_CHANNELS, sizeof(*evtchn_to_irq),
                                GFP_KERNEL);
        for (i = 0; i < NR_EVENT_CHANNELS; i++)
                evtchn_to_irq[i] = -1;

        init_evtchn_cpu_bindings();

        /* No event channels are 'live' right now. */
        for (i = 0; i < NR_EVENT_CHANNELS; i++)
                mask_evtchn(i);

        if (xen_hvm_domain()) {
                xen_callback_vector();
                native_init_IRQ();
                /* pci_xen_hvm_init must be called after native_init_IRQ so that
                 * __acpi_register_gsi can point at the right function */
                pci_xen_hvm_init();
        } else {
                irq_ctx_init(smp_processor_id());
                if (xen_initial_domain())
                        xen_setup_pirqs();
        }
}