// SPDX-License-Identifier: GPL-2.0-only
/*
 * Xen event channels
 *
 * Xen models interrupts with abstract event channels.  Because each
 * domain gets 1024 event channels, but NR_IRQ is not that large, we
 * must dynamically map irqs<->event channels.  The event channels
 * interface with the rest of the kernel by defining a xen interrupt
 * chip.  When an event is received, it is mapped to an irq and sent
 * through the normal interrupt processing path.
 *
 * There are four kinds of events which can be mapped to an event
 * channel:
 *
 * 1. Inter-domain notifications.  This includes all the virtual
 *    device events, since they're driven by front-ends in another domain
 *    (typically dom0).
 * 2. VIRQs, typically used for timers.  These are per-cpu events.
 * 3. IPIs.
 * 4. PIRQs - Hardware interrupts.
 *
 * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007
 */
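/*
 * Delivery path overview (informational note, not original to this
 * file): when the hypervisor marks a port pending it raises the
 * upcall; __xen_evtchn_do_upcall() below scans the pending ports,
 * translates each port to its Linux irq via the evtchn_to_irq table
 * and feeds it to generic_handle_irq(), after which the irq-chip
 * callbacks of one of the xen_*_chip structures in this file run.
 */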
#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt

#include <linux/linkage.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/moduleparam.h>
#include <linux/string.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/irqnr.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/cpuhotplug.h>
#include <linux/atomic.h>
#include <linux/ktime.h>

#ifdef CONFIG_X86
#include <asm/desc.h>
#include <asm/ptrace.h>
#include <asm/idtentry.h>
#include <asm/irq.h>
#include <asm/io_apic.h>
#include <asm/i8259.h>
#include <asm/xen/pci.h>
#endif
#include <asm/sync_bitops.h>
#include <asm/xen/hypercall.h>
#include <asm/xen/hypervisor.h>
#include <xen/page.h>

#include <xen/xen.h>
#include <xen/hvm.h>
#include <xen/xen-ops.h>
#include <xen/events.h>
#include <xen/interface/xen.h>
#include <xen/interface/event_channel.h>
#include <xen/interface/hvm/hvm_op.h>
#include <xen/interface/hvm/params.h>
#include <xen/interface/physdev.h>
#include <xen/interface/sched.h>
#include <xen/interface/vcpu.h>
#include <xen/xenbus.h>
#include <asm/hw_irq.h>

#include "events_internal.h"
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "xen."

/* Interrupt types. */
enum xen_irq_type {
	IRQT_UNBOUND = 0,
	IRQT_PIRQ,
	IRQT_VIRQ,
	IRQT_IPI,
	IRQT_EVTCHN
};
/*
 * Packed IRQ information:
 * type - enum xen_irq_type
 * event channel - irq->event channel mapping
 * cpu - cpu this event channel is bound to
 * index - type-specific information:
 *    PIRQ - vector, with MSB being "needs EOI", or physical IRQ of the HVM
 *           guest, or GSI (real passthrough IRQ) of the device.
 *    VIRQ - virq number
 *    IPI - IPI vector
 *    EVTCHN -
 */
struct irq_info {
	struct list_head list;
	struct list_head eoi_list;
	short refcnt;
	u8 spurious_cnt;
	u8 is_accounted;
	short type;		/* type: IRQT_* */
	u8 mask_reason;		/* Why is event channel masked */
#define EVT_MASK_REASON_EXPLICIT	0x01
#define EVT_MASK_REASON_TEMPORARY	0x02
#define EVT_MASK_REASON_EOI_PENDING	0x04
	u8 is_active;		/* Is event just being handled? */
	unsigned irq;
	evtchn_port_t evtchn;	/* event channel */
	unsigned short cpu;	/* cpu bound */
	unsigned short eoi_cpu;	/* EOI must happen on this cpu-1 */
	unsigned int irq_epoch;	/* If eoi_cpu valid: irq_epoch of event */
	u64 eoi_time;		/* Time in jiffies when to EOI. */
	spinlock_t lock;

	union {			/* type-specific info */
		unsigned short virq;
		enum ipi_vector ipi;
		struct {
			unsigned short pirq;
			unsigned short gsi;
			unsigned char vector;
			unsigned char flags;
			uint16_t domid;
		} pirq;
		struct xenbus_device *interdomain;
	} u;
};

#define PIRQ_NEEDS_EOI	(1 << 0)
#define PIRQ_SHAREABLE	(1 << 1)
#define PIRQ_MSI_GROUP	(1 << 2)
static uint __read_mostly event_loop_timeout = 2;
module_param(event_loop_timeout, uint, 0644);

static uint __read_mostly event_eoi_delay = 10;
module_param(event_eoi_delay, uint, 0644);

const struct evtchn_ops *evtchn_ops;

/*
 * This lock protects updates to the following mapping and reference-count
 * arrays. The lock does not need to be acquired to read the mapping tables.
 */
static DEFINE_MUTEX(irq_mapping_update_lock);

/*
 * Lock protecting event handling loop against removing event channels.
 * Adding of event channels is no issue as the associated IRQ becomes active
 * only after everything is setup (before request_[threaded_]irq() the handler
 * can't be entered for an event, as the event channel will be unmasked only
 * then).
 */
static DEFINE_RWLOCK(evtchn_rwlock);

/*
 * Lock hierarchy:
 *
 * irq_mapping_update_lock
 *   evtchn_rwlock
 *     IRQ-desc lock
 *       percpu eoi_list_lock
 *         irq_info->lock
 */

static LIST_HEAD(xen_irq_list_head);
/* IRQ <-> VIRQ mapping. */
static DEFINE_PER_CPU(int [NR_VIRQS], virq_to_irq) = {[0 ... NR_VIRQS-1] = -1};

/* IRQ <-> IPI mapping */
static DEFINE_PER_CPU(int [XEN_NR_IPIS], ipi_to_irq) = {[0 ... XEN_NR_IPIS-1] = -1};

/* Event channel distribution data */
static atomic_t channels_on_cpu[NR_CPUS];

static int **evtchn_to_irq;
#ifdef CONFIG_X86
static unsigned long *pirq_eoi_map;
#endif
static bool (*pirq_needs_eoi)(unsigned irq);

#define EVTCHN_ROW(e)  (e / (PAGE_SIZE/sizeof(**evtchn_to_irq)))
#define EVTCHN_COL(e)  (e % (PAGE_SIZE/sizeof(**evtchn_to_irq)))
#define EVTCHN_PER_ROW (PAGE_SIZE / sizeof(**evtchn_to_irq))
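/*
 * Worked example (assuming 4 KiB pages and 4-byte ints, so
 * EVTCHN_PER_ROW == 1024): port 2500 lives in row 2500 / 1024 == 2,
 * column 2500 % 1024 == 452.  Each row is a single page allocated on
 * demand, so the lookup table never needs one large allocation.
 */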
/* Xen will never allocate port zero for any purpose. */
#define VALID_EVTCHN(chn)	((chn) != 0)

static struct irq_info *legacy_info_ptrs[NR_IRQS_LEGACY];

static struct irq_chip xen_dynamic_chip;
static struct irq_chip xen_lateeoi_chip;
static struct irq_chip xen_percpu_chip;
static struct irq_chip xen_pirq_chip;
static void enable_dynirq(struct irq_data *data);
static void disable_dynirq(struct irq_data *data);

static DEFINE_PER_CPU(unsigned int, irq_epoch);
static void clear_evtchn_to_irq_row(unsigned row)
{
	unsigned col;

	for (col = 0; col < EVTCHN_PER_ROW; col++)
		WRITE_ONCE(evtchn_to_irq[row][col], -1);
}

static void clear_evtchn_to_irq_all(void)
{
	unsigned row;

	for (row = 0; row < EVTCHN_ROW(xen_evtchn_max_channels()); row++) {
		if (evtchn_to_irq[row] == NULL)
			continue;
		clear_evtchn_to_irq_row(row);
	}
}

static int set_evtchn_to_irq(evtchn_port_t evtchn, unsigned int irq)
{
	unsigned row;
	unsigned col;

	if (evtchn >= xen_evtchn_max_channels())
		return -EINVAL;

	row = EVTCHN_ROW(evtchn);
	col = EVTCHN_COL(evtchn);

	if (evtchn_to_irq[row] == NULL) {
		/* Unallocated irq entries return -1 anyway */
		if (irq == -1)
			return 0;

		evtchn_to_irq[row] = (int *)get_zeroed_page(GFP_KERNEL);
		if (evtchn_to_irq[row] == NULL)
			return -ENOMEM;

		clear_evtchn_to_irq_row(row);
	}

	WRITE_ONCE(evtchn_to_irq[row][col], irq);
	return 0;
}

int get_evtchn_to_irq(evtchn_port_t evtchn)
{
	if (evtchn >= xen_evtchn_max_channels())
		return -1;
	if (evtchn_to_irq[EVTCHN_ROW(evtchn)] == NULL)
		return -1;
	return READ_ONCE(evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)]);
}
/* Get info for IRQ */
static struct irq_info *info_for_irq(unsigned irq)
{
	if (irq < nr_legacy_irqs())
		return legacy_info_ptrs[irq];
	else
		return irq_get_chip_data(irq);
}

static void set_info_for_irq(unsigned int irq, struct irq_info *info)
{
	if (irq < nr_legacy_irqs())
		legacy_info_ptrs[irq] = info;
	else
		irq_set_chip_data(irq, info);
}

/* Per CPU channel accounting */
static void channels_on_cpu_dec(struct irq_info *info)
{
	if (!info->is_accounted)
		return;

	info->is_accounted = 0;

	if (WARN_ON_ONCE(info->cpu >= nr_cpu_ids))
		return;

	WARN_ON_ONCE(!atomic_add_unless(&channels_on_cpu[info->cpu], -1, 0));
}

static void channels_on_cpu_inc(struct irq_info *info)
{
	if (WARN_ON_ONCE(info->cpu >= nr_cpu_ids))
		return;

	if (WARN_ON_ONCE(!atomic_add_unless(&channels_on_cpu[info->cpu], 1,
					    INT_MAX)))
		return;

	info->is_accounted = 1;
}
/* Constructors for packed IRQ information. */
static int xen_irq_info_common_setup(struct irq_info *info,
				     unsigned irq,
				     enum xen_irq_type type,
				     evtchn_port_t evtchn,
				     unsigned short cpu)
{
	int ret;

	BUG_ON(info->type != IRQT_UNBOUND && info->type != type);

	info->type = type;
	info->irq = irq;
	info->evtchn = evtchn;
	info->cpu = cpu;
	info->mask_reason = EVT_MASK_REASON_EXPLICIT;
	spin_lock_init(&info->lock);

	ret = set_evtchn_to_irq(evtchn, irq);
	if (ret < 0)
		return ret;

	irq_clear_status_flags(irq, IRQ_NOREQUEST|IRQ_NOAUTOEN);

	return xen_evtchn_port_setup(evtchn);
}

static int xen_irq_info_evtchn_setup(unsigned irq,
				     evtchn_port_t evtchn,
				     struct xenbus_device *dev)
{
	struct irq_info *info = info_for_irq(irq);
	int ret;

	ret = xen_irq_info_common_setup(info, irq, IRQT_EVTCHN, evtchn, 0);
	info->u.interdomain = dev;
	if (dev)
		atomic_inc(&dev->event_channels);

	return ret;
}

static int xen_irq_info_ipi_setup(unsigned cpu,
				  unsigned irq,
				  evtchn_port_t evtchn,
				  enum ipi_vector ipi)
{
	struct irq_info *info = info_for_irq(irq);

	info->u.ipi = ipi;

	per_cpu(ipi_to_irq, cpu)[ipi] = irq;

	return xen_irq_info_common_setup(info, irq, IRQT_IPI, evtchn, 0);
}

static int xen_irq_info_virq_setup(unsigned cpu,
				   unsigned irq,
				   evtchn_port_t evtchn,
				   unsigned virq)
{
	struct irq_info *info = info_for_irq(irq);

	info->u.virq = virq;

	per_cpu(virq_to_irq, cpu)[virq] = irq;

	return xen_irq_info_common_setup(info, irq, IRQT_VIRQ, evtchn, 0);
}

static int xen_irq_info_pirq_setup(unsigned irq,
				   evtchn_port_t evtchn,
				   unsigned pirq,
				   unsigned gsi,
				   uint16_t domid,
				   unsigned char flags)
{
	struct irq_info *info = info_for_irq(irq);

	info->u.pirq.pirq = pirq;
	info->u.pirq.gsi = gsi;
	info->u.pirq.domid = domid;
	info->u.pirq.flags = flags;

	return xen_irq_info_common_setup(info, irq, IRQT_PIRQ, evtchn, 0);
}

static void xen_irq_info_cleanup(struct irq_info *info)
{
	set_evtchn_to_irq(info->evtchn, -1);
	xen_evtchn_port_remove(info->evtchn, info->cpu);
	info->evtchn = 0;
	channels_on_cpu_dec(info);
}
/*
 * Accessors for packed IRQ information.
 */
evtchn_port_t evtchn_from_irq(unsigned irq)
{
	const struct irq_info *info = NULL;

	if (likely(irq < nr_irqs))
		info = info_for_irq(irq);

	return info ? info->evtchn : 0;
}

unsigned int irq_from_evtchn(evtchn_port_t evtchn)
{
	return get_evtchn_to_irq(evtchn);
}
EXPORT_SYMBOL_GPL(irq_from_evtchn);

int irq_from_virq(unsigned int cpu, unsigned int virq)
{
	return per_cpu(virq_to_irq, cpu)[virq];
}

static enum ipi_vector ipi_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_IPI);

	return info->u.ipi;
}

static unsigned virq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_VIRQ);

	return info->u.virq;
}

static unsigned pirq_from_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info == NULL);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.pirq;
}

static enum xen_irq_type type_from_irq(unsigned irq)
{
	return info_for_irq(irq)->type;
}

static unsigned cpu_from_irq(unsigned irq)
{
	return info_for_irq(irq)->cpu;
}

unsigned int cpu_from_evtchn(evtchn_port_t evtchn)
{
	int irq = get_evtchn_to_irq(evtchn);
	unsigned ret = 0;

	if (irq != -1)
		ret = cpu_from_irq(irq);

	return ret;
}
static void do_mask(struct irq_info *info, u8 reason)
{
	unsigned long flags;

	spin_lock_irqsave(&info->lock, flags);

	if (!info->mask_reason)
		mask_evtchn(info->evtchn);

	info->mask_reason |= reason;

	spin_unlock_irqrestore(&info->lock, flags);
}

static void do_unmask(struct irq_info *info, u8 reason)
{
	unsigned long flags;

	spin_lock_irqsave(&info->lock, flags);

	info->mask_reason &= ~reason;

	if (!info->mask_reason)
		unmask_evtchn(info->evtchn);

	spin_unlock_irqrestore(&info->lock, flags);
}
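/*
 * Illustration of the mask_reason scheme (not executed code): each
 * masking reason is an independent bit, so the event channel is only
 * unmasked once the *last* reason has been cleared:
 *
 *	do_mask(info, EVT_MASK_REASON_EXPLICIT);	// masks evtchn
 *	do_mask(info, EVT_MASK_REASON_EOI_PENDING);	// still masked
 *	do_unmask(info, EVT_MASK_REASON_EXPLICIT);	// still masked
 *	do_unmask(info, EVT_MASK_REASON_EOI_PENDING);	// unmasks evtchn
 */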
#ifdef CONFIG_X86
static bool pirq_check_eoi_map(unsigned irq)
{
	return test_bit(pirq_from_irq(irq), pirq_eoi_map);
}
#endif

static bool pirq_needs_eoi_flag(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);
	BUG_ON(info->type != IRQT_PIRQ);

	return info->u.pirq.flags & PIRQ_NEEDS_EOI;
}

static void bind_evtchn_to_cpu(evtchn_port_t evtchn, unsigned int cpu,
			       bool force_affinity)
{
	int irq = get_evtchn_to_irq(evtchn);
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(irq == -1);

	if (IS_ENABLED(CONFIG_SMP) && force_affinity) {
		cpumask_copy(irq_get_affinity_mask(irq), cpumask_of(cpu));
		cpumask_copy(irq_get_effective_affinity_mask(irq),
			     cpumask_of(cpu));
	}

	xen_evtchn_port_bind_to_cpu(evtchn, cpu, info->cpu);

	channels_on_cpu_dec(info);
	info->cpu = cpu;
	channels_on_cpu_inc(info);
}
/**
 * notify_remote_via_irq - send event to remote end of event channel via irq
 * @irq: irq of event channel to send event to
 *
 * Unlike notify_remote_via_evtchn(), this is safe to use across
 * save/restore. Notifications on a broken connection are silently
 * dropped.
 */
void notify_remote_via_irq(int irq)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		notify_remote_via_evtchn(evtchn);
}
EXPORT_SYMBOL_GPL(notify_remote_via_irq);
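/*
 * Typical use (sketch; "ring" and "irq" are illustrative): after a
 * frontend queues requests on a shared ring it kicks the other end
 * through the irq it obtained from bind_evtchn_to_irqhandler():
 *
 *	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&ring, notify);
 *	if (notify)
 *		notify_remote_via_irq(irq);
 */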
struct lateeoi_work {
	struct delayed_work delayed;
	spinlock_t eoi_list_lock;
	struct list_head eoi_list;
};

static DEFINE_PER_CPU(struct lateeoi_work, lateeoi);

static void lateeoi_list_del(struct irq_info *info)
{
	struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu);
	unsigned long flags;

	spin_lock_irqsave(&eoi->eoi_list_lock, flags);
	list_del_init(&info->eoi_list);
	spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
}

static void lateeoi_list_add(struct irq_info *info)
{
	struct lateeoi_work *eoi = &per_cpu(lateeoi, info->eoi_cpu);
	struct irq_info *elem;
	u64 now = get_jiffies_64();
	unsigned long delay;
	unsigned long flags;

	if (now < info->eoi_time)
		delay = info->eoi_time - now;
	else
		delay = 1;

	spin_lock_irqsave(&eoi->eoi_list_lock, flags);

	if (list_empty(&eoi->eoi_list)) {
		list_add(&info->eoi_list, &eoi->eoi_list);
		mod_delayed_work_on(info->eoi_cpu, system_wq,
				    &eoi->delayed, delay);
	} else {
		list_for_each_entry_reverse(elem, &eoi->eoi_list, eoi_list) {
			if (elem->eoi_time <= info->eoi_time)
				break;
		}
		list_add(&info->eoi_list, &elem->eoi_list);
	}

	spin_unlock_irqrestore(&eoi->eoi_list_lock, flags);
}
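/*
 * Note on the insertion above: the per-cpu eoi_list is kept sorted by
 * ascending eoi_time, so the delayed work only ever needs to look at
 * the list head to find the next EOI that is due.
 */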
static void xen_irq_lateeoi_locked(struct irq_info *info, bool spurious)
{
	evtchn_port_t evtchn;
	unsigned int cpu;
	unsigned int delay = 0;

	evtchn = info->evtchn;
	if (!VALID_EVTCHN(evtchn) || !list_empty(&info->eoi_list))
		return;

	if (spurious) {
		struct xenbus_device *dev = info->u.interdomain;
		unsigned int threshold = 1;

		if (dev && dev->spurious_threshold)
			threshold = dev->spurious_threshold;

		if ((1 << info->spurious_cnt) < (HZ << 2)) {
			if (info->spurious_cnt != 0xFF)
				info->spurious_cnt++;
		}
		if (info->spurious_cnt > threshold) {
			delay = 1 << (info->spurious_cnt - 1 - threshold);
			if (delay > HZ)
				delay = HZ;
			if (!info->eoi_time)
				info->eoi_cpu = smp_processor_id();
			info->eoi_time = get_jiffies_64() + delay;
			if (dev)
				atomic_add(delay, &dev->jiffies_eoi_delayed);
		}
		if (dev)
			atomic_inc(&dev->spurious_events);
	} else {
		info->spurious_cnt = 0;
	}

	cpu = info->eoi_cpu;
	if (info->eoi_time &&
	    (info->irq_epoch == per_cpu(irq_epoch, cpu) || delay)) {
		lateeoi_list_add(info);
		return;
	}

	info->eoi_time = 0;
	do_unmask(info, EVT_MASK_REASON_EOI_PENDING);
}
static void xen_irq_lateeoi_worker(struct work_struct *work)
{
	struct lateeoi_work *eoi;
	struct irq_info *info;
	u64 now = get_jiffies_64();
	unsigned long flags;

	eoi = container_of(to_delayed_work(work), struct lateeoi_work, delayed);

	read_lock_irqsave(&evtchn_rwlock, flags);

	while (true) {
		spin_lock(&eoi->eoi_list_lock);

		info = list_first_entry_or_null(&eoi->eoi_list, struct irq_info,
						eoi_list);

		if (info == NULL || now < info->eoi_time) {
			spin_unlock(&eoi->eoi_list_lock);
			break;
		}

		list_del_init(&info->eoi_list);

		spin_unlock(&eoi->eoi_list_lock);

		info->eoi_time = 0;

		xen_irq_lateeoi_locked(info, false);
	}

	if (info)
		mod_delayed_work_on(info->eoi_cpu, system_wq,
				    &eoi->delayed, info->eoi_time - now);

	read_unlock_irqrestore(&evtchn_rwlock, flags);
}

static void xen_cpu_init_eoi(unsigned int cpu)
{
	struct lateeoi_work *eoi = &per_cpu(lateeoi, cpu);

	INIT_DELAYED_WORK(&eoi->delayed, xen_irq_lateeoi_worker);
	spin_lock_init(&eoi->eoi_list_lock);
	INIT_LIST_HEAD(&eoi->eoi_list);
}

void xen_irq_lateeoi(unsigned int irq, unsigned int eoi_flags)
{
	struct irq_info *info;
	unsigned long flags;

	read_lock_irqsave(&evtchn_rwlock, flags);

	info = info_for_irq(irq);

	if (info)
		xen_irq_lateeoi_locked(info, eoi_flags & XEN_EOI_FLAG_SPURIOUS);

	read_unlock_irqrestore(&evtchn_rwlock, flags);
}
EXPORT_SYMBOL_GPL(xen_irq_lateeoi);
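/*
 * Sketch of the lateeoi contract for a backend driver: the event stays
 * masked after the handler returns until the driver signals that it has
 * really processed the event.  Names below ("be_handler", "process_ring",
 * "my_priv") are illustrative only:
 *
 *	static irqreturn_t be_handler(int irq, void *dev_id)
 *	{
 *		bool handled = process_ring(dev_id);	// hypothetical
 *
 *		// Unmask only now; flag bogus events so the core can
 *		// throttle a misbehaving other end.
 *		xen_irq_lateeoi(irq, handled ? 0 : XEN_EOI_FLAG_SPURIOUS);
 *		return IRQ_HANDLED;
 *	}
 *
 *	irq = bind_interdomain_evtchn_to_irqhandler_lateeoi(dev, port,
 *			be_handler, 0, "my-backend", my_priv);
 */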
static void xen_irq_init(unsigned irq)
{
	struct irq_info *info;

	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (info == NULL)
		panic("Unable to allocate metadata for IRQ%d\n", irq);

	info->type = IRQT_UNBOUND;
	info->refcnt = -1;

	set_info_for_irq(irq, info);
	/*
	 * Interrupt affinity setting can be immediate. No point
	 * in delaying it until an interrupt is handled.
	 */
	irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);

	INIT_LIST_HEAD(&info->eoi_list);
	list_add_tail(&info->list, &xen_irq_list_head);
}

static int __must_check xen_allocate_irqs_dynamic(int nvec)
{
	int i, irq = irq_alloc_descs(-1, 0, nvec, -1);

	if (irq >= 0) {
		for (i = 0; i < nvec; i++)
			xen_irq_init(irq + i);
	}

	return irq;
}

static inline int __must_check xen_allocate_irq_dynamic(void)
{
	return xen_allocate_irqs_dynamic(1);
}
static int __must_check xen_allocate_irq_gsi(unsigned gsi)
{
	int irq;

	/*
	 * A PV guest has no concept of a GSI (since it has no ACPI
	 * nor access to/knowledge of the physical APICs). Therefore
	 * all IRQs are dynamically allocated from the entire IRQ
	 * space.
	 */
	if (xen_pv_domain() && !xen_initial_domain())
		return xen_allocate_irq_dynamic();

	/* Legacy IRQ descriptors are already allocated by the arch. */
	if (gsi < nr_legacy_irqs())
		irq = gsi;
	else
		irq = irq_alloc_desc_at(gsi, -1);

	xen_irq_init(irq);

	return irq;
}

static void xen_free_irq(unsigned irq)
{
	struct irq_info *info = info_for_irq(irq);
	unsigned long flags;

	if (WARN_ON(!info))
		return;

	write_lock_irqsave(&evtchn_rwlock, flags);

	if (!list_empty(&info->eoi_list))
		lateeoi_list_del(info);

	list_del(&info->list);

	set_info_for_irq(irq, NULL);

	WARN_ON(info->refcnt > 0);

	write_unlock_irqrestore(&evtchn_rwlock, flags);

	kfree(info);

	/* Legacy IRQ descriptors are managed by the arch. */
	if (irq < nr_legacy_irqs())
		return;

	irq_free_desc(irq);
}
static void xen_evtchn_close(evtchn_port_t port)
{
	struct evtchn_close close;

	close.port = port;
	if (HYPERVISOR_event_channel_op(EVTCHNOP_close, &close) != 0)
		BUG();
}

static void event_handler_exit(struct irq_info *info)
{
	smp_store_release(&info->is_active, 0);
	clear_evtchn(info->evtchn);
}

static void pirq_query_unmask(int irq)
{
	struct physdev_irq_status_query irq_status;
	struct irq_info *info = info_for_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	irq_status.irq = pirq_from_irq(irq);
	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		irq_status.flags = 0;

	info->u.pirq.flags &= ~PIRQ_NEEDS_EOI;
	if (irq_status.flags & XENIRQSTAT_needs_eoi)
		info->u.pirq.flags |= PIRQ_NEEDS_EOI;
}

static void eoi_pirq(struct irq_data *data)
{
	struct irq_info *info = info_for_irq(data->irq);
	evtchn_port_t evtchn = info ? info->evtchn : 0;
	struct physdev_eoi eoi = { .irq = pirq_from_irq(data->irq) };
	int rc = 0;

	if (!VALID_EVTCHN(evtchn))
		return;

	event_handler_exit(info);

	if (pirq_needs_eoi(data->irq)) {
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi);
		WARN_ON(rc);
	}
}

static void mask_ack_pirq(struct irq_data *data)
{
	disable_dynirq(data);
	eoi_pirq(data);
}
static unsigned int __startup_pirq(unsigned int irq)
{
	struct evtchn_bind_pirq bind_pirq;
	struct irq_info *info = info_for_irq(irq);
	evtchn_port_t evtchn = evtchn_from_irq(irq);
	int rc;

	BUG_ON(info->type != IRQT_PIRQ);

	if (VALID_EVTCHN(evtchn))
		goto out;

	bind_pirq.pirq = pirq_from_irq(irq);
	/* NB. We are happy to share unless we are probing. */
	bind_pirq.flags = info->u.pirq.flags & PIRQ_SHAREABLE ?
					BIND_PIRQ__WILL_SHARE : 0;
	rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_pirq, &bind_pirq);
	if (rc != 0) {
		pr_warn("Failed to obtain physical IRQ %d\n", irq);
		return 0;
	}
	evtchn = bind_pirq.port;

	pirq_query_unmask(irq);

	rc = set_evtchn_to_irq(evtchn, irq);
	if (rc)
		goto err;

	info->evtchn = evtchn;
	bind_evtchn_to_cpu(evtchn, 0, false);

	rc = xen_evtchn_port_setup(evtchn);
	if (rc)
		goto err;

	do_unmask(info, EVT_MASK_REASON_EXPLICIT);

	eoi_pirq(irq_get_irq_data(irq));

out:
	return 0;

err:
	pr_err("irq%d: Failed to set port to irq mapping (%d)\n", irq, rc);
	xen_evtchn_close(evtchn);
	return 0;
}

static unsigned int startup_pirq(struct irq_data *data)
{
	return __startup_pirq(data->irq);
}

static void shutdown_pirq(struct irq_data *data)
{
	unsigned int irq = data->irq;
	struct irq_info *info = info_for_irq(irq);
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	BUG_ON(info->type != IRQT_PIRQ);

	if (!VALID_EVTCHN(evtchn))
		return;

	do_mask(info, EVT_MASK_REASON_EXPLICIT);
	xen_evtchn_close(evtchn);
	xen_irq_info_cleanup(info);
}

static void enable_pirq(struct irq_data *data)
{
	enable_dynirq(data);
}

static void disable_pirq(struct irq_data *data)
{
	disable_dynirq(data);
}
int xen_irq_from_gsi(unsigned gsi)
{
	struct irq_info *info;

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;

		if (info->u.pirq.gsi == gsi)
			return info->irq;
	}

	return -1;
}
EXPORT_SYMBOL_GPL(xen_irq_from_gsi);

static void __unbind_from_irq(unsigned int irq)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);
	struct irq_info *info = info_for_irq(irq);

	if (info->refcnt > 0) {
		info->refcnt--;
		if (info->refcnt != 0)
			return;
	}

	if (VALID_EVTCHN(evtchn)) {
		unsigned int cpu = cpu_from_irq(irq);
		struct xenbus_device *dev;

		xen_evtchn_close(evtchn);

		switch (type_from_irq(irq)) {
		case IRQT_VIRQ:
			per_cpu(virq_to_irq, cpu)[virq_from_irq(irq)] = -1;
			break;
		case IRQT_IPI:
			per_cpu(ipi_to_irq, cpu)[ipi_from_irq(irq)] = -1;
			break;
		case IRQT_EVTCHN:
			dev = info->u.interdomain;
			if (dev)
				atomic_dec(&dev->event_channels);
			break;
		default:
			break;
		}

		xen_irq_info_cleanup(info);
	}

	xen_free_irq(irq);
}
/*
 * Do not make any assumptions regarding the relationship between the
 * IRQ number returned here and the Xen pirq argument.
 *
 * Note: We don't assign an event channel until the irq actually started
 * up.  Return an existing irq if we've already got one for the gsi.
 *
 * Shareable implies level triggered, not shareable implies edge
 * triggered here.
 */
int xen_bind_pirq_gsi_to_irq(unsigned gsi,
			     unsigned pirq, int shareable, char *name)
{
	int irq;
	struct physdev_irq irq_op;
	int ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = xen_irq_from_gsi(gsi);
	if (irq != -1) {
		pr_info("%s: returning irq %d for gsi %u\n",
			__func__, irq, gsi);
		goto out;
	}

	irq = xen_allocate_irq_gsi(gsi);
	if (irq < 0)
		goto out;

	irq_op.irq = irq;
	irq_op.vector = 0;

	/* Only the privileged domain can do this. For non-priv, the pcifront
	 * driver provides a PCI bus that does the call to do exactly
	 * this in the priv domain. */
	if (xen_initial_domain() &&
	    HYPERVISOR_physdev_op(PHYSDEVOP_alloc_irq_vector, &irq_op)) {
		xen_free_irq(irq);
		irq = -ENOSPC;
		goto out;
	}

	ret = xen_irq_info_pirq_setup(irq, 0, pirq, gsi, DOMID_SELF,
			       shareable ? PIRQ_SHAREABLE : 0);
	if (ret < 0) {
		__unbind_from_irq(irq);
		irq = ret;
		goto out;
	}

	pirq_query_unmask(irq);
	/* We try to use the handler with the appropriate semantic for the
	 * type of interrupt: if the interrupt is an edge triggered
	 * interrupt we use handle_edge_irq.
	 *
	 * On the other hand if the interrupt is level triggered we use
	 * handle_fasteoi_irq like the native code does for this kind of
	 * interrupts.
	 *
	 * Depending on the Xen version, pirq_needs_eoi might return true
	 * not only for level triggered interrupts but for edge triggered
	 * interrupts too. In any case Xen always honors the eoi mechanism,
	 * not injecting any more pirqs of the same kind if the first one
	 * hasn't received an eoi yet. Therefore using the fasteoi handler
	 * is the right choice either way.
	 */
	if (shareable)
		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
				handle_fasteoi_irq, name);
	else
		irq_set_chip_and_handler_name(irq, &xen_pirq_chip,
				handle_edge_irq, name);

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}
#ifdef CONFIG_PCI_MSI
int xen_allocate_pirq_msi(struct pci_dev *dev, struct msi_desc *msidesc)
{
	int rc;
	struct physdev_get_free_pirq op_get_free_pirq;

	op_get_free_pirq.type = MAP_PIRQ_TYPE_MSI;
	rc = HYPERVISOR_physdev_op(PHYSDEVOP_get_free_pirq, &op_get_free_pirq);

	WARN_ONCE(rc == -ENOSYS,
		  "hypervisor does not support the PHYSDEVOP_get_free_pirq interface\n");

	return rc ? -1 : op_get_free_pirq.pirq;
}

int xen_bind_pirq_msi_to_irq(struct pci_dev *dev, struct msi_desc *msidesc,
			     int pirq, int nvec, const char *name, domid_t domid)
{
	int i, irq, ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = xen_allocate_irqs_dynamic(nvec);
	if (irq < 0)
		goto out;

	for (i = 0; i < nvec; i++) {
		irq_set_chip_and_handler_name(irq + i, &xen_pirq_chip, handle_edge_irq, name);

		ret = xen_irq_info_pirq_setup(irq + i, 0, pirq + i, 0, domid,
					      i == 0 ? 0 : PIRQ_MSI_GROUP);
		if (ret < 0)
			goto error_irq;
	}

	ret = irq_set_msi_desc(irq, msidesc);
	if (ret < 0)
		goto error_irq;
out:
	mutex_unlock(&irq_mapping_update_lock);
	return irq;
error_irq:
	while (nvec--)
		__unbind_from_irq(irq + nvec);
	mutex_unlock(&irq_mapping_update_lock);
	return ret;
}
#endif
int xen_destroy_irq(int irq)
{
	struct physdev_unmap_pirq unmap_irq;
	struct irq_info *info = info_for_irq(irq);
	int rc = -ENOENT;

	mutex_lock(&irq_mapping_update_lock);

	/*
	 * If trying to remove a vector that is not the first one in its
	 * MSI group, skip the PIRQ unmap: only the first vector of the
	 * group unmaps the PIRQ.
	 */
	if (xen_initial_domain() && !(info->u.pirq.flags & PIRQ_MSI_GROUP)) {
		unmap_irq.pirq = info->u.pirq.pirq;
		unmap_irq.domid = info->u.pirq.domid;
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_unmap_pirq, &unmap_irq);
		/* If another domain quits without making the pci_disable_msix
		 * call, the Xen hypervisor takes care of freeing the PIRQs
		 * (free_domain_pirqs).
		 */
		if ((rc == -ESRCH && info->u.pirq.domid != DOMID_SELF))
			pr_info("domain %d does not have %d anymore\n",
				info->u.pirq.domid, info->u.pirq.pirq);
		else if (rc) {
			pr_warn("unmap irq failed %d\n", rc);
			goto out;
		}
	}

	xen_free_irq(irq);

out:
	mutex_unlock(&irq_mapping_update_lock);
	return rc;
}

int xen_irq_from_pirq(unsigned pirq)
{
	int irq = -1;

	struct irq_info *info;

	mutex_lock(&irq_mapping_update_lock);

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;
		irq = info->irq;
		if (info->u.pirq.pirq == pirq)
			goto out;
	}
	irq = -1;
out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}


int xen_pirq_from_irq(unsigned irq)
{
	return pirq_from_irq(irq);
}
EXPORT_SYMBOL_GPL(xen_pirq_from_irq);
static int bind_evtchn_to_irq_chip(evtchn_port_t evtchn, struct irq_chip *chip,
				   struct xenbus_device *dev)
{
	int irq;
	int ret;

	if (evtchn >= xen_evtchn_max_channels())
		return -ENOMEM;

	mutex_lock(&irq_mapping_update_lock);

	irq = get_evtchn_to_irq(evtchn);

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		irq_set_chip_and_handler_name(irq, chip,
					      handle_edge_irq, "event");

		ret = xen_irq_info_evtchn_setup(irq, evtchn, dev);
		if (ret < 0) {
			__unbind_from_irq(irq);
			irq = ret;
			goto out;
		}
		/*
		 * New interdomain events are initially bound to vCPU0. This
		 * is required to setup the event channel in the first
		 * place and also important for UP guests because the
		 * affinity setting is not invoked on them so nothing would
		 * bind the channel.
		 */
		bind_evtchn_to_cpu(evtchn, 0, false);
	} else {
		struct irq_info *info = info_for_irq(irq);
		WARN_ON(info == NULL || info->type != IRQT_EVTCHN);
	}

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}

int bind_evtchn_to_irq(evtchn_port_t evtchn)
{
	return bind_evtchn_to_irq_chip(evtchn, &xen_dynamic_chip, NULL);
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irq);
static int bind_ipi_to_irq(unsigned int ipi, unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	evtchn_port_t evtchn;
	int ret, irq;

	mutex_lock(&irq_mapping_update_lock);

	irq = per_cpu(ipi_to_irq, cpu)[ipi];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
					      handle_percpu_irq, "ipi");

		bind_ipi.vcpu = xen_vcpu_nr(cpu);
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		ret = xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
		if (ret < 0) {
			__unbind_from_irq(irq);
			irq = ret;
			goto out;
		}
		/*
		 * Force the affinity mask to the target CPU so proc shows
		 * the correct target.
		 */
		bind_evtchn_to_cpu(evtchn, cpu, true);
	} else {
		struct irq_info *info = info_for_irq(irq);
		WARN_ON(info == NULL || info->type != IRQT_IPI);
	}

out:
	mutex_unlock(&irq_mapping_update_lock);
	return irq;
}
static int bind_interdomain_evtchn_to_irq_chip(struct xenbus_device *dev,
					       evtchn_port_t remote_port,
					       struct irq_chip *chip)
{
	struct evtchn_bind_interdomain bind_interdomain;
	int err;

	bind_interdomain.remote_dom  = dev->otherend_id;
	bind_interdomain.remote_port = remote_port;

	err = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain,
					  &bind_interdomain);

	return err ? : bind_evtchn_to_irq_chip(bind_interdomain.local_port,
					       chip, dev);
}

int bind_interdomain_evtchn_to_irq_lateeoi(struct xenbus_device *dev,
					   evtchn_port_t remote_port)
{
	return bind_interdomain_evtchn_to_irq_chip(dev, remote_port,
						   &xen_lateeoi_chip);
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irq_lateeoi);
static int find_virq(unsigned int virq, unsigned int cpu, evtchn_port_t *evtchn)
{
	struct evtchn_status status;
	evtchn_port_t port;
	int rc = -ENOENT;

	memset(&status, 0, sizeof(status));
	for (port = 0; port < xen_evtchn_max_channels(); port++) {
		status.dom = DOMID_SELF;
		status.port = port;
		rc = HYPERVISOR_event_channel_op(EVTCHNOP_status, &status);
		if (rc < 0)
			continue;
		if (status.status != EVTCHNSTAT_virq)
			continue;
		if (status.u.virq == virq && status.vcpu == xen_vcpu_nr(cpu)) {
			*evtchn = port;
			break;
		}
	}
	return rc;
}

/**
 * xen_evtchn_nr_channels - number of usable event channel ports
 *
 * This may be less than the maximum supported by the current
 * hypervisor ABI. Use xen_evtchn_max_channels() for the maximum
 * supported by the current hypervisor ABI.
 */
unsigned xen_evtchn_nr_channels(void)
{
	return evtchn_ops->nr_channels();
}
EXPORT_SYMBOL_GPL(xen_evtchn_nr_channels);
int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
{
	struct evtchn_bind_virq bind_virq;
	evtchn_port_t evtchn = 0;
	int irq, ret;

	mutex_lock(&irq_mapping_update_lock);

	irq = per_cpu(virq_to_irq, cpu)[virq];

	if (irq == -1) {
		irq = xen_allocate_irq_dynamic();
		if (irq < 0)
			goto out;

		if (percpu)
			irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
						      handle_percpu_irq, "virq");
		else
			irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
						      handle_edge_irq, "virq");

		bind_virq.virq = virq;
		bind_virq.vcpu = xen_vcpu_nr(cpu);
		ret = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq);
		if (ret == 0)
			evtchn = bind_virq.port;
		else {
			if (ret == -EEXIST)
				ret = find_virq(virq, cpu, &evtchn);
			BUG_ON(ret < 0);
		}

		ret = xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
		if (ret < 0) {
			__unbind_from_irq(irq);
			irq = ret;
			goto out;
		}

		/*
		 * Force the affinity mask for percpu interrupts so proc
		 * shows the correct target.
		 */
		bind_evtchn_to_cpu(evtchn, cpu, percpu);
	} else {
		struct irq_info *info = info_for_irq(irq);
		WARN_ON(info == NULL || info->type != IRQT_VIRQ);
	}

out:
	mutex_unlock(&irq_mapping_update_lock);

	return irq;
}

static void unbind_from_irq(unsigned int irq)
{
	mutex_lock(&irq_mapping_update_lock);
	__unbind_from_irq(irq);
	mutex_unlock(&irq_mapping_update_lock);
}
static int bind_evtchn_to_irqhandler_chip(evtchn_port_t evtchn,
					  irq_handler_t handler,
					  unsigned long irqflags,
					  const char *devname, void *dev_id,
					  struct irq_chip *chip)
{
	int irq, retval;

	irq = bind_evtchn_to_irq_chip(evtchn, chip, NULL);
	if (irq < 0)
		return irq;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

int bind_evtchn_to_irqhandler(evtchn_port_t evtchn,
			      irq_handler_t handler,
			      unsigned long irqflags,
			      const char *devname, void *dev_id)
{
	return bind_evtchn_to_irqhandler_chip(evtchn, handler, irqflags,
					      devname, dev_id,
					      &xen_dynamic_chip);
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler);
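/*
 * Usage sketch (names illustrative): bind a port granted by the other
 * end and start receiving it as a regular Linux interrupt.
 *
 *	irq = bind_evtchn_to_irqhandler(port, my_handler, 0,
 *					"my-device", my_priv);
 *	if (irq < 0)
 *		return irq;	// binding or request_irq() failed
 *	...
 *	unbind_from_irqhandler(irq, my_priv);	// tear down
 */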
int bind_evtchn_to_irqhandler_lateeoi(evtchn_port_t evtchn,
				      irq_handler_t handler,
				      unsigned long irqflags,
				      const char *devname, void *dev_id)
{
	return bind_evtchn_to_irqhandler_chip(evtchn, handler, irqflags,
					      devname, dev_id,
					      &xen_lateeoi_chip);
}
EXPORT_SYMBOL_GPL(bind_evtchn_to_irqhandler_lateeoi);

static int bind_interdomain_evtchn_to_irqhandler_chip(
		struct xenbus_device *dev, evtchn_port_t remote_port,
		irq_handler_t handler, unsigned long irqflags,
		const char *devname, void *dev_id, struct irq_chip *chip)
{
	int irq, retval;

	irq = bind_interdomain_evtchn_to_irq_chip(dev, remote_port, chip);
	if (irq < 0)
		return irq;

	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

int bind_interdomain_evtchn_to_irqhandler_lateeoi(struct xenbus_device *dev,
						  evtchn_port_t remote_port,
						  irq_handler_t handler,
						  unsigned long irqflags,
						  const char *devname,
						  void *dev_id)
{
	return bind_interdomain_evtchn_to_irqhandler_chip(dev,
				remote_port, handler, irqflags, devname,
				dev_id, &xen_lateeoi_chip);
}
EXPORT_SYMBOL_GPL(bind_interdomain_evtchn_to_irqhandler_lateeoi);
int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
			    irq_handler_t handler,
			    unsigned long irqflags, const char *devname, void *dev_id)
{
	int irq, retval;

	irq = bind_virq_to_irq(virq, cpu, irqflags & IRQF_PERCPU);
	if (irq < 0)
		return irq;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}
EXPORT_SYMBOL_GPL(bind_virq_to_irqhandler);
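/*
 * Usage sketch: the Xen timer code binds its per-cpu VIRQ roughly like
 * this (handler name illustrative):
 *
 *	irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, timer_handler,
 *				      IRQF_PERCPU | IRQF_TIMER |
 *				      IRQF_FORCE_RESUME,
 *				      "timer", NULL);
 */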
int bind_ipi_to_irqhandler(enum ipi_vector ipi,
			   unsigned int cpu,
			   irq_handler_t handler,
			   unsigned long irqflags,
			   const char *devname,
			   void *dev_id)
{
	int irq, retval;

	irq = bind_ipi_to_irq(ipi, cpu);
	if (irq < 0)
		return irq;

	irqflags |= IRQF_NO_SUSPEND | IRQF_FORCE_RESUME | IRQF_EARLY_RESUME;
	retval = request_irq(irq, handler, irqflags, devname, dev_id);
	if (retval != 0) {
		unbind_from_irq(irq);
		return retval;
	}

	return irq;
}

void unbind_from_irqhandler(unsigned int irq, void *dev_id)
{
	struct irq_info *info = info_for_irq(irq);

	if (WARN_ON(!info))
		return;
	free_irq(irq, dev_id);
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(unbind_from_irqhandler);
/**
 * xen_set_irq_priority() - set an event channel priority.
 * @irq: irq bound to an event channel.
 * @priority: priority between XEN_IRQ_PRIORITY_MAX and XEN_IRQ_PRIORITY_MIN.
 */
int xen_set_irq_priority(unsigned irq, unsigned priority)
{
	struct evtchn_set_priority set_priority;

	set_priority.port = evtchn_from_irq(irq);
	set_priority.priority = priority;

	return HYPERVISOR_event_channel_op(EVTCHNOP_set_priority,
					   &set_priority);
}
EXPORT_SYMBOL_GPL(xen_set_irq_priority);

int evtchn_make_refcounted(evtchn_port_t evtchn)
{
	int irq = get_evtchn_to_irq(evtchn);
	struct irq_info *info;

	if (irq == -1)
		return -ENOENT;

	info = info_for_irq(irq);

	if (!info)
		return -ENOENT;

	WARN_ON(info->refcnt != -1);

	info->refcnt = 1;

	return 0;
}
EXPORT_SYMBOL_GPL(evtchn_make_refcounted);

int evtchn_get(evtchn_port_t evtchn)
{
	int irq;
	struct irq_info *info;
	int err = -ENOENT;

	if (evtchn >= xen_evtchn_max_channels())
		return -EINVAL;

	mutex_lock(&irq_mapping_update_lock);

	irq = get_evtchn_to_irq(evtchn);
	if (irq == -1)
		goto done;

	info = info_for_irq(irq);

	if (!info)
		goto done;

	err = -EINVAL;
	if (info->refcnt <= 0 || info->refcnt == SHRT_MAX)
		goto done;

	info->refcnt++;
	err = 0;
 done:
	mutex_unlock(&irq_mapping_update_lock);

	return err;
}
EXPORT_SYMBOL_GPL(evtchn_get);

void evtchn_put(evtchn_port_t evtchn)
{
	int irq = get_evtchn_to_irq(evtchn);
	if (WARN_ON(irq == -1))
		return;
	unbind_from_irq(irq);
}
EXPORT_SYMBOL_GPL(evtchn_put);
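/*
 * Refcount pattern sketch: a port is switched from the default
 * "unmanaged" state (refcnt == -1) to refcounted once, then each
 * additional user brackets its access ("use_the_port" is hypothetical):
 *
 *	evtchn_make_refcounted(port);	// refcnt: -1 -> 1
 *	...
 *	if (evtchn_get(port) == 0) {	// extra user, refcnt++
 *		use_the_port();
 *		evtchn_put(port);	// refcnt--, unbinds at zero
 *	}
 */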
void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq;

#ifdef CONFIG_X86
	if (unlikely(vector == XEN_NMI_VECTOR)) {
		int rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi, xen_vcpu_nr(cpu),
					    NULL);
		if (rc < 0)
			printk(KERN_WARNING "Sending nmi to CPU%d failed (rc:%d)\n", cpu, rc);
		return;
	}
#endif
	irq = per_cpu(ipi_to_irq, cpu)[vector];
	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}
struct evtchn_loop_ctrl {
	ktime_t timeout;
	unsigned count;
	bool defer_eoi;
};

void handle_irq_for_port(evtchn_port_t port, struct evtchn_loop_ctrl *ctrl)
{
	int irq;
	struct irq_info *info;
	struct xenbus_device *dev;

	irq = get_evtchn_to_irq(port);
	if (irq == -1)
		return;

	/*
	 * Check for timeout every 256 events.
	 * We are setting the timeout value only after the first 256
	 * events in order to not hurt the common case of few loop
	 * iterations. The 256 is basically an arbitrary value.
	 *
	 * In case we are hitting the timeout we need to defer all further
	 * EOIs in order to ensure to leave the event handling loop rather
	 * sooner than later.
	 */
	if (!ctrl->defer_eoi && !(++ctrl->count & 0xff)) {
		ktime_t kt = ktime_get();

		if (!ctrl->timeout) {
			kt = ktime_add_ms(kt,
					  jiffies_to_msecs(event_loop_timeout));
			ctrl->timeout = kt;
		} else if (kt > ctrl->timeout) {
			ctrl->defer_eoi = true;
		}
	}

	info = info_for_irq(irq);
	if (xchg_acquire(&info->is_active, 1))
		return;

	dev = (info->type == IRQT_EVTCHN) ? info->u.interdomain : NULL;
	if (dev)
		atomic_inc(&dev->events);

	if (ctrl->defer_eoi) {
		info->eoi_cpu = smp_processor_id();
		info->irq_epoch = __this_cpu_read(irq_epoch);
		info->eoi_time = get_jiffies_64() + event_eoi_delay;
	}

	generic_handle_irq(irq);
}
static void __xen_evtchn_do_upcall(void)
{
	struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu);
	int cpu = smp_processor_id();
	struct evtchn_loop_ctrl ctrl = { 0 };

	read_lock(&evtchn_rwlock);

	do {
		vcpu_info->evtchn_upcall_pending = 0;

		xen_evtchn_handle_events(cpu, &ctrl);

		BUG_ON(!irqs_disabled());

		virt_rmb(); /* Hypervisor can set upcall pending. */

	} while (vcpu_info->evtchn_upcall_pending);

	read_unlock(&evtchn_rwlock);

	/*
	 * Increment irq_epoch only now to defer EOIs only for
	 * xen_irq_lateeoi() invocations occurring from inside the loop
	 * above.
	 */
	__this_cpu_inc(irq_epoch);
}

void xen_evtchn_do_upcall(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	irq_enter();

	__xen_evtchn_do_upcall();

	irq_exit();
	set_irq_regs(old_regs);
}

void xen_hvm_evtchn_do_upcall(void)
{
	__xen_evtchn_do_upcall();
}
EXPORT_SYMBOL_GPL(xen_hvm_evtchn_do_upcall);
/* Rebind a new event channel to an existing irq. */
void rebind_evtchn_irq(evtchn_port_t evtchn, int irq)
{
	struct irq_info *info = info_for_irq(irq);

	if (WARN_ON(!info))
		return;

	/* Make sure the irq is masked, since the new event channel
	   will also be masked. */
	disable_irq(irq);

	mutex_lock(&irq_mapping_update_lock);

	/* After resume the irq<->evtchn mappings are all cleared out */
	BUG_ON(get_evtchn_to_irq(evtchn) != -1);
	/* Expect irq to have been bound before,
	   so there should be a proper type */
	BUG_ON(info->type == IRQT_UNBOUND);

	(void)xen_irq_info_evtchn_setup(irq, evtchn, NULL);

	mutex_unlock(&irq_mapping_update_lock);

	bind_evtchn_to_cpu(evtchn, info->cpu, false);

	/* Unmask the event channel. */
	enable_irq(irq);
}
/* Rebind an evtchn so that it gets delivered to a specific cpu */
static int xen_rebind_evtchn_to_cpu(struct irq_info *info, unsigned int tcpu)
{
	struct evtchn_bind_vcpu bind_vcpu;
	evtchn_port_t evtchn = info ? info->evtchn : 0;

	if (!VALID_EVTCHN(evtchn))
		return -1;

	if (!xen_support_evtchn_rebind())
		return -1;

	/* Send future instances of this interrupt to other vcpu. */
	bind_vcpu.port = evtchn;
	bind_vcpu.vcpu = xen_vcpu_nr(tcpu);

	/*
	 * Mask the event while changing the VCPU binding to prevent
	 * it being delivered on an unexpected VCPU.
	 */
	do_mask(info, EVT_MASK_REASON_TEMPORARY);

	/*
	 * If this fails, it usually just indicates that we're dealing with a
	 * virq or IPI channel, which don't actually need to be rebound. Ignore
	 * it, but don't do the xenlinux-level rebind in that case.
	 */
	if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_vcpu, &bind_vcpu) >= 0)
		bind_evtchn_to_cpu(evtchn, tcpu, false);

	do_unmask(info, EVT_MASK_REASON_TEMPORARY);

	return 0;
}

/*
 * Find the CPU within @dest mask which has the least number of channels
 * assigned. This is not precise as the per cpu counts can be modified
 * concurrently.
 */
static unsigned int select_target_cpu(const struct cpumask *dest)
{
	unsigned int cpu, best_cpu = UINT_MAX, minch = UINT_MAX;

	for_each_cpu_and(cpu, dest, cpu_online_mask) {
		unsigned int curch = atomic_read(&channels_on_cpu[cpu]);

		if (curch < minch) {
			minch = curch;
			best_cpu = cpu;
		}
	}

	/*
	 * Catch the unlikely case that dest contains no online CPUs. The
	 * fallback call with cpu_online_mask always finds a CPU, so it
	 * cannot recurse again.
	 */
	if (best_cpu == UINT_MAX)
		return select_target_cpu(cpu_online_mask);

	return best_cpu;
}
static int set_affinity_irq(struct irq_data *data, const struct cpumask *dest,
			    bool force)
{
	unsigned int tcpu = select_target_cpu(dest);
	int ret;

	ret = xen_rebind_evtchn_to_cpu(info_for_irq(data->irq), tcpu);
	if (!ret)
		irq_data_update_effective_affinity(data, cpumask_of(tcpu));

	return ret;
}

static void enable_dynirq(struct irq_data *data)
{
	struct irq_info *info = info_for_irq(data->irq);
	evtchn_port_t evtchn = info ? info->evtchn : 0;

	if (VALID_EVTCHN(evtchn))
		do_unmask(info, EVT_MASK_REASON_EXPLICIT);
}

static void disable_dynirq(struct irq_data *data)
{
	struct irq_info *info = info_for_irq(data->irq);
	evtchn_port_t evtchn = info ? info->evtchn : 0;

	if (VALID_EVTCHN(evtchn))
		do_mask(info, EVT_MASK_REASON_EXPLICIT);
}

static void ack_dynirq(struct irq_data *data)
{
	struct irq_info *info = info_for_irq(data->irq);
	evtchn_port_t evtchn = info ? info->evtchn : 0;

	if (VALID_EVTCHN(evtchn))
		event_handler_exit(info);
}

static void mask_ack_dynirq(struct irq_data *data)
{
	disable_dynirq(data);
	ack_dynirq(data);
}

static void lateeoi_ack_dynirq(struct irq_data *data)
{
	struct irq_info *info = info_for_irq(data->irq);
	evtchn_port_t evtchn = info ? info->evtchn : 0;

	if (VALID_EVTCHN(evtchn)) {
		do_mask(info, EVT_MASK_REASON_EOI_PENDING);
		event_handler_exit(info);
	}
}

static void lateeoi_mask_ack_dynirq(struct irq_data *data)
{
	struct irq_info *info = info_for_irq(data->irq);
	evtchn_port_t evtchn = info ? info->evtchn : 0;

	if (VALID_EVTCHN(evtchn)) {
		do_mask(info, EVT_MASK_REASON_EXPLICIT);
		event_handler_exit(info);
	}
}

static int retrigger_dynirq(struct irq_data *data)
{
	struct irq_info *info = info_for_irq(data->irq);
	evtchn_port_t evtchn = info ? info->evtchn : 0;

	if (!VALID_EVTCHN(evtchn))
		return 0;

	do_mask(info, EVT_MASK_REASON_TEMPORARY);
	set_evtchn(evtchn);
	do_unmask(info, EVT_MASK_REASON_TEMPORARY);

	return 1;
}
static void restore_pirqs(void)
{
	int pirq, rc, irq, gsi;
	struct physdev_map_pirq map_irq;
	struct irq_info *info;

	list_for_each_entry(info, &xen_irq_list_head, list) {
		if (info->type != IRQT_PIRQ)
			continue;

		pirq = info->u.pirq.pirq;
		gsi = info->u.pirq.gsi;
		irq = info->irq;

		/* save/restore of PT devices doesn't work, so at this point the
		   only devices present are GSI based emulated devices */
		if (!gsi)
			continue;

		map_irq.domid = DOMID_SELF;
		map_irq.type = MAP_PIRQ_TYPE_GSI;
		map_irq.index = gsi;
		map_irq.pirq = pirq;

		rc = HYPERVISOR_physdev_op(PHYSDEVOP_map_pirq, &map_irq);
		if (rc) {
			pr_warn("xen map irq failed gsi=%d irq=%d pirq=%d rc=%d\n",
				gsi, irq, pirq, rc);
			xen_free_irq(irq);
			continue;
		}

		printk(KERN_DEBUG "xen: --> irq=%d, pirq=%d\n", irq, map_irq.pirq);

		__startup_pirq(irq);
	}
}

static void restore_cpu_virqs(unsigned int cpu)
{
	struct evtchn_bind_virq bind_virq;
	evtchn_port_t evtchn;
	int virq, irq;

	for (virq = 0; virq < NR_VIRQS; virq++) {
		if ((irq = per_cpu(virq_to_irq, cpu)[virq]) == -1)
			continue;

		BUG_ON(virq_from_irq(irq) != virq);

		/* Get a new binding from Xen. */
		bind_virq.virq = virq;
		bind_virq.vcpu = xen_vcpu_nr(cpu);
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq,
						&bind_virq) != 0)
			BUG();
		evtchn = bind_virq.port;

		/* Record the new mapping. */
		(void)xen_irq_info_virq_setup(cpu, irq, evtchn, virq);
		/* The affinity mask is still valid */
		bind_evtchn_to_cpu(evtchn, cpu, false);
	}
}

static void restore_cpu_ipis(unsigned int cpu)
{
	struct evtchn_bind_ipi bind_ipi;
	evtchn_port_t evtchn;
	int ipi, irq;

	for (ipi = 0; ipi < XEN_NR_IPIS; ipi++) {
		if ((irq = per_cpu(ipi_to_irq, cpu)[ipi]) == -1)
			continue;

		BUG_ON(ipi_from_irq(irq) != ipi);

		/* Get a new binding from Xen. */
		bind_ipi.vcpu = xen_vcpu_nr(cpu);
		if (HYPERVISOR_event_channel_op(EVTCHNOP_bind_ipi,
						&bind_ipi) != 0)
			BUG();
		evtchn = bind_ipi.port;

		/* Record the new mapping. */
		(void)xen_irq_info_ipi_setup(cpu, irq, evtchn, ipi);
		/* The affinity mask is still valid */
		bind_evtchn_to_cpu(evtchn, cpu, false);
	}
}
/* Clear an irq's pending state, in preparation for polling on it */
void xen_clear_irq_pending(int irq)
{
	struct irq_info *info = info_for_irq(irq);
	evtchn_port_t evtchn = info ? info->evtchn : 0;

	if (VALID_EVTCHN(evtchn))
		event_handler_exit(info);
}
EXPORT_SYMBOL(xen_clear_irq_pending);
void xen_set_irq_pending(int irq)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn))
		set_evtchn(evtchn);
}

bool xen_test_irq_pending(int irq)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);
	bool ret = false;

	if (VALID_EVTCHN(evtchn))
		ret = test_evtchn(evtchn);

	return ret;
}

/* Poll waiting for an irq to become pending with timeout.  In the usual case,
 * the irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq_timeout(int irq, u64 timeout)
{
	evtchn_port_t evtchn = evtchn_from_irq(irq);

	if (VALID_EVTCHN(evtchn)) {
		struct sched_poll poll;

		poll.nr_ports = 1;
		poll.timeout = timeout;
		set_xen_guest_handle(poll.ports, &evtchn);

		if (HYPERVISOR_sched_op(SCHEDOP_poll, &poll) != 0)
			BUG();
	}
}
EXPORT_SYMBOL(xen_poll_irq_timeout);
/* Poll waiting for an irq to become pending.  In the usual case, the
 * irq will be disabled so it won't deliver an interrupt. */
void xen_poll_irq(int irq)
{
	xen_poll_irq_timeout(irq, 0 /* no timeout */);
}
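/*
 * Poll pattern sketch: a PV spinlock slow path can wait for a wakeup
 * event without taking the interrupt (the irq stays disabled;
 * "woken_up" is a hypothetical condition):
 *
 *	xen_clear_irq_pending(irq);
 *	while (!woken_up())
 *		xen_poll_irq(irq);	// blocks in Xen until pending
 */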
/* Check whether the IRQ line is shared with other guests. */
int xen_test_irq_shared(int irq)
{
	struct irq_info *info = info_for_irq(irq);
	struct physdev_irq_status_query irq_status;

	if (WARN_ON(!info))
		return -ENOENT;

	irq_status.irq = info->u.pirq.pirq;

	if (HYPERVISOR_physdev_op(PHYSDEVOP_irq_status_query, &irq_status))
		return 0;
	return !(irq_status.flags & XENIRQSTAT_shared);
}
EXPORT_SYMBOL_GPL(xen_test_irq_shared);

void xen_irq_resume(void)
{
	unsigned int cpu;
	struct irq_info *info;

	/* New event-channel space is not 'live' yet. */
	xen_evtchn_resume();

	/* No IRQ <-> event-channel mappings. */
	list_for_each_entry(info, &xen_irq_list_head, list) {
		/* Zap event-channel binding */
		info->evtchn = 0;
		/* Adjust accounting */
		channels_on_cpu_dec(info);
	}

	clear_evtchn_to_irq_all();

	for_each_possible_cpu(cpu) {
		restore_cpu_virqs(cpu);
		restore_cpu_ipis(cpu);
	}

	restore_pirqs();
}
static struct irq_chip xen_dynamic_chip __read_mostly = {
	.name			= "xen-dyn",

	.irq_disable		= disable_dynirq,
	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= ack_dynirq,
	.irq_mask_ack		= mask_ack_dynirq,

	.irq_set_affinity	= set_affinity_irq,
	.irq_retrigger		= retrigger_dynirq,
};

static struct irq_chip xen_lateeoi_chip __read_mostly = {
	/* The chip name needs to contain "xen-dyn" for irqbalance to work. */
	.name			= "xen-dyn-lateeoi",

	.irq_disable		= disable_dynirq,
	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= lateeoi_ack_dynirq,
	.irq_mask_ack		= lateeoi_mask_ack_dynirq,

	.irq_set_affinity	= set_affinity_irq,
	.irq_retrigger		= retrigger_dynirq,
};

static struct irq_chip xen_pirq_chip __read_mostly = {
	.name			= "xen-pirq",

	.irq_startup		= startup_pirq,
	.irq_shutdown		= shutdown_pirq,
	.irq_enable		= enable_pirq,
	.irq_disable		= disable_pirq,

	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= eoi_pirq,
	.irq_eoi		= eoi_pirq,
	.irq_mask_ack		= mask_ack_pirq,

	.irq_set_affinity	= set_affinity_irq,

	.irq_retrigger		= retrigger_dynirq,
};

static struct irq_chip xen_percpu_chip __read_mostly = {
	.name			= "xen-percpu",

	.irq_disable		= disable_dynirq,
	.irq_mask		= disable_dynirq,
	.irq_unmask		= enable_dynirq,

	.irq_ack		= ack_dynirq,
};
#ifdef CONFIG_XEN_PVHVM
/* Vector callbacks are better than PCI interrupts to receive event
 * channel notifications because we can receive vector callbacks on any
 * vcpu and we don't need PCI support or APIC interactions. */
void xen_setup_callback_vector(void)
{
	uint64_t callback_via;

	if (xen_have_vector_callback) {
		callback_via = HVM_CALLBACK_VECTOR(HYPERVISOR_CALLBACK_VECTOR);
		if (xen_set_callback_via(callback_via)) {
			pr_err("Request for Xen HVM callback vector failed\n");
			xen_have_vector_callback = 0;
		}
	}
}

static __init void xen_alloc_callback_vector(void)
{
	if (!xen_have_vector_callback)
		return;

	pr_info("Xen HVM callback vector for event delivery is enabled\n");
	alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, asm_sysvec_xen_hvm_callback);
}
#else
void xen_setup_callback_vector(void) {}
static inline void xen_alloc_callback_vector(void) {}
#endif
bool xen_fifo_events = true;
module_param_named(fifo_events, xen_fifo_events, bool, 0);

static int xen_evtchn_cpu_prepare(unsigned int cpu)
{
	int ret = 0;

	xen_cpu_init_eoi(cpu);

	if (evtchn_ops->percpu_init)
		ret = evtchn_ops->percpu_init(cpu);

	return ret;
}

static int xen_evtchn_cpu_dead(unsigned int cpu)
{
	int ret = 0;

	if (evtchn_ops->percpu_deinit)
		ret = evtchn_ops->percpu_deinit(cpu);

	return ret;
}
void __init xen_init_IRQ(void)
{
	int ret = -EINVAL;
	evtchn_port_t evtchn;

	if (xen_fifo_events)
		ret = xen_evtchn_fifo_init();
	if (ret < 0) {
		xen_evtchn_2l_init();
		xen_fifo_events = false;
	}

	xen_cpu_init_eoi(smp_processor_id());

	cpuhp_setup_state_nocalls(CPUHP_XEN_EVTCHN_PREPARE,
				  "xen/evtchn:prepare",
				  xen_evtchn_cpu_prepare, xen_evtchn_cpu_dead);

	evtchn_to_irq = kcalloc(EVTCHN_ROW(xen_evtchn_max_channels()),
				sizeof(*evtchn_to_irq), GFP_KERNEL);
	BUG_ON(!evtchn_to_irq);

	/* No event channels are 'live' right now. */
	for (evtchn = 0; evtchn < xen_evtchn_nr_channels(); evtchn++)
		mask_evtchn(evtchn);

	pirq_needs_eoi = pirq_needs_eoi_flag;

#ifdef CONFIG_X86
	if (xen_pv_domain()) {
		if (xen_initial_domain())
			pci_xen_initial_domain();
	}
	if (xen_feature(XENFEAT_hvm_callback_vector)) {
		xen_setup_callback_vector();
		xen_alloc_callback_vector();
	}

	if (xen_hvm_domain()) {
		native_init_IRQ();
		/* pci_xen_hvm_init must be called after native_init_IRQ so that
		 * __acpi_register_gsi can point at the right function */
		pci_xen_hvm_init();
	} else {
		int rc;
		struct physdev_pirq_eoi_gmfn eoi_gmfn;

		pirq_eoi_map = (void *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
		eoi_gmfn.gmfn = virt_to_gfn(pirq_eoi_map);
		rc = HYPERVISOR_physdev_op(PHYSDEVOP_pirq_eoi_gmfn_v2, &eoi_gmfn);
		if (rc != 0) {
			free_page((unsigned long) pirq_eoi_map);
			pirq_eoi_map = NULL;
		} else
			pirq_needs_eoi = pirq_check_eoi_map;
	}
#endif
}