/*
 * Copyright 2016,2017 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#define pr_fmt(fmt) "xive: " fmt

#include <linux/types.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/debugfs.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/msi.h>

#include <asm/machdep.h>
#include <asm/errno.h>
#include <asm/xive-regs.h>

#include "xive-internal.h"
#ifdef DEBUG_VERBOSE
#define DBG_VERBOSE(fmt, ...)	pr_devel("cpu %d - " fmt, \
					 smp_processor_id(), ## __VA_ARGS__)
#else
#define DBG_VERBOSE(fmt...)	do { } while(0)
#endif
EXPORT_SYMBOL_GPL(__xive_enabled);
bool xive_cmdline_disabled;

/* We use only one priority for now */
static u8 xive_irq_priority;

/* TIMA exported to KVM */
void __iomem *xive_tima;
EXPORT_SYMBOL_GPL(xive_tima);

static const struct xive_ops *xive_ops;

/* Our global interrupt domain */
static struct irq_domain *xive_irq_domain;

/* The IPIs all use the same logical irq number */
static u32 xive_ipi_irq;

/* Xive state for each CPU */
static DEFINE_PER_CPU(struct xive_cpu *, xive_cpu);

/*
 * A "disabled" interrupt should never fire, to catch problems
 * we set its logical number to this
 */
#define XIVE_BAD_IRQ		0x7fffffff
#define XIVE_MAX_IRQ		(XIVE_BAD_IRQ - 1)

/* An invalid CPU target */
#define XIVE_INVALID_TARGET	(-1)
/*
 * Read the next entry in a queue, return its content if it's valid
 * or 0 if there is no new entry.
 *
 * The queue pointer is moved forward unless "just_peek" is set.
 */
static u32 xive_read_eq(struct xive_q *q, bool just_peek)
	cur = be32_to_cpup(q->qpage + q->idx);

	/* Check valid bit (31) vs current toggle polarity */
	if ((cur >> 31) == q->toggle)

	/* If consuming from the queue ... */
	q->idx = (q->idx + 1) & q->msk;

	/* Wrap around: flip valid toggle */

	/* Mask out the valid bit (31) */
	return cur & 0x7fffffff;
/*
 * Scans all the queues that may have interrupts in them
 * (based on "pending_prio") in priority order until an
 * interrupt is found or all the queues are empty.
 *
 * Then updates the CPPR (Current Processor Priority
 * Register) based on the most favored interrupt found
 * (0xff if none) and returns what was found (0 if none).
 *
 * If just_peek is set, return the most favored pending
 * interrupt if any but don't update the queue pointers.
 *
 * Note: This function can operate generically on any number
 * of queues (up to 8). The current implementation of the XIVE
 * driver only uses a single queue however.
 *
 * Note2: This will also "flush" the "pending_count" of a queue
 * into the "count" when that queue is observed to be empty.
 * This is used to keep track of the number of interrupts
 * targeting a queue. When an interrupt is moved away from
 * a queue, we only decrement that queue's count once the queue
 * has been observed empty to avoid races.
 */
static u32 xive_scan_interrupts(struct xive_cpu *xc, bool just_peek)
	/* Find highest pending priority */
	while (xc->pending_prio != 0) {

		prio = ffs(xc->pending_prio) - 1;
		DBG_VERBOSE("scan_irq: trying prio %d\n", prio);

		irq = xive_read_eq(&xc->queue[prio], just_peek);

		/* Found something ? That's it */

		/* Clear pending bits */
		xc->pending_prio &= ~(1 << prio);

		/*
		 * Check if the queue count needs adjusting due to
		 * interrupts being moved away. See description of
		 * xive_dec_target_count()
		 */
		q = &xc->queue[prio];
		if (atomic_read(&q->pending_count)) {
			int p = atomic_xchg(&q->pending_count, 0);

			WARN_ON(p > atomic_read(&q->count));
			atomic_sub(p, &q->count);

	/* If nothing was found, set CPPR to 0xff */

	/* Update HW CPPR to match if necessary */
	if (prio != xc->cppr) {
		DBG_VERBOSE("scan_irq: adjusting CPPR to %d\n", prio);

		out_8(xive_tima + xive_tima_offset + TM_CPPR, prio);
/*
 * This is used to perform the magic loads from an ESB
 * described in xive.h
 */
static notrace u8 xive_esb_read(struct xive_irq_data *xd, u32 offset)
	/* Handle HW errata */
	if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
		offset |= offset << 4;

	if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
		val = xive_ops->esb_rw(xd->hw_irq, offset, 0, 0);
		val = in_be64(xd->eoi_mmio + offset);

static void xive_esb_write(struct xive_irq_data *xd, u32 offset, u64 data)
	/* Handle HW errata */
	if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
		offset |= offset << 4;

	if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw)
		xive_ops->esb_rw(xd->hw_irq, offset, data, 1);
		out_be64(xd->eoi_mmio + offset, data);
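
/* Debug helper: dump the first two entries of an event queue, for xmon */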
static notrace void xive_dump_eq(const char *name, struct xive_q *q)
	i0 = be32_to_cpup(q->qpage + idx);
	idx = (idx + 1) & q->msk;
	i1 = be32_to_cpup(q->qpage + idx);
	xmon_printf(" %s Q T=%d %08x %08x ...\n", name,

notrace void xmon_xive_do_dump(int cpu)
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);

	xmon_printf("XIVE state for CPU %d:\n", cpu);
	xmon_printf(" pp=%02x cppr=%02x\n", xc->pending_prio, xc->cppr);
	xive_dump_eq("IRQ", &xc->queue[xive_irq_priority]);

		u64 val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET);
		xmon_printf(" IPI state: %x:%c%c\n", xc->hw_ipi,
			    val & XIVE_ESB_VAL_P ? 'P' : 'p',
			    val & XIVE_ESB_VAL_Q ? 'Q' : 'q');

#endif /* CONFIG_XMON */
static unsigned int xive_get_irq(void)
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);

	/*
	 * This can be called either as a result of a HW interrupt or
	 * as a "replay" because EOI decided there was still something
	 * in one of the queues.
	 *
	 * First we perform an ACK cycle in order to update our mask
	 * of pending priorities. This will also have the effect of
	 * updating the CPPR to the most favored pending interrupts.
	 *
	 * In the future, if we have a way to differentiate a first
	 * entry (on HW interrupt) from a replay triggered by EOI,
	 * we could skip this on replays unless soft-masking tells us
	 * that a new HW interrupt occurred.
	 */
	xive_ops->update_pending(xc);

	DBG_VERBOSE("get_irq: pending=%02x\n", xc->pending_prio);

	/* Scan our queue(s) for interrupts */
	irq = xive_scan_interrupts(xc, false);

	DBG_VERBOSE("get_irq: got irq 0x%x, new pending=0x%02x\n",
		    irq, xc->pending_prio);

	/* Return pending interrupt if any */
	if (irq == XIVE_BAD_IRQ)
/*
 * After EOI'ing an interrupt, we need to re-check the queue
 * to see if another interrupt is pending since multiple
 * interrupts can coalesce into a single notification to the
 * CPU.
 *
 * If we find that there is indeed more in there, we call
 * force_external_irq_replay() to make Linux synthesize an
 * external interrupt on the next call to local_irq_restore().
 */
static void xive_do_queue_eoi(struct xive_cpu *xc)
	if (xive_scan_interrupts(xc, true) != 0) {
		DBG_VERBOSE("eoi: pending=0x%02x\n", xc->pending_prio);
		force_external_irq_replay();
/*
 * EOI an interrupt at the source. There are several methods
 * to do this depending on the HW version and source type.
 */
void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
	/* If the XIVE supports the new "store EOI" facility, use it */
	if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
		xive_esb_write(xd, XIVE_ESB_STORE_EOI, 0);
	else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
		/*
		 * The FW told us to call it. This happens for some
		 * interrupt sources that need additional HW whacking
		 * beyond the ESB manipulation. For example LPC interrupts
		 * on P9 DD1.0 need a latch to be cleared in the LPC bridge
		 * itself. The Firmware will take care of it.
		 */
		if (WARN_ON_ONCE(!xive_ops->eoi))
		xive_ops->eoi(hw_irq);

		/*
		 * Otherwise for EOI, we use the special MMIO that does
		 * a clear of both P and Q and returns the old Q,
		 * except for LSIs where we use the "EOI cycle" special load.
		 *
		 * This allows us to then do a re-trigger if Q was set
		 * rather than synthesizing an interrupt in software.
		 *
		 * For LSIs, using the HW EOI cycle works around a problem
		 * on P9 DD1 PHBs where the other ESB accesses don't work.
		 */
		if (xd->flags & XIVE_IRQ_FLAG_LSI)
			xive_esb_read(xd, XIVE_ESB_LOAD_EOI);
			eoi_val = xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
			DBG_VERBOSE("eoi_val=%x\n", eoi_val);

			/* Re-trigger if needed */
			if ((eoi_val & XIVE_ESB_VAL_Q) && xd->trig_mmio)
				out_be64(xd->trig_mmio, 0);
/* irq_chip eoi callback */
static void xive_irq_eoi(struct irq_data *d)
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);

	DBG_VERBOSE("eoi_irq: irq=%d [0x%lx] pending=%02x\n",
		    d->irq, irqd_to_hwirq(d), xc->pending_prio);

	/*
	 * EOI the source if it hasn't been disabled and hasn't
	 * been passed-through to a KVM guest
	 */
	if (!irqd_irq_disabled(d) && !irqd_is_forwarded_to_vcpu(d))
		xive_do_source_eoi(irqd_to_hwirq(d), xd);

	/*
	 * Clear saved_p to indicate that it's no longer occupying
	 * a queue slot on the target queue
	 */

	/* Check for more work in the queue */
	xive_do_queue_eoi(xc);
/*
 * Helper used to mask and unmask an interrupt source. This
 * is only called for normal interrupts that do not require
 * masking/unmasking via firmware.
 */
static void xive_do_source_set_mask(struct xive_irq_data *xd,

	/*
	 * If the interrupt had P set, it may be in a queue.
	 *
	 * We need to make sure we don't re-enable it until it
	 * has been fetched from that queue and EOId. We keep
	 * a copy of that P state and use it to restore the
	 * ESB accordingly on unmask.
	 */
		val = xive_esb_read(xd, XIVE_ESB_SET_PQ_01);
		xd->saved_p = !!(val & XIVE_ESB_VAL_P);
	} else if (xd->saved_p)
		xive_esb_read(xd, XIVE_ESB_SET_PQ_10);
		xive_esb_read(xd, XIVE_ESB_SET_PQ_00);
/*
 * Try to choose "cpu" as a new interrupt target. Increments
 * the queue accounting for that target if it's not already
 * full.
 */
static bool xive_try_pick_target(int cpu)
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
	struct xive_q *q = &xc->queue[xive_irq_priority];

	/*
	 * Calculate max number of interrupts in that queue.
	 *
	 * We leave a gap of 1 just in case...
	 */
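	/*
	 * For instance, with the 64k queue page used by this driver and
	 * 4-byte (__be32) entries, q->msk is 0x3fff, so at most 0x3fff
	 * interrupts are ever accounted to a single queue.
	 */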
	max = (q->msk + 1) - 1;
	return !!atomic_add_unless(&q->count, 1, max);
/*
 * Un-account an interrupt for a target CPU. We don't directly
 * decrement q->count since the interrupt might still be present
 * in the queue.
 *
 * Instead increment a separate counter "pending_count" which
 * will be subtracted from "count" later when that CPU observes
 * the queue to be empty.
 */
static void xive_dec_target_count(int cpu)
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
	struct xive_q *q = &xc->queue[xive_irq_priority];

	if (unlikely(WARN_ON(cpu < 0 || !xc))) {
		pr_err("%s: cpu=%d xc=%p\n", __func__, cpu, xc);

	/*
	 * We increment the "pending count" which will be used
	 * to decrement the target queue count whenever it's next
	 * processed and found empty. This ensures that we don't
	 * decrement while we still have the interrupt there.
	 */
	atomic_inc(&q->pending_count);
/* Find a tentative CPU target in a CPU mask */
static int xive_find_target_in_mask(const struct cpumask *mask,
	int cpu, first, num, i;

	/* Pick up a starting point CPU in the mask based on fuzz */
	num = min_t(int, cpumask_weight(mask), nr_cpu_ids);

	cpu = cpumask_first(mask);
	for (i = 0; i < first && cpu < nr_cpu_ids; i++)
		cpu = cpumask_next(cpu, mask);

	if (WARN_ON(cpu >= nr_cpu_ids))
		cpu = cpumask_first(cpu_online_mask);

	/* Remember first one to handle wrap-around */

	/*
	 * Now go through the entire mask until we find a valid
	 * target.
	 */

		/*
		 * We re-check online as the fallback case passes us
		 * an untested affinity mask.
		 */
		if (cpu_online(cpu) && xive_try_pick_target(cpu))
		cpu = cpumask_next(cpu, mask);

	if (cpu >= nr_cpu_ids)
		cpu = cpumask_first(mask);
/*
 * Pick a target CPU for an interrupt. This is done at
 * startup or if the affinity is changed in a way that
 * invalidates the current target.
 */
static int xive_pick_irq_target(struct irq_data *d,
				const struct cpumask *affinity)
	static unsigned int fuzz;
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	/*
	 * If we have chip IDs, first we try to build a mask of
	 * online CPUs on the same chip as the source and look for
	 * a target in there.
	 */
	if (xd->src_chip != XIVE_INVALID_CHIP_ID &&
	    zalloc_cpumask_var(&mask, GFP_ATOMIC)) {
		/* Build a mask of matching chip IDs */
		for_each_cpu_and(cpu, affinity, cpu_online_mask) {
			struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
			if (xc->chip_id == xd->src_chip)
				cpumask_set_cpu(cpu, mask);

		/* Try to find a target */
		if (cpumask_empty(mask))

		cpu = xive_find_target_in_mask(mask, fuzz++);
		free_cpumask_var(mask);

	/* No chip IDs, fallback to using the affinity mask */
	return xive_find_target_in_mask(affinity, fuzz++);
static unsigned int xive_irq_startup(struct irq_data *d)
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);

	pr_devel("xive_irq_startup: irq %d [0x%x] data @%p\n",

#ifdef CONFIG_PCI_MSI
	/*
	 * The generic MSI code returns with the interrupt disabled on the
	 * card, using the MSI mask bits. Firmware doesn't appear to unmask
	 * at that level, so we do it here by hand.
	 */
	if (irq_data_get_msi_desc(d))
		pci_msi_unmask_irq(d);

	target = xive_pick_irq_target(d, irq_data_get_affinity_mask(d));
	if (target == XIVE_INVALID_TARGET) {
		/* Try again breaking affinity */
		target = xive_pick_irq_target(d, cpu_online_mask);
		if (target == XIVE_INVALID_TARGET)

		pr_warn("irq %d started with broken affinity\n", d->irq);

	if (WARN_ON(target == XIVE_INVALID_TARGET ||
		    target >= nr_cpu_ids))
		target = smp_processor_id();

	/*
	 * Configure the logical number to be the Linux IRQ number
	 * and set the target queue
	 */
	rc = xive_ops->configure_irq(hw_irq,
				     get_hard_smp_processor_id(target),
				     xive_irq_priority, d->irq);

	xive_do_source_set_mask(xd, false);
static void xive_irq_shutdown(struct irq_data *d)
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);

	pr_devel("xive_irq_shutdown: irq %d [0x%x] data @%p\n",

	if (WARN_ON(xd->target == XIVE_INVALID_TARGET))

	/* Mask the interrupt at the source */
	xive_do_source_set_mask(xd, true);

	/*
	 * The above may have set saved_p. We clear it, otherwise it
	 * will prevent re-enabling later on. It is ok to forget the
	 * fact that the interrupt might be in a queue because we are
	 * accounting that already in xive_dec_target_count() and will
	 * be re-routing it to a new queue with proper accounting when
	 * it's started up again.
	 */

	/*
	 * Mask the interrupt in HW in the IVT/EAS and set the number
	 * to be the "bad" IRQ number
	 */
	xive_ops->configure_irq(hw_irq,
				get_hard_smp_processor_id(xd->target),

	xive_dec_target_count(xd->target);
	xd->target = XIVE_INVALID_TARGET;
static void xive_irq_unmask(struct irq_data *d)
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	pr_devel("xive_irq_unmask: irq %d data @%p\n", d->irq, xd);

	/*
	 * This is a workaround for PCI LSI problems on P9; for
	 * these, we call FW to set the mask. The problems might
	 * be fixed by P9 DD2.0, in which case firmware will no
	 * longer set that flag.
	 */
	if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) {
		unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
		xive_ops->configure_irq(hw_irq,
					get_hard_smp_processor_id(xd->target),
					xive_irq_priority, d->irq);

	xive_do_source_set_mask(xd, false);
static void xive_irq_mask(struct irq_data *d)
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	pr_devel("xive_irq_mask: irq %d data @%p\n", d->irq, xd);

	/*
	 * This is a workaround for PCI LSI problems on P9; for
	 * these, we call OPAL to set the mask. The problems might
	 * be fixed by P9 DD2.0, in which case firmware will no
	 * longer set that flag.
	 */
	if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) {
		unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
		xive_ops->configure_irq(hw_irq,
					get_hard_smp_processor_id(xd->target),

	xive_do_source_set_mask(xd, true);
static int xive_irq_set_affinity(struct irq_data *d,
				 const struct cpumask *cpumask,

	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	u32 target, old_target;

	pr_devel("xive_irq_set_affinity: irq %d\n", d->irq);

	/* Is this valid ? */
	if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids)

	/* Don't do anything if the interrupt isn't started */
	if (!irqd_is_started(d))
		return IRQ_SET_MASK_OK;

	/*
	 * If the existing target is already in the new mask and is
	 * online, then do nothing.
	 */
	if (xd->target != XIVE_INVALID_TARGET &&
	    cpu_online(xd->target) &&
	    cpumask_test_cpu(xd->target, cpumask))
		return IRQ_SET_MASK_OK;

	/* Pick a new target */
	target = xive_pick_irq_target(d, cpumask);

	/* No target found */
	if (target == XIVE_INVALID_TARGET)

	if (WARN_ON(target >= nr_cpu_ids))
		target = smp_processor_id();

	old_target = xd->target;

	/*
	 * Only configure the irq if it's not currently passed-through to
	 * a KVM guest.
	 */
	if (!irqd_is_forwarded_to_vcpu(d))
		rc = xive_ops->configure_irq(hw_irq,
					     get_hard_smp_processor_id(target),
					     xive_irq_priority, d->irq);

		pr_err("Error %d reconfiguring irq %d\n", rc, d->irq);

	pr_devel(" target: 0x%x\n", target);

	/* Give up previous target */
	if (old_target != XIVE_INVALID_TARGET)
		xive_dec_target_count(old_target);

	return IRQ_SET_MASK_OK;
static int xive_irq_set_type(struct irq_data *d, unsigned int flow_type)
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	/*
	 * We only support these. This has really no effect other than
	 * setting the corresponding descriptor bits, but those will in
	 * turn affect the resend function when re-enabling an edge
	 * interrupt.
	 *
	 * Set the default to edge as explained in map().
	 */
	if (flow_type == IRQ_TYPE_DEFAULT || flow_type == IRQ_TYPE_NONE)
		flow_type = IRQ_TYPE_EDGE_RISING;

	if (flow_type != IRQ_TYPE_EDGE_RISING &&
	    flow_type != IRQ_TYPE_LEVEL_LOW)

	irqd_set_trigger_type(d, flow_type);

	/*
	 * Double check it matches what the FW thinks.
	 *
	 * NOTE: We don't know yet if the PAPR interface will provide
	 * the LSI vs MSI information apart from the device-tree so
	 * this check might have to move into an optional backend call
	 * that is specific to the native backend.
	 */
	if ((flow_type == IRQ_TYPE_LEVEL_LOW) !=
	    !!(xd->flags & XIVE_IRQ_FLAG_LSI)) {
		pr_warn("Interrupt %d (HW 0x%x) type mismatch, Linux says %s, FW says %s\n",
			d->irq, (u32)irqd_to_hwirq(d),
			(flow_type == IRQ_TYPE_LEVEL_LOW) ? "Level" : "Edge",
			(xd->flags & XIVE_IRQ_FLAG_LSI) ? "Level" : "Edge");

	return IRQ_SET_MASK_OK_NOCOPY;
static int xive_irq_retrigger(struct irq_data *d)
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	/* This should be only for MSIs */
	if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI))

	/*
	 * To perform a retrigger, we first set the PQ bits to
	 * 11, then perform an EOI.
	 */
	xive_esb_read(xd, XIVE_ESB_SET_PQ_11);

	/*
	 * Note: We pass "0" to the hw_irq argument in order to
	 * avoid calling into the backend EOI code which we don't
	 * want to do in the case of a re-trigger. Backends typically
	 * only do EOI for LSIs anyway.
	 */
	xive_do_source_eoi(0, xd);
static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state)
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);

	/*
	 * We only support this on interrupts that do not require
	 * firmware calls for masking and unmasking.
	 */
	if (xd->flags & XIVE_IRQ_FLAG_MASK_FW)

	/*
	 * This is called by KVM with state non-NULL for enabling
	 * pass-through or NULL for disabling it.
	 */
		irqd_set_forwarded_to_vcpu(d);

		/* Set it to PQ=10 state to prevent further sends */
		pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_10);

		/* No target ? nothing to do */
		if (xd->target == XIVE_INVALID_TARGET) {
			/*
			 * An untargeted interrupt should have been
			 * also masked at the source.
			 */

		/*
		 * If P was set, adjust state to PQ=11 to indicate
		 * that a resend is needed for the interrupt to reach
		 * the guest. Also remember the value of P.
		 *
		 * This also tells us that it's in flight to a host queue
		 * or has already been fetched but hasn't been EOIed yet
		 * by the host. Thus it's potentially using up a host
		 * queue slot. This is important to know because as long
		 * as this is the case, we must not hard-unmask it when
		 * "returning" that interrupt to the host.
		 *
		 * This saved_p is cleared by the host EOI, when we know
		 * for sure the queue slot is no longer in use.
		 */
			pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_11);

			/*
			 * Sync the XIVE source HW to ensure the interrupt
			 * has gone through the EAS before we change its
			 * target to the guest. That should guarantee us
			 * that we *will* eventually get an EOI for it on
			 * the host. Otherwise there would be a small window
			 * where P is seen here while the interrupt is still
			 * on its way to the guest queue.
			 */
			if (xive_ops->sync_source)
				xive_ops->sync_source(hw_irq);
		irqd_clr_forwarded_to_vcpu(d);

		/* No host target ? hard mask and return */
		if (xd->target == XIVE_INVALID_TARGET) {
			xive_do_source_set_mask(xd, true);

		/*
		 * Sync the XIVE source HW to ensure the interrupt
		 * has gone through the EAS before we change its
		 * target to the host.
		 */
		if (xive_ops->sync_source)
			xive_ops->sync_source(hw_irq);

		/*
		 * By convention we are called with the interrupt in
		 * a PQ=10 or PQ=11 state, ie, it won't fire and will
		 * have latched in Q whether there's a pending HW
		 * interrupt.
		 *
		 * First reconfigure the target.
		 */
		rc = xive_ops->configure_irq(hw_irq,
					     get_hard_smp_processor_id(xd->target),
					     xive_irq_priority, d->irq);

		/*
		 * Then if saved_p is not set, effectively re-enable the
		 * interrupt with an EOI. If it is set, we know there is
		 * still a message in a host queue somewhere that will be
		 * EOId eventually.
		 *
		 * Note: We don't check irqd_irq_disabled(). Effectively,
		 * we *will* let the irq get through even if masked if the
		 * HW is still firing it in order to deal with the whole
		 * saved_p business properly. If the interrupt triggers
		 * while masked, the generic code will re-mask it anyway.
		 */
			xive_do_source_eoi(hw_irq, xd);
static struct irq_chip xive_irq_chip = {
	.irq_startup = xive_irq_startup,
	.irq_shutdown = xive_irq_shutdown,
	.irq_eoi = xive_irq_eoi,
	.irq_mask = xive_irq_mask,
	.irq_unmask = xive_irq_unmask,
	.irq_set_affinity = xive_irq_set_affinity,
	.irq_set_type = xive_irq_set_type,
	.irq_retrigger = xive_irq_retrigger,
	.irq_set_vcpu_affinity = xive_irq_set_vcpu_affinity,
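
/* Lets other code (typically KVM) check whether an interrupt is handled by this driver */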
bool is_xive_irq(struct irq_chip *chip)
	return chip == &xive_irq_chip;
EXPORT_SYMBOL_GPL(is_xive_irq);
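
/* Unmap the ESB MMIO windows (EOI and trigger pages) attached to an interrupt */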
void xive_cleanup_irq_data(struct xive_irq_data *xd)
		iounmap(xd->eoi_mmio);
		if (xd->eoi_mmio == xd->trig_mmio)
			xd->trig_mmio = NULL;

		iounmap(xd->trig_mmio);
		xd->trig_mmio = NULL;

EXPORT_SYMBOL_GPL(xive_cleanup_irq_data);
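
/*
 * Allocate the per-interrupt XIVE data, have the backend populate it
 * (ESB addresses, flags, ...) and attach it as the virq's handler data.
 */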
static int xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw)
	struct xive_irq_data *xd;

	xd = kzalloc(sizeof(struct xive_irq_data), GFP_KERNEL);

	rc = xive_ops->populate_irq_data(hw, xd);

	xd->target = XIVE_INVALID_TARGET;
	irq_set_handler_data(virq, xd);

static void xive_irq_free_data(unsigned int virq)
	struct xive_irq_data *xd = irq_get_handler_data(virq);

	irq_set_handler_data(virq, NULL);
	xive_cleanup_irq_data(xd);
static void xive_cause_ipi(int cpu)
	struct xive_irq_data *xd;

	xc = per_cpu(xive_cpu, cpu);

	DBG_VERBOSE("IPI CPU %d -> %d (HW IRQ 0x%x)\n",
		    smp_processor_id(), cpu, xc->hw_ipi);

	if (WARN_ON(!xd->trig_mmio))
	out_be64(xd->trig_mmio, 0);
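
/*
 * All SMP IPI messages share one XIVE interrupt per CPU; the handler
 * simply demultiplexes them through the generic smp_ipi_demux() path.
 */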
static irqreturn_t xive_muxed_ipi_action(int irq, void *dev_id)
	return smp_ipi_demux();

static void xive_ipi_eoi(struct irq_data *d)
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);

	DBG_VERBOSE("IPI eoi: irq=%d [0x%lx] (HW IRQ 0x%x) pending=%02x\n",
		    d->irq, irqd_to_hwirq(d), xc->hw_ipi, xc->pending_prio);

	/* Handle possible race with unplug and drop stale IPIs */

	xive_do_source_eoi(xc->hw_ipi, &xc->ipi_data);
	xive_do_queue_eoi(xc);

static void xive_ipi_do_nothing(struct irq_data *d)
	/*
	 * Nothing to do, we never mask/unmask IPIs, but the callback
	 * has to exist for the struct irq_chip.
	 */

static struct irq_chip xive_ipi_chip = {
	.irq_eoi = xive_ipi_eoi,
	.irq_mask = xive_ipi_do_nothing,
	.irq_unmask = xive_ipi_do_nothing,
static void __init xive_request_ipi(void)

	/*
	 * Initialization failed, move on; we might manage to
	 * reach the point where we display our errors before
	 * the system falls apart.
	 */
	if (!xive_irq_domain)

	virq = irq_create_mapping(xive_irq_domain, 0);
	xive_ipi_irq = virq;

	WARN_ON(request_irq(virq, xive_muxed_ipi_action,
			    IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL));
static int xive_setup_cpu_ipi(unsigned int cpu)
	struct xive_cpu *xc;

	pr_debug("Setting up IPI for CPU %d\n", cpu);

	xc = per_cpu(xive_cpu, cpu);

	/* Check if we are already setup */
	if (xc->hw_ipi != 0)

	/* Grab an IPI from the backend, this will populate xc->hw_ipi */
	if (xive_ops->get_ipi(cpu, xc))

	/*
	 * Populate the IRQ data in the xive_cpu structure and
	 * configure the HW / enable the IPIs.
	 */
	rc = xive_ops->populate_irq_data(xc->hw_ipi, &xc->ipi_data);
		pr_err("Failed to populate IPI data on CPU %d\n", cpu);

	rc = xive_ops->configure_irq(xc->hw_ipi,
				     get_hard_smp_processor_id(cpu),
				     xive_irq_priority, xive_ipi_irq);
		pr_err("Failed to map IPI CPU %d\n", cpu);

	pr_devel("CPU %d HW IPI %x, virq %d, trig_mmio=%p\n", cpu,
		 xc->hw_ipi, xive_ipi_irq, xc->ipi_data.trig_mmio);

	xive_do_source_set_mask(&xc->ipi_data, false);
static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc)
	/* Disable the IPI and free the IRQ data */

	/* Already cleaned up ? */
	if (xc->hw_ipi == 0)

	xive_do_source_set_mask(&xc->ipi_data, true);

	/*
	 * Note: We don't call xive_cleanup_irq_data() to free
	 * the mappings as this is called from an IPI on kexec
	 * which is not a safe environment to call iounmap().
	 */

	/* Deconfigure/mask in the backend */
	xive_ops->configure_irq(xc->hw_ipi, hard_smp_processor_id(),
				0xff, xive_ipi_irq);

	/* Free the IPIs in the backend */
	xive_ops->put_ipi(cpu, xc);
void __init xive_smp_probe(void)
	smp_ops->cause_ipi = xive_cause_ipi;

	/* Register the IPI */

	/* Allocate and setup IPI for the boot CPU */
	xive_setup_cpu_ipi(smp_processor_id());

#endif /* CONFIG_SMP */
static int xive_irq_domain_map(struct irq_domain *h, unsigned int virq,

	/*
	 * Mark interrupts as edge sensitive by default so that resend
	 * actually works. Will fix that up below if needed.
	 */
	irq_clear_status_flags(virq, IRQ_LEVEL);

	/* IPIs are special and come up with HW number 0 */

		/*
		 * IPIs are marked per-cpu. We use separate HW interrupts under
		 * the hood but associated with the same "linux" interrupt.
		 */
		irq_set_chip_and_handler(virq, &xive_ipi_chip,

	rc = xive_irq_alloc_data(virq, hw);

	irq_set_chip_and_handler(virq, &xive_irq_chip, handle_fasteoi_irq);

static void xive_irq_domain_unmap(struct irq_domain *d, unsigned int virq)
	struct irq_data *data = irq_get_irq_data(virq);
	unsigned int hw_irq;

	/* XXX Assign BAD number */

	hw_irq = (unsigned int)irqd_to_hwirq(data);

	xive_irq_free_data(virq);
static int xive_irq_domain_xlate(struct irq_domain *h, struct device_node *ct,
				 const u32 *intspec, unsigned int intsize,
				 irq_hw_number_t *out_hwirq, unsigned int *out_flags)

	*out_hwirq = intspec[0];

	/*
	 * If intsize is at least 2, we look for the type in the second cell;
	 * we assume the LSB indicates a level interrupt.
	 */
			*out_flags = IRQ_TYPE_LEVEL_LOW;
			*out_flags = IRQ_TYPE_EDGE_RISING;
		*out_flags = IRQ_TYPE_LEVEL_LOW;

static int xive_irq_domain_match(struct irq_domain *h, struct device_node *node,
				 enum irq_domain_bus_token bus_token)
	return xive_ops->match(node);
static const struct irq_domain_ops xive_irq_domain_ops = {
	.match = xive_irq_domain_match,
	.map = xive_irq_domain_map,
	.unmap = xive_irq_domain_unmap,
	.xlate = xive_irq_domain_xlate,

static void __init xive_init_host(void)
	xive_irq_domain = irq_domain_add_nomap(NULL, XIVE_MAX_IRQ,
					       &xive_irq_domain_ops, NULL);
	if (WARN_ON(xive_irq_domain == NULL))
	irq_set_default_host(xive_irq_domain);
static void xive_cleanup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
	if (xc->queue[xive_irq_priority].qpage)
		xive_ops->cleanup_queue(cpu, xc, xive_irq_priority);

static int xive_setup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)

	/* We set up a single queue for now, backed by a 64k page */
	if (!xc->queue[xive_irq_priority].qpage)
		rc = xive_ops->setup_queue(cpu, xc, xive_irq_priority);
static int xive_prepare_cpu(unsigned int cpu)
	struct xive_cpu *xc;

	xc = per_cpu(xive_cpu, cpu);
		struct device_node *np;

		xc = kzalloc_node(sizeof(struct xive_cpu),
				  GFP_KERNEL, cpu_to_node(cpu));

		np = of_get_cpu_node(cpu, NULL);
		xc->chip_id = of_get_ibm_chip_id(np);

		per_cpu(xive_cpu, cpu) = xc;

	/* Setup EQs if not already */
	return xive_setup_cpu_queues(cpu, xc);
static void xive_setup_cpu(void)
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);

	/* Debug: Dump the TM state */
	pr_devel("CPU %d [HW 0x%02x] VT=%02x\n",
		 smp_processor_id(), hard_smp_processor_id(),
		 in_8(xive_tima + xive_tima_offset + TM_WORD2));

	/* The backend might have additional things to do */
	if (xive_ops->setup_cpu)
		xive_ops->setup_cpu(smp_processor_id(), xc);

	/* Set CPPR to 0xff to enable flow of interrupts */
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
void xive_smp_setup_cpu(void)
	pr_devel("SMP setup CPU %d\n", smp_processor_id());

	/* This will have already been done on the boot CPU */
	if (smp_processor_id() != boot_cpuid)

int xive_smp_prepare_cpu(unsigned int cpu)

	/* Allocate per-CPU data and queues */
	rc = xive_prepare_cpu(cpu);

	/* Allocate and setup IPI for the new CPU */
	return xive_setup_cpu_ipi(cpu);
#ifdef CONFIG_HOTPLUG_CPU
static void xive_flush_cpu_queue(unsigned int cpu, struct xive_cpu *xc)

	/* We assume local irqs are disabled */
	WARN_ON(!irqs_disabled());

	/* Check what's already in the CPU queue */
	while ((irq = xive_scan_interrupts(xc, false)) != 0) {
		/*
		 * We need to re-route that interrupt to its new destination.
		 * First get and lock the descriptor.
		 */
		struct irq_desc *desc = irq_to_desc(irq);
		struct irq_data *d = irq_desc_get_irq_data(desc);
		struct xive_irq_data *xd;
		unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);

		/*
		 * Ignore anything that isn't a XIVE irq, and ignore
		 * IPIs, which can simply be dropped.
		 */
		if (d->domain != xive_irq_domain || hw_irq == 0)

		/*
		 * The IRQ should have already been re-routed; it's just a
		 * stale entry in the old queue, so re-trigger it in order
		 * to make it reach its new destination.
		 */
		pr_info("CPU %d: Got irq %d while offline, re-sending...\n",

		raw_spin_lock(&desc->lock);
		xd = irq_desc_get_handler_data(desc);

		/*
		 * For LSIs we EOI; this will cause a resend if it's
		 * still asserted. Otherwise do an MSI retrigger.
		 */
		if (xd->flags & XIVE_IRQ_FLAG_LSI)
			xive_do_source_eoi(irqd_to_hwirq(d), xd);
			xive_irq_retrigger(d);

		raw_spin_unlock(&desc->lock);
void xive_smp_disable_cpu(void)
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	unsigned int cpu = smp_processor_id();

	/* Migrate interrupts away from the CPU */
	irq_migrate_all_off_this_cpu();

	/* Set CPPR to 0 to disable flow of interrupts */
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);

	/* Flush everything still in the queue */
	xive_flush_cpu_queue(cpu, xc);

	/* Re-enable CPPR */
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);

void xive_flush_interrupt(void)
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	unsigned int cpu = smp_processor_id();

	/* Called if an interrupt occurs while the CPU is hot unplugged */
	xive_flush_cpu_queue(cpu, xc);

#endif /* CONFIG_HOTPLUG_CPU */

#endif /* CONFIG_SMP */
void xive_teardown_cpu(void)
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	unsigned int cpu = smp_processor_id();

	/* Set CPPR to 0 to disable flow of interrupts */
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);

	if (xive_ops->teardown_cpu)
		xive_ops->teardown_cpu(cpu, xc);

void xive_kexec_teardown_cpu(int secondary)
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	unsigned int cpu = smp_processor_id();

	/* Set CPPR to 0 to disable flow of interrupts */
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);

	/* Backend cleanup if any */
	if (xive_ops->teardown_cpu)
		xive_ops->teardown_cpu(cpu, xc);

	/* Get rid of IPI */
	xive_cleanup_cpu_ipi(cpu, xc);

	/* Disable and free the queues */
	xive_cleanup_cpu_queues(cpu, xc);
void xive_shutdown(void)
	xive_ops->shutdown();

bool __init xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset,

	xive_tima_offset = offset;
	xive_irq_priority = max_prio;

	ppc_md.get_irq = xive_get_irq;
	__xive_enabled = true;

	pr_devel("Initializing host..\n");

	pr_devel("Initializing boot CPU..\n");

	/* Allocate per-CPU data and queues */
	xive_prepare_cpu(smp_processor_id());

	/* Get ready for interrupts */

	pr_info("Interrupt handling initialized with %s backend\n",
	pr_info("Using priority %d for all interrupts\n", max_prio);
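
/*
 * Allocate a zeroed, node-local block of pages large enough to back an
 * event queue of 2^queue_shift bytes. Returns an ERR_PTR on failure.
 */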
__be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift)
	unsigned int alloc_order;

	alloc_order = xive_alloc_order(queue_shift);
	pages = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, alloc_order);
		return ERR_PTR(-ENOMEM);
	qpage = (__be32 *)page_address(pages);
	memset(qpage, 0, 1 << queue_shift);
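
/* "xive=off" on the kernel command line requests that the XIVE driver stay disabled */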
static int __init xive_off(char *arg)
	xive_cmdline_disabled = true;
__setup("xive=off", xive_off);
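
/*
 * Backend usage sketch (illustrative only, not part of this file): a
 * platform backend is expected to discover its interrupt controller and
 * then hand over to this core roughly as follows, where my_ops, my_tima,
 * my_tima_offset and my_max_prio are hypothetical backend-provided values:
 *
 *	static const struct xive_ops my_ops = { ... };
 *
 *	if (!xive_core_init(&my_ops, my_tima, my_tima_offset, my_max_prio))
 *		return false;
 *
 * See xive-internal.h for the struct xive_ops callbacks this core relies
 * on (populate_irq_data, configure_irq, setup_queue, get_ipi, ...).
 */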