/*
 * Copyright 2012 Michael Ellerman, IBM Corporation.
 * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/kernel_stat.h>

#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/debug.h>
#include <asm/synch.h>
#include <asm/cputhreads.h>
#include <asm/pgtable.h>
#include <asm/ppc-opcode.h>
#include <asm/pnv-pci.h>
#include <asm/opal.h>
#include <asm/smp.h>

#include "book3s_xics.h"

int h_ipi_redirect = 1;
EXPORT_SYMBOL(h_ipi_redirect);
int kvm_irq_bypass = 1;
EXPORT_SYMBOL(kvm_irq_bypass);
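
/*
 * h_ipi_redirect selects whether the kick of a vCPU whose hardware thread
 * is not currently running in the guest is redirected, via an IPI, to a
 * core running in host context instead of being deferred to virtual mode
 * through H_TOO_HARD.  kvm_irq_bypass is consulted by the HV KVM
 * passthrough setup code to enable the IRQ bypass (device passthrough)
 * shortcut handled further below.
 */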

static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq);
static int xics_opal_rm_set_server(unsigned int hw_irq, int server_cpu);

/* -- ICS routines -- */
static void ics_rm_check_resend(struct kvmppc_xics *xics,
				struct kvmppc_ics *ics, struct kvmppc_icp *icp)
{
	int i;

	arch_spin_lock(&ics->lock);

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct ics_irq_state *state = &ics->irq_state[i];

		if (!state->resend)
			continue;

		/*
		 * icp_rm_deliver_irq() takes the ICS lock itself, so drop
		 * it around the call and re-take it afterwards.
		 */
		arch_spin_unlock(&ics->lock);
		icp_rm_deliver_irq(xics, icp, state->number);
		arch_spin_lock(&ics->lock);
	}

	arch_spin_unlock(&ics->lock);
}

/* -- ICP routines -- */

#ifdef CONFIG_SMP
static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu)
{
	int hcpu;

	hcpu = hcore << threads_shift;
	kvmppc_host_rm_ops_hv->rm_core[hcore].rm_data = vcpu;
	smp_muxed_ipi_set_message(hcpu, PPC_MSG_RM_HOST_ACTION);
	icp_native_cause_ipi_rm(hcpu);
}
#else
static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu) { }
#endif

/*
 * We start the search from our current CPU Id in the core map
 * and go in a circle until we get back to our ID looking for a
 * core that is running in host context and that hasn't already
 * been targeted for another rm_host_ops.
 *
 * In the future, could consider using a fairer algorithm (one
 * that distributes the IPIs better)
 *
 * Returns -1, if no CPU could be found in the host
 * Else, returns a CPU Id which has been reserved for use
 */
static inline int grab_next_hostcore(int start,
		struct kvmppc_host_rm_core *rm_core, int max, int action)
{
	bool success;
	int core;
	union kvmppc_rm_state old, new;

	for (core = start + 1; core < max; core++) {
		old = new = READ_ONCE(rm_core[core].rm_state);

		if (!old.in_host || old.rm_action)
			continue;

		/* Try to grab this host core if not taken already. */
		new.rm_action = action;

		success = cmpxchg64(&rm_core[core].rm_state.raw,
					old.raw, new.raw) == old.raw;
		if (success) {
			/*
			 * Make sure that the store to the rm_action is made
			 * visible before we return to caller (and the
			 * subsequent store to rm_data) to synchronize with
			 * the IPI handler.
			 */
			smp_wmb();
			return core;
		}
	}

	return -1;
}
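
/*
 * Search upward from the caller's own core, then wrap around and scan the
 * cores below it. The first core that is in host context and not already
 * reserved for an rm_host action is grabbed and returned.
 */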

static inline int find_available_hostcore(int action)
{
	int core;
	int my_core = smp_processor_id() >> threads_shift;
	struct kvmppc_host_rm_core *rm_core = kvmppc_host_rm_ops_hv->rm_core;

	core = grab_next_hostcore(my_core, rm_core, cpu_nr_cores(), action);
	if (core == -1)
		core = grab_next_hostcore(core, rm_core, my_core, action);

	return core;
}

static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
				struct kvm_vcpu *this_vcpu)
{
	struct kvmppc_icp *this_icp = this_vcpu->arch.icp;
	int cpu;
	int hcore;

	/* Mark the target VCPU as having an interrupt pending */
	vcpu->stat.queue_intr++;
	set_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);

	/* Kick self ? Just set MER and return */
	if (vcpu == this_vcpu) {
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_MER);
		return;
	}

	/*
	 * Check if the core is loaded,
	 * if not, find an available host core to post to wake the VCPU,
	 * if we can't find one, set up state to eventually return too hard.
	 */
	cpu = vcpu->arch.thread_cpu;
	if (cpu < 0 || cpu >= nr_cpu_ids) {
		hcore = -1;
		if (kvmppc_host_rm_ops_hv && h_ipi_redirect)
			hcore = find_available_hostcore(XICS_RM_KICK_VCPU);
		if (hcore != -1) {
			icp_send_hcore_msg(hcore, vcpu);
		} else {
			this_icp->rm_action |= XICS_RM_KICK_VCPU;
			this_icp->rm_kick_target = vcpu;
		}
		return;
	}

	smp_mb();
	kvmhv_rm_send_ipi(cpu);
}

static void icp_rm_clr_vcpu_irq(struct kvm_vcpu *vcpu)
{
	/* Note: Only called on self ! */
	clear_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL,
		  &vcpu->arch.pending_exceptions);
	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_MER);
}

static inline bool icp_rm_try_update(struct kvmppc_icp *icp,
				     union kvmppc_icp_state old,
				     union kvmppc_icp_state new)
{
	struct kvm_vcpu *this_vcpu = local_paca->kvm_hstate.kvm_vcpu;
	bool success;

	/* Calculate new output value */
	new.out_ee = (new.xisr && (new.pending_pri < new.cppr));

	/* Attempt atomic update */
	success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
	if (!success)
		goto bail;

	/*
	 * Check for output state update
	 *
	 * Note that this is racy since another processor could be updating
	 * the state already. This is why we never clear the interrupt output
	 * here, we only ever set it. The clear only happens prior to doing
	 * an update and only by the processor itself. Currently we do it
	 * in Accept (H_XIRR) and Up_Cppr (H_CPPR).
	 *
	 * We also do not try to figure out whether the EE state has changed,
	 * we unconditionally set it if the new state calls for it. The reason
	 * for that is that we opportunistically remove the pending interrupt
	 * flag when raising CPPR, so we need to set it back here if an
	 * interrupt is still pending.
	 */
	if (new.out_ee)
		icp_rm_set_vcpu_irq(icp->vcpu, this_vcpu);

	/* Expose the state change for debug purposes */
	this_vcpu->arch.icp->rm_dbgstate = new;
	this_vcpu->arch.icp->rm_dbgtgt = icp->vcpu;

 bail:
	return success;
}
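
/*
 * H_TOO_HARD tells the caller that the hypercall could not be completed
 * in real mode; the guest exit path then re-issues it to the virtual-mode
 * XICS emulation, which also picks up any deferred rm_action work.
 */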

static inline int check_too_hard(struct kvmppc_xics *xics,
				 struct kvmppc_icp *icp)
{
	return (xics->real_mode_dbg || icp->rm_action) ? H_TOO_HARD : H_SUCCESS;
}

static void icp_rm_check_resend(struct kvmppc_xics *xics,
			     struct kvmppc_icp *icp)
{
	u32 icsid;

	/* Order this load with the test for need_resend in the caller */
	smp_rmb();
	for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!test_and_clear_bit(icsid, icp->resend_map))
			continue;
		if (!ics)
			continue;
		ics_rm_check_resend(xics, ics, icp);
	}
}

static bool icp_rm_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
				  u32 *reject)
{
	union kvmppc_icp_state old_state, new_state;
	bool success;

	do {
		old_state = new_state = READ_ONCE(icp->state);

		*reject = 0;

		/* See if we can deliver */
		success = new_state.cppr > priority &&
			new_state.mfrr > priority &&
			new_state.pending_pri > priority;

		/*
		 * If we can, check for a rejection and perform the
		 * delivery
		 */
		if (success) {
			*reject = new_state.xisr;
			new_state.xisr = irq;
			new_state.pending_pri = priority;
		} else {
			/*
			 * If we failed to deliver we set need_resend
			 * so a subsequent CPPR state change causes us
			 * to try a new delivery.
			 */
			new_state.need_resend = true;
		}

	} while (!icp_rm_try_update(icp, old_state, new_state));

	return success;
}

static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u32 reject;
	u16 src;

	/*
	 * This is used both for initial delivery of an interrupt and
	 * for subsequent rejection.
	 *
	 * Rejection can be racy vs. resends. We have evaluated the
	 * rejection in an atomic ICP transaction which is now complete,
	 * so potentially the ICP can already accept the interrupt again.
	 *
	 * So we need to retry the delivery. Essentially the reject path
	 * boils down to a failed delivery. Always.
	 *
	 * Now the interrupt could also have moved to a different target,
	 * thus we may need to re-do the ICP lookup as well
	 */

 again:
	/* Get the ICS state and lock it */
	ics = kvmppc_xics_find_ics(xics, new_irq, &src);
	if (!ics) {
		/* Unsafe increment, but this does not need to be accurate */
		xics->err_noics++;
		return;
	}
	state = &ics->irq_state[src];

	/* Get a lock on the ICS */
	arch_spin_lock(&ics->lock);

	/* Get our server */
	if (!icp || state->server != icp->server_num) {
		icp = kvmppc_xics_find_server(xics->kvm, state->server);
		if (!icp) {
			/* Unsafe increment again */
			xics->err_noicp++;
			goto out;
		}
	}

	/* Clear the resend bit of that interrupt */
	state->resend = 0;

	/*
	 * If masked, bail out
	 *
	 * Note: PAPR doesn't mention anything about masked pending
	 * when doing a resend, only when doing a delivery.
	 *
	 * However that would have the effect of losing a masked
	 * interrupt that was rejected and isn't consistent with
	 * the whole masked_pending business which is about not
	 * losing interrupts that occur while masked.
	 *
	 * I don't differentiate normal deliveries and resends, this
	 * implementation will differ from PAPR and not lose such
	 * interrupts.
	 */
	if (state->priority == MASKED) {
		state->masked_pending = 1;
		goto out;
	}

	/*
	 * Try the delivery, this will set the need_resend flag
	 * in the ICP as part of the atomic transaction if the
	 * delivery is not possible.
	 *
	 * Note that if successful, the new delivery might have itself
	 * rejected an interrupt that was "delivered" before we took the
	 * ics spin lock.
	 *
	 * In this case we do the whole sequence all over again for the
	 * new guy. We cannot assume that the rejected interrupt is less
	 * favored than the new one, and thus doesn't need to be delivered,
	 * because by the time we exit icp_rm_try_to_deliver() the target
	 * processor may well have already consumed & completed it, and thus
	 * the rejected interrupt might actually be already acceptable.
	 */
	if (icp_rm_try_to_deliver(icp, new_irq, state->priority, &reject)) {
		/*
		 * Delivery was successful, did we reject somebody else ?
		 */
		if (reject && reject != XICS_IPI) {
			arch_spin_unlock(&ics->lock);
			new_irq = reject;
			goto again;
		}
	} else {
		/*
		 * We failed to deliver the interrupt we need to set the
		 * resend map bit and mark the ICS state as needing a resend
		 */
		set_bit(ics->icsid, icp->resend_map);
		state->resend = 1;

		/*
		 * If the need_resend flag got cleared in the ICP some time
		 * between icp_rm_try_to_deliver() atomic update and now, then
		 * we know it might have missed the resend_map bit. So we
		 * retry
		 */
		if (!icp->state.need_resend) {
			arch_spin_unlock(&ics->lock);
			goto again;
		}
	}
 out:
	arch_spin_unlock(&ics->lock);
}

static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			     u8 new_cppr)
{
	union kvmppc_icp_state old_state, new_state;
	bool resend;

	/*
	 * This handles several related states in one operation:
	 *
	 * ICP State: Down_CPPR
	 *
	 * Load CPPR with new value and if the XISR is 0
	 * then check for resends:
	 *
	 * ICP State: Resend
	 *
	 * If MFRR is more favored than CPPR, check for IPIs
	 * and notify ICS of a potential resend. This is done
	 * asynchronously (when used in real mode, we will have
	 * to exit here).
	 *
	 * We do not handle the complete Check_IPI as documented
	 * here. In the PAPR, this state will be used for both
	 * Set_MFRR and Down_CPPR. However, we know that we aren't
	 * changing the MFRR state here so we don't need to handle
	 * the case of an MFRR causing a reject of a pending irq,
	 * this will have been handled when the MFRR was set in the
	 * first place.
	 *
	 * Thus we don't have to handle rejects, only resends.
	 *
	 * When implementing real mode for HV KVM, resend will lead to
	 * a H_TOO_HARD return and the whole transaction will be handled
	 * in virtual mode.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Down_CPPR */
		new_state.cppr = new_cppr;

		/*
		 * Cut down Resend / Check_IPI / IPI
		 *
		 * The logic is that we cannot have a pending interrupt
		 * trumped by an IPI at this point (see above), so we
		 * know that either the pending interrupt is already an
		 * IPI (in which case we don't care to override it) or
		 * it's either more favored than us or non existent
		 */
		if (new_state.mfrr < new_cppr &&
		    new_state.mfrr <= new_state.pending_pri) {
			new_state.pending_pri = new_state.mfrr;
			new_state.xisr = XICS_IPI;
		}

		/* Latch/clear resend bit */
		resend = new_state.need_resend;
		new_state.need_resend = 0;

	} while (!icp_rm_try_update(icp, old_state, new_state));

	/*
	 * Now handle resend checks. Those are asynchronous to the ICP
	 * state update in HW (ie bus transactions) so we can handle them
	 * separately here as well.
	 */
	if (resend) {
		icp->n_check_resend++;
		icp_rm_check_resend(xics, icp);
	}
}
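
/*
 * Real-mode handlers for the XICS hypercalls (H_XIRR, H_IPI, H_CPPR and
 * H_EOI). Each returns H_SUCCESS when fully handled here, or H_TOO_HARD
 * when the call must be completed by the virtual-mode XICS code.
 */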

unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 xirr;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	/* First clear the interrupt */
	icp_rm_clr_vcpu_irq(icp->vcpu);

	/*
	 * ICP State: Accept_Interrupt
	 *
	 * Return the pending interrupt (if any) along with the
	 * current CPPR, then clear the XISR & set CPPR to the
	 * pending priority
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
		if (!old_state.xisr)
			break;
		new_state.cppr = new_state.pending_pri;
		new_state.pending_pri = 0xff;
		new_state.xisr = 0;

	} while (!icp_rm_try_update(icp, old_state, new_state));

	/* Return the result in GPR4 */
	vcpu->arch.gpr[4] = xirr;

	return check_too_hard(xics, icp);
}

int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
		    unsigned long mfrr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp, *this_icp = vcpu->arch.icp;
	u32 reject;
	bool resend;
	bool local;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	local = this_icp->server_num == server;
	if (local)
		icp = this_icp;
	else
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
	if (!icp)
		return H_PARAMETER;

	/*
	 * ICP state: Set_MFRR
	 *
	 * If the CPPR is more favored than the new MFRR, then
	 * nothing needs to be done as there can be no XISR to
	 * reject.
	 *
	 * ICP state: Check_IPI
	 *
	 * If the CPPR is less favored, then we might be replacing
	 * an interrupt, and thus need to possibly reject it.
	 *
	 * ICP State: IPI
	 *
	 * Besides rejecting any pending interrupts, we also
	 * update XISR and pending_pri to mark IPI as pending.
	 *
	 * PAPR does not describe this state, but if the MFRR is being
	 * made less favored than its earlier value, there might be
	 * a previously-rejected interrupt needing to be resent.
	 * Ideally, we would want to resend only if
	 *	prio(pending_interrupt) < mfrr &&
	 *	prio(pending_interrupt) < cppr
	 * where pending interrupt is the one that was rejected. But
	 * we don't have that state, so we simply trigger a resend
	 * whenever the MFRR is made less favored.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Set_MFRR */
		new_state.mfrr = mfrr;

		/* Check_IPI */
		reject = 0;
		resend = false;
		if (mfrr < new_state.cppr) {
			/* Reject a pending interrupt if not an IPI */
			if (mfrr <= new_state.pending_pri) {
				reject = new_state.xisr;
				new_state.pending_pri = mfrr;
				new_state.xisr = XICS_IPI;
			}
		}

		if (mfrr > old_state.mfrr) {
			resend = new_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_rm_try_update(icp, old_state, new_state));

	/* Handle reject in real mode */
	if (reject && reject != XICS_IPI) {
		this_icp->n_reject++;
		icp_rm_deliver_irq(xics, icp, reject);
	}

	/* Handle resends in real mode */
	if (resend) {
		this_icp->n_check_resend++;
		icp_rm_check_resend(xics, icp);
	}

	return check_too_hard(xics, this_icp);
}

int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 reject;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	/*
	 * ICP State: Set_CPPR
	 *
	 * We can safely compare the new value with the current
	 * value outside of the transaction as the CPPR is only
	 * ever changed by the processor on itself
	 */
	if (cppr > icp->state.cppr) {
		icp_rm_down_cppr(xics, icp, cppr);
		goto bail;
	} else if (cppr == icp->state.cppr)
		return H_SUCCESS;

	/*
	 * ICP State: Up_CPPR
	 *
	 * The processor is raising its priority, this can result
	 * in a rejection of a pending interrupt:
	 *
	 * ICP State: Reject_Current
	 *
	 * We can remove EE from the current processor, the update
	 * transaction will set it again if needed
	 */
	icp_rm_clr_vcpu_irq(icp->vcpu);

	do {
		old_state = new_state = READ_ONCE(icp->state);

		reject = 0;
		new_state.cppr = cppr;

		if (cppr <= new_state.pending_pri) {
			reject = new_state.xisr;
			new_state.xisr = 0;
			new_state.pending_pri = 0xff;
		}

	} while (!icp_rm_try_update(icp, old_state, new_state));

	/*
	 * Check for rejects. They are handled by doing a new delivery
	 * attempt (see comments in icp_rm_deliver_irq).
	 */
	if (reject && reject != XICS_IPI) {
		icp->n_reject++;
		icp_rm_deliver_irq(xics, icp, reject);
	}

 bail:
	return check_too_hard(xics, icp);
}

int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u32 irq = xirr & 0x00ffffff;
	u16 src;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	/*
	 * ICP State: EOI
	 *
	 * Note: If EOI is incorrectly used by SW to lower the CPPR
	 * value (ie more favored), we do not check for rejection of
	 * a pending interrupt, this is a SW error and PAPR specifies
	 * that we don't have to deal with it.
	 *
	 * The sending of an EOI to the ICS is handled after the
	 * CPPR update
	 *
	 * ICP State: Down_CPPR which we handle
	 * in a separate function as it's shared with H_CPPR.
	 */
	icp_rm_down_cppr(xics, icp, xirr >> 24);

	/* IPIs have no EOI */
	if (irq == XICS_IPI)
		goto bail;

	/*
	 * EOI handling: If the interrupt is still asserted, we need to
	 * resend it. We can take a lockless "peek" at the ICS state here.
	 *
	 * "Message" interrupts will never have "asserted" set
	 */
	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		goto bail;

	state = &ics->irq_state[src];

	/* Still asserted, resend it */
	if (state->asserted) {
		icp->n_reject++;
		icp_rm_deliver_irq(xics, icp, irq);
	}

	if (!hlist_empty(&vcpu->kvm->irq_ack_notifier_list)) {
		icp->rm_action |= XICS_RM_NOTIFY_EOI;
		icp->rm_eoied_irq = irq;
	}

	/* Track passed-through interrupt affinity for the passthrough path */
	if (state->host_irq) {
		++vcpu->stat.pthru_all;
		if (state->intr_cpu != -1) {
			int pcpu = raw_smp_processor_id();

			pcpu = cpu_first_thread_sibling(pcpu);
			++vcpu->stat.pthru_host;
			if (state->intr_cpu != pcpu) {
				++vcpu->stat.pthru_bad_aff;
				xics_opal_rm_set_server(state->host_irq, pcpu);
			}
			state->intr_cpu = -1;
		}
	}

 bail:
	return check_too_hard(xics, icp);
}
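
/*
 * eoi_rc records the most recent non-zero return code from the OPAL MSI
 * EOI call below, purely as a breadcrumb for debugging; the EOI path
 * itself does not act on the failure.
 */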
unsigned long eoi_rc;

static void icp_eoi(struct irq_chip *c, u32 hwirq, u32 xirr)
{
	unsigned long xics_phys;
	int64_t rc;

	rc = pnv_opal_pci_msi_eoi(c, hwirq);

	if (rc)
		eoi_rc = rc;

	iosync();

	/* EOI it */
	xics_phys = local_paca->kvm_hstate.xics_phys;
	_stwcix(xics_phys + XICS_XIRR, xirr);
}

static int xics_opal_rm_set_server(unsigned int hw_irq, int server_cpu)
{
	unsigned int mangle_cpu = get_hard_smp_processor_id(server_cpu) << 2;

	return opal_rm_set_xive(hw_irq, mangle_cpu, DEFAULT_PRIORITY);
}

/*
 * Increment a per-CPU 32-bit unsigned integer variable.
 * Safe to call in real-mode. Handles vmalloc'ed addresses
 *
 * ToDo: Make this work for any integral type
 */

static inline void this_cpu_inc_rm(unsigned int __percpu *addr)
{
	unsigned long l;
	unsigned int *raddr;
	int cpu = smp_processor_id();

	raddr = per_cpu_ptr(addr, cpu);
	l = (unsigned long)raddr;

	if (REGION_ID(l) == VMALLOC_REGION_ID) {
		l = vmalloc_to_phys(raddr);
		raddr = (unsigned int *)l;
	}
	++*raddr;
}

/*
 * We don't try to update the flags in the irq_desc 'istate' field in
 * here as would happen in the normal IRQ handling path for several reasons:
 *  - state flags represent internal IRQ state and are not expected to be
 *    updated outside the IRQ subsystem
 *  - more importantly, these are useful for edge triggered interrupts,
 *    IRQ probing, etc., but we are only handling MSI/MSIx interrupts here
 *    and these states shouldn't apply to us.
 *
 * However, we do update irq_stats - we somewhat duplicate the code in
 * kstat_incr_irqs_this_cpu() for this since this function is defined
 * in irq/internal.h which we don't want to include here.
 * The only difference is that desc->kstat_irqs is an allocated per CPU
 * variable and could have been vmalloc'ed, so we can't directly
 * call __this_cpu_inc() on it. The kstat structure is a static
 * per CPU variable and it should be accessible by real-mode KVM.
 */
static void kvmppc_rm_handle_irq_desc(struct irq_desc *desc)
{
	this_cpu_inc_rm(desc->kstat_irqs);
	__this_cpu_inc(kstat.irqs_sum);
}
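
/*
 * Real-mode delivery of a passed-through device interrupt: bump the host
 * interrupt statistics, inject the mapped virtual irq into the guest's
 * ICP, then EOI the hardware interrupt at its source.
 */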
long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu,
				 u32 xirr,
				 struct kvmppc_irq_map *irq_map,
				 struct kvmppc_passthru_irqmap *pimap)
{
	struct kvmppc_xics *xics;
	struct kvmppc_icp *icp;
	u32 irq;

	irq = irq_map->v_hwirq;
	xics = vcpu->kvm->arch.xics;
	icp = vcpu->arch.icp;

	kvmppc_rm_handle_irq_desc(irq_map->desc);
	icp_rm_deliver_irq(xics, icp, irq);

	/* EOI the interrupt */
	icp_eoi(irq_desc_get_chip(irq_map->desc), irq_map->r_hwirq, xirr);

	if (check_too_hard(xics, icp) == H_TOO_HARD)
		return 2;
	else
		return -2;
}

/*  --- Non-real mode XICS-related built-in routines ---  */

/**
 * Host Operations poked by RM KVM
 */
static void rm_host_ipi_action(int action, void *data)
{
	switch (action) {
	case XICS_RM_KICK_VCPU:
		kvmppc_host_rm_ops_hv->vcpu_kick(data);
		break;
	default:
		WARN(1, "Unexpected rm_action=%d data=%p\n", action, data);
		break;
	}
}

void kvmppc_xics_ipi_action(void)
{
	int core;
	unsigned int cpu = smp_processor_id();
	struct kvmppc_host_rm_core *rm_corep;

	core = cpu >> threads_shift;
	rm_corep = &kvmppc_host_rm_ops_hv->rm_core[core];

	if (rm_corep->rm_data) {
		rm_host_ipi_action(rm_corep->rm_state.rm_action,
				   rm_corep->rm_data);
		/* Order these stores against the real mode KVM */
		rm_corep->rm_data = NULL;
		smp_wmb();
		rm_corep->rm_state.rm_action = 0;
	}
}