/*
 * Copyright 2012 Michael Ellerman, IBM Corporation.
 * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/kernel_stat.h>

#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/debug.h>
#include <asm/synch.h>
#include <asm/cputhreads.h>
#include <asm/pgtable.h>
#include <asm/ppc-opcode.h>
#include <asm/pnv-pci.h>
#include <asm/opal.h>
#include <asm/smp.h>

#include "book3s_xics.h"
int h_ipi_redirect = 1;
EXPORT_SYMBOL(h_ipi_redirect);
int kvm_irq_bypass = 1;
EXPORT_SYMBOL(kvm_irq_bypass);
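/*
 * Note: h_ipi_redirect lets the real-mode wakeup path redirect a VCPU kick
 * to another host core (see icp_rm_set_vcpu_irq() below); kvm_irq_bypass
 * gates the passthrough interrupt short cut handled at the bottom of this
 * file. Both are exported so the rest of HV KVM can consult them.
 */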

static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			       u32 new_irq);
static int xics_opal_rm_set_server(unsigned int hw_irq, int server_cpu);

/* -- ICS routines -- */
static void ics_rm_check_resend(struct kvmppc_xics *xics,
				struct kvmppc_ics *ics, struct kvmppc_icp *icp)
{
	int i;

	arch_spin_lock(&ics->lock);

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct ics_irq_state *state = &ics->irq_state[i];

		if (!state->resend)
			continue;

		arch_spin_unlock(&ics->lock);
		icp_rm_deliver_irq(xics, icp, state->number);
		arch_spin_lock(&ics->lock);
	}
	arch_spin_unlock(&ics->lock);
}
/* -- ICP routines -- */
#ifdef CONFIG_SMP
static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu)
{
	int hcpu;

	hcpu = hcore << threads_shift;
	kvmppc_host_rm_ops_hv->rm_core[hcore].rm_data = vcpu;
	smp_muxed_ipi_set_message(hcpu, PPC_MSG_RM_HOST_ACTION);
	icp_native_cause_ipi_rm(hcpu);
}
#else
static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu) { }
#endif
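
/*
 * Rough flow of the redirection path sketched above: the real-mode ICP code
 * reserves a free host core with find_available_hostcore() (below), stashes
 * the target vcpu in that core's rm_data, and sends it a
 * PPC_MSG_RM_HOST_ACTION IPI. On the receiving side that message is serviced
 * by kvmppc_xics_ipi_action() (end of this file), which performs the queued
 * action in host context.
 */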

/*
 * We start the search from our current CPU Id in the core map
 * and go in a circle until we get back to our ID looking for a
 * core that is running in host context and that hasn't already
 * been targeted for another rm_host_ops.
 *
 * In the future, could consider using a fairer algorithm (one
 * that distributes the IPIs better)
 *
 * Returns -1, if no CPU could be found in the host
 * Else, returns a CPU Id which has been reserved for use
 */
static inline int grab_next_hostcore(int start,
		struct kvmppc_host_rm_core *rm_core, int max, int action)
{
	bool success;
	int core;
	union kvmppc_rm_state old, new;

	for (core = start + 1; core < max; core++) {
		old = new = READ_ONCE(rm_core[core].rm_state);

		if (!old.in_host || old.rm_action)
			continue;

		/* Try to grab this host core if not taken already. */
		new.rm_action = action;
		success = cmpxchg64(&rm_core[core].rm_state.raw,
				    old.raw, new.raw) == old.raw;
		if (success) {
			/*
			 * Make sure that the store to the rm_action is made
			 * visible before we return to caller (and the
			 * subsequent store to rm_data) to synchronize with
			 * the IPI handler.
			 */
			smp_wmb();
			return core;
		}
	}
	return -1;
}
static inline int find_available_hostcore(int action)
{
	int core, my_core = smp_processor_id() >> threads_shift;
	struct kvmppc_host_rm_core *rm_core = kvmppc_host_rm_ops_hv->rm_core;

	core = grab_next_hostcore(my_core, rm_core, cpu_nr_cores(), action);
	if (core == -1)	/* wrap around and search the cores below my_core */
		core = grab_next_hostcore(core, rm_core, my_core, action);
	return core;
}
static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
				struct kvm_vcpu *this_vcpu)
{
	struct kvmppc_icp *this_icp = this_vcpu->arch.icp;
	int cpu;
	int hcore;

	/* Mark the target VCPU as having an interrupt pending */
	vcpu->stat.queue_intr++;
	set_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);

	/* Kick self ? Just set MER and return */
	if (vcpu == this_vcpu) {
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_MER);
		return;
	}

	/*
	 * Check if the core is loaded,
	 * if not, find an available host core to post to wake the VCPU,
	 * if we can't find one, set up state to eventually return too hard.
	 */
	cpu = vcpu->arch.thread_cpu;
	if (cpu < 0 || cpu >= nr_cpu_ids) {
		hcore = -1;
		if (kvmppc_host_rm_ops_hv && h_ipi_redirect)
			hcore = find_available_hostcore(XICS_RM_KICK_VCPU);
		if (hcore != -1) {
			icp_send_hcore_msg(hcore, vcpu);
		} else {
			this_icp->rm_action |= XICS_RM_KICK_VCPU;
			this_icp->rm_kick_target = vcpu;
		}
		return;
	}

	/* Order the pending-exception update before waking the target */
	smp_mb();
	kvmhv_rm_send_ipi(cpu);
}
static void icp_rm_clr_vcpu_irq(struct kvm_vcpu *vcpu)
{
	/* Note: Only called on self ! */
	clear_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL,
		  &vcpu->arch.pending_exceptions);
	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_MER);
}
static inline bool icp_rm_try_update(struct kvmppc_icp *icp,
				     union kvmppc_icp_state old,
				     union kvmppc_icp_state new)
{
	struct kvm_vcpu *this_vcpu = local_paca->kvm_hstate.kvm_vcpu;
	bool success;

	/* Calculate new output value */
	new.out_ee = (new.xisr && (new.pending_pri < new.cppr));

	/* Attempt atomic update */
	success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
	if (!success)
		goto bail;

	/*
	 * Check for output state update
	 *
	 * Note that this is racy since another processor could be updating
	 * the state already. This is why we never clear the interrupt output
	 * here, we only ever set it. The clear only happens prior to doing
	 * an update and only by the processor itself. Currently we do it
	 * in Accept (H_XIRR) and Up_Cppr (H_CPPR).
	 *
	 * We also do not try to figure out whether the EE state has changed,
	 * we unconditionally set it if the new state calls for it. The reason
	 * for that is that we opportunistically remove the pending interrupt
	 * flag when raising CPPR, so we need to set it back here if an
	 * interrupt is still pending.
	 */
	if (new.out_ee)
		icp_rm_set_vcpu_irq(icp->vcpu, this_vcpu);

	/* Expose the state change for debug purposes */
	this_vcpu->arch.icp->rm_dbgstate = new;
	this_vcpu->arch.icp->rm_dbgtgt = icp->vcpu;

 bail:
	return success;
}
static inline int check_too_hard(struct kvmppc_xics *xics,
				 struct kvmppc_icp *icp)
{
	return (xics->real_mode_dbg || icp->rm_action) ? H_TOO_HARD : H_SUCCESS;
}
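
/*
 * Note: H_TOO_HARD tells the guest-exit path to redo the same hcall in the
 * virtual-mode XICS code, which is also where any rm_action queued above
 * (e.g. XICS_RM_KICK_VCPU) gets completed.
 */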
static void icp_rm_check_resend(struct kvmppc_xics *xics,
				struct kvmppc_icp *icp)
{
	u32 icsid;

	/* Order this load with the test for need_resend in the caller */
	smp_rmb();
	for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!test_and_clear_bit(icsid, icp->resend_map) || !ics)
			continue;
		ics_rm_check_resend(xics, ics, icp);
	}
}
static bool icp_rm_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
				  u32 *reject)
{
	union kvmppc_icp_state old_state, new_state;
	bool success;

	do {
		old_state = new_state = READ_ONCE(icp->state);
		*reject = 0;

		/* See if we can deliver */
		success = new_state.cppr > priority &&
			new_state.mfrr > priority &&
			new_state.pending_pri > priority;

		/* If we can, check for a rejection and perform the delivery */
		if (success) {
			*reject = new_state.xisr;
			new_state.xisr = irq;
			new_state.pending_pri = priority;
		} else {
			/*
			 * If we failed to deliver we set need_resend
			 * so a subsequent CPPR state change causes us
			 * to try a new delivery.
			 */
			new_state.need_resend = true;
		}
	} while (!icp_rm_try_update(icp, old_state, new_state));

	return success;
}
static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			       u32 new_irq)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u32 reject;
	u16 src;

	/*
	 * This is used both for initial delivery of an interrupt and
	 * for subsequent rejection.
	 *
	 * Rejection can be racy vs. resends. We have evaluated the
	 * rejection in an atomic ICP transaction which is now complete,
	 * so potentially the ICP can already accept the interrupt again.
	 *
	 * So we need to retry the delivery. Essentially the reject path
	 * boils down to a failed delivery. Always.
	 *
	 * Now the interrupt could also have moved to a different target,
	 * thus we may need to re-do the ICP lookup as well
	 */
 again:
	/* Get the ICS state and lock it */
	ics = kvmppc_xics_find_ics(xics, new_irq, &src);
	if (!ics) {
		/* Unsafe increment, but this does not need to be accurate */
		xics->err_noics++;
		return;
	}
	state = &ics->irq_state[src];

	/* Get a lock on the ICS */
	arch_spin_lock(&ics->lock);

	/* Get our server */
	if (!icp || state->server != icp->server_num) {
		icp = kvmppc_xics_find_server(xics->kvm, state->server);
		if (!icp) {
			/* Unsafe increment again */
			xics->err_noicp++;
			goto out;
		}
	}

	/* Clear the resend bit of that interrupt */
	state->resend = 0;

	/*
	 * If masked, bail out
	 *
	 * Note: PAPR doesn't mention anything about masked pending
	 * when doing a resend, only when doing a delivery.
	 *
	 * However that would have the effect of losing a masked
	 * interrupt that was rejected and isn't consistent with
	 * the whole masked_pending business which is about not
	 * losing interrupts that occur while masked.
	 *
	 * I don't differentiate normal deliveries and resends, this
	 * implementation will differ from PAPR and not lose such
	 * interrupts.
	 */
	if (state->priority == MASKED) {
		state->masked_pending = 1;
		goto out;
	}

	/*
	 * Try the delivery, this will set the need_resend flag
	 * in the ICP as part of the atomic transaction if the
	 * delivery is not possible.
	 *
	 * Note that if successful, the new delivery might have itself
	 * rejected an interrupt that was "delivered" before we took the
	 * ics spin lock.
	 *
	 * In this case we do the whole sequence all over again for the
	 * new guy. We cannot assume that the rejected interrupt is less
	 * favored than the new one, and thus doesn't need to be delivered,
	 * because by the time we exit icp_rm_try_to_deliver() the target
	 * processor may well have already consumed & completed it, and thus
	 * the rejected interrupt might actually be already acceptable.
	 */
	if (icp_rm_try_to_deliver(icp, new_irq, state->priority, &reject)) {
		/* Delivery was successful, did we reject somebody else ? */
		if (reject && reject != XICS_IPI) {
			arch_spin_unlock(&ics->lock);
			new_irq = reject;
			goto again;
		}
	} else {
		/*
		 * We failed to deliver the interrupt we need to set the
		 * resend map bit and mark the ICS state as needing a resend
		 */
		set_bit(ics->icsid, icp->resend_map);
		state->resend = 1;

		/*
		 * If the need_resend flag got cleared in the ICP some time
		 * between icp_rm_try_to_deliver() atomic update and now, then
		 * we know it might have missed the resend_map bit. So we
		 * retry
		 */
		if (!icp->state.need_resend) {
			arch_spin_unlock(&ics->lock);
			goto again;
		}
	}
 out:
	arch_spin_unlock(&ics->lock);
}
static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			     u8 new_cppr)
{
	union kvmppc_icp_state old_state, new_state;
	bool resend;

	/*
	 * This handles several related states in one operation:
	 *
	 * ICP State: Down_CPPR
	 *
	 * Load CPPR with new value and if the XISR is 0
	 * then check for resends:
	 *
	 * ICP State: Resend
	 *
	 * If MFRR is more favored than CPPR, check for IPIs
	 * and notify ICS of a potential resend. This is done
	 * asynchronously (when used in real mode, we will have
	 * to exit here).
	 *
	 * We do not handle the complete Check_IPI as documented
	 * here. In the PAPR, this state will be used for both
	 * Set_MFRR and Down_CPPR. However, we know that we aren't
	 * changing the MFRR state here so we don't need to handle
	 * the case of an MFRR causing a reject of a pending irq,
	 * this will have been handled when the MFRR was set in the
	 * first place.
	 *
	 * Thus we don't have to handle rejects, only resends.
	 *
	 * When implementing real mode for HV KVM, resend will lead to
	 * a H_TOO_HARD return and the whole transaction will be handled
	 * in virtual mode.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Down_CPPR */
		new_state.cppr = new_cppr;

		/*
		 * Cut down Resend / Check_IPI / IPI
		 *
		 * The logic is that we cannot have a pending interrupt
		 * trumped by an IPI at this point (see above), so we
		 * know that either the pending interrupt is already an
		 * IPI (in which case we don't care to override it) or
		 * it's either more favored than us or non-existent
		 */
		if (new_state.mfrr < new_cppr &&
		    new_state.mfrr <= new_state.pending_pri) {
			new_state.pending_pri = new_state.mfrr;
			new_state.xisr = XICS_IPI;
		}

		/* Latch/clear resend bit */
		resend = new_state.need_resend;
		new_state.need_resend = 0;
	} while (!icp_rm_try_update(icp, old_state, new_state));

	/*
	 * Now handle resend checks. Those are asynchronous to the ICP
	 * state update in HW (ie bus transactions) so we can handle them
	 * separately here as well.
	 */
	if (resend) {
		icp->n_check_resend++;
		icp_rm_check_resend(xics, icp);
	}
}
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 xirr;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	/* First clear the interrupt */
	icp_rm_clr_vcpu_irq(icp->vcpu);

	/*
	 * ICP State: Accept_Interrupt
	 *
	 * Return the pending interrupt (if any) along with the
	 * current CPPR, then clear the XISR & set CPPR to the
	 * pending priority
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
		if (!old_state.xisr)
			break;
		new_state.cppr = new_state.pending_pri;
		new_state.pending_pri = 0xff;
		new_state.xisr = 0;
	} while (!icp_rm_try_update(icp, old_state, new_state));

	/* Return the result in GPR4 */
	vcpu->arch.gpr[4] = xirr;

	return check_too_hard(xics, icp);
}
int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
		    unsigned long mfrr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp, *this_icp = vcpu->arch.icp;
	u32 reject;
	bool resend;
	bool local;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	local = this_icp->server_num == server;
	if (local)
		icp = this_icp;
	else
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
	if (!icp)
		return H_PARAMETER;

	/*
	 * ICP state: Set_MFRR
	 *
	 * If the CPPR is more favored than the new MFRR, then
	 * nothing needs to be done as there can be no XISR to
	 * reject.
	 *
	 * ICP state: Check_IPI
	 *
	 * If the CPPR is less favored, then we might be replacing
	 * an interrupt, and thus need to possibly reject it.
	 *
	 * ICP State: IPI
	 *
	 * Besides rejecting any pending interrupts, we also
	 * update XISR and pending_pri to mark IPI as pending.
	 *
	 * PAPR does not describe this state, but if the MFRR is being
	 * made less favored than its earlier value, there might be
	 * a previously-rejected interrupt needing to be resent.
	 * Ideally, we would want to resend only if
	 *	prio(pending_interrupt) < mfrr &&
	 *	prio(pending_interrupt) < cppr
	 * where pending interrupt is the one that was rejected. But
	 * we don't have that state, so we simply trigger a resend
	 * whenever the MFRR is made less favored.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Set_MFRR */
		new_state.mfrr = mfrr;

		/* Check_IPI */
		reject = 0;
		resend = false;
		if (mfrr < new_state.cppr) {
			/* Reject a pending interrupt if not an IPI */
			if (mfrr <= new_state.pending_pri) {
				reject = new_state.xisr;
				new_state.pending_pri = mfrr;
				new_state.xisr = XICS_IPI;
			}
		}

		if (mfrr > old_state.mfrr) {
			resend = new_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_rm_try_update(icp, old_state, new_state));

	/* Handle reject in real mode */
	if (reject && reject != XICS_IPI) {
		this_icp->n_reject++;
		icp_rm_deliver_irq(xics, icp, reject);
	}

	/* Handle resends in real mode */
	if (resend) {
		this_icp->n_check_resend++;
		icp_rm_check_resend(xics, icp);
	}

	return check_too_hard(xics, this_icp);
}
int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 reject;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	/*
	 * ICP State: Set_CPPR
	 *
	 * We can safely compare the new value with the current
	 * value outside of the transaction as the CPPR is only
	 * ever changed by the processor on itself
	 */
	if (cppr > icp->state.cppr) {
		icp_rm_down_cppr(xics, icp, cppr);
		goto bail;
	} else if (cppr == icp->state.cppr)
		return H_SUCCESS;

	/*
	 * The processor is raising its priority, this can result
	 * in a rejection of a pending interrupt:
	 *
	 * ICP State: Reject_Current
	 *
	 * We can remove EE from the current processor, the update
	 * transaction will set it again if needed
	 */
	icp_rm_clr_vcpu_irq(icp->vcpu);

	do {
		old_state = new_state = READ_ONCE(icp->state);

		reject = 0;
		new_state.cppr = cppr;

		if (cppr <= new_state.pending_pri) {
			reject = new_state.xisr;
			new_state.xisr = 0;
			new_state.pending_pri = 0xff;
		}
	} while (!icp_rm_try_update(icp, old_state, new_state));

	/*
	 * Check for rejects. They are handled by doing a new delivery
	 * attempt (see comments in icp_rm_deliver_irq).
	 */
	if (reject && reject != XICS_IPI) {
		icp->n_reject++;
		icp_rm_deliver_irq(xics, icp, reject);
	}
 bail:
	return check_too_hard(xics, icp);
}
int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u32 irq = xirr & 0x00ffffff;
	u16 src;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	/*
	 * ICP State: EOI
	 *
	 * Note: If EOI is incorrectly used by SW to lower the CPPR
	 * value (ie more favored), we do not check for rejection of
	 * a pending interrupt, this is a SW error and PAPR specifies
	 * that we don't have to deal with it.
	 *
	 * The sending of an EOI to the ICS is handled after the
	 * CPPR update
	 *
	 * ICP State: Down_CPPR which we handle
	 * in a separate function as it's shared with H_CPPR.
	 */
	icp_rm_down_cppr(xics, icp, xirr >> 24);

	/* IPIs have no EOI */
	if (irq == XICS_IPI)
		goto bail;
	/*
	 * EOI handling: If the interrupt is still asserted, we need to
	 * resend it. We can take a lockless "peek" at the ICS state here.
	 *
	 * "Message" interrupts will never have "asserted" set
	 */
	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		goto bail;
	state = &ics->irq_state[src];

	/* Still asserted, resend it */
	if (state->asserted) {
		icp->n_reject++;
		icp_rm_deliver_irq(xics, icp, irq);
	}

	if (!hlist_empty(&vcpu->kvm->irq_ack_notifier_list)) {
		icp->rm_action |= XICS_RM_NOTIFY_EOI;
		icp->rm_eoied_irq = irq;
	}

	if (state->host_irq) {
		++vcpu->stat.pthru_all;
		if (state->intr_cpu != -1) {
			int pcpu = raw_smp_processor_id();

			pcpu = cpu_first_thread_sibling(pcpu);
			++vcpu->stat.pthru_host;
			if (state->intr_cpu != pcpu) {
				++vcpu->stat.pthru_bad_aff;
				xics_opal_rm_set_server(state->host_irq, pcpu);
			}
			state->intr_cpu = -1;
		}
	}
 bail:
	return check_too_hard(xics, icp);
}
unsigned long eoi_rc;

static void icp_eoi(struct irq_chip *c, u32 hwirq, u32 xirr)
{
	unsigned long xics_phys;
	int64_t rc;

	rc = pnv_opal_pci_msi_eoi(c, hwirq);
	if (rc)
		eoi_rc = rc;
	iosync();
	xics_phys = local_paca->kvm_hstate.xics_phys;
	_stwcix(xics_phys + XICS_XIRR, xirr);
}
static int xics_opal_rm_set_server(unsigned int hw_irq, int server_cpu)
{
	unsigned int mangle_cpu = get_hard_smp_processor_id(server_cpu) << 2;

	return opal_rm_set_xive(hw_irq, mangle_cpu, DEFAULT_PRIORITY);
}
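
/*
 * Note: opal_rm_set_xive() is the real-mode entry to OPAL's set-xive call.
 * The hardware CPU id is shifted left by 2 because the low two bits of the
 * server number carry the interrupt "link" field (left at zero here), and
 * DEFAULT_PRIORITY keeps the interrupt enabled at its usual priority on the
 * new server.
 */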

/*
 * Increment a per-CPU 32-bit unsigned integer variable.
 * Safe to call in real-mode. Handles vmalloc'ed addresses.
 *
 * ToDo: Make this work for any integral type
 */
static inline void this_cpu_inc_rm(unsigned int __percpu *addr)
{
	unsigned long l;
	unsigned int *raddr;
	int cpu = smp_processor_id();

	raddr = per_cpu_ptr(addr, cpu);
	l = (unsigned long)raddr;
	if (REGION_ID(l) == VMALLOC_REGION_ID) {
		l = vmalloc_to_phys(raddr);
		raddr = (unsigned int *)l;
	}
	++*raddr;
}

/*
 * We don't try to update the flags in the irq_desc 'istate' field in
 * here as would happen in the normal IRQ handling path for several reasons:
 *  - state flags represent internal IRQ state and are not expected to be
 *    updated outside the IRQ subsystem
 *  - more importantly, these are useful for edge triggered interrupts,
 *    IRQ probing, etc., but we are only handling MSI/MSIx interrupts here
 *    and these states shouldn't apply to us.
 *
 * However, we do update irq_stats - we somewhat duplicate the code in
 * kstat_incr_irqs_this_cpu() for this since this function is defined
 * in irq/internal.h which we don't want to include here.
 * The only difference is that desc->kstat_irqs is an allocated per CPU
 * variable and could have been vmalloc'ed, so we can't directly
 * call __this_cpu_inc() on it. The kstat structure is a static
 * per CPU variable and it should be accessible by real-mode KVM.
 */
static void kvmppc_rm_handle_irq_desc(struct irq_desc *desc)
{
	this_cpu_inc_rm(desc->kstat_irqs);
	__this_cpu_inc(kstat.irqs_sum);
}
long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu,
				 u32 xirr,
				 struct kvmppc_irq_map *irq_map,
				 struct kvmppc_passthru_irqmap *pimap)
{
	struct kvmppc_xics *xics;
	struct kvmppc_icp *icp;
	u32 irq;

	irq = irq_map->v_hwirq;
	xics = vcpu->kvm->arch.xics;
	icp = vcpu->arch.icp;

	kvmppc_rm_handle_irq_desc(irq_map->desc);
	icp_rm_deliver_irq(xics, icp, irq);

	/* EOI the interrupt */
	icp_eoi(irq_desc_get_chip(irq_map->desc), irq_map->r_hwirq, xirr);

	if (check_too_hard(xics, icp) == H_TOO_HARD)
		return 2;
	else
		return -2;
}

/*  --- Non-real mode XICS-related built-in routines ---  */

/* Host Operations poked by RM KVM */
static void rm_host_ipi_action(int action, void *data)
{
	switch (action) {
	case XICS_RM_KICK_VCPU:
		kvmppc_host_rm_ops_hv->vcpu_kick(data);
		break;
	default:
		WARN(1, "Unexpected rm_action=%d data=%p\n", action, data);
		break;
	}
}

void kvmppc_xics_ipi_action(void)
{
	int core;
	unsigned int cpu = smp_processor_id();
	struct kvmppc_host_rm_core *rm_corep;

	core = cpu >> threads_shift;
	rm_corep = &kvmppc_host_rm_ops_hv->rm_core[core];

	if (rm_corep->rm_data) {
		rm_host_ipi_action(rm_corep->rm_state.rm_action,
				   rm_corep->rm_data);
		/* Order these stores against the real mode KVM */
		rm_corep->rm_data = NULL;
		smp_wmb();
		rm_corep->rm_state.rm_action = 0;
	}
}