/*
 * Copyright 2012 Michael Ellerman, IBM Corporation.
 * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>

#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/debug.h>
#include <asm/synch.h>
#include <asm/cputhreads.h>
#include <asm/ppc-opcode.h>
#include <asm/pnv-pci.h>

#include "book3s_xics.h"

int h_ipi_redirect = 1;
EXPORT_SYMBOL(h_ipi_redirect);
int kvm_irq_bypass = 1;
EXPORT_SYMBOL(kvm_irq_bypass);

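/*
 * icp_rm_deliver_irq() is shared by the ICS resend path below and the
 * ICP hcall handlers further down, so it is forward-declared here.
 */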
static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
                            u32 new_irq);

/* -- ICS routines -- */
static void ics_rm_check_resend(struct kvmppc_xics *xics,
                                struct kvmppc_ics *ics, struct kvmppc_icp *icp)
{
        int i;

        arch_spin_lock(&ics->lock);

        for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
                struct ics_irq_state *state = &ics->irq_state[i];

                if (!state->resend)
                        continue;

                arch_spin_unlock(&ics->lock);
                icp_rm_deliver_irq(xics, icp, state->number);
                arch_spin_lock(&ics->lock);
        }

        arch_spin_unlock(&ics->lock);
}

/* -- ICP routines -- */

#ifdef CONFIG_SMP
static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu)
{
        int hcpu;

        hcpu = hcore << threads_shift;
        kvmppc_host_rm_ops_hv->rm_core[hcore].rm_data = vcpu;
        smp_muxed_ipi_set_message(hcpu, PPC_MSG_RM_HOST_ACTION);
        icp_native_cause_ipi_rm(hcpu);
}
#else
static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu) { }
#endif

/*
 * We start the search from our current CPU Id in the core map
 * and go in a circle until we get back to our ID looking for a
 * core that is running in host context and that hasn't already
 * been targeted for another rm_host_ops.
 *
 * In the future, could consider using a fairer algorithm (one
 * that distributes the IPIs better)
 *
 * Returns -1, if no CPU could be found in the host
 * Else, returns a CPU Id which has been reserved for use
 */
static inline int grab_next_hostcore(int start,
                struct kvmppc_host_rm_core *rm_core, int max, int action)
{
        bool success;
        int core;
        union kvmppc_rm_state old, new;

        for (core = start + 1; core < max; core++) {
                old = new = READ_ONCE(rm_core[core].rm_state);

                if (!old.in_host || old.rm_action)
                        continue;

                /* Try to grab this host core if not taken already. */
                new.rm_action = action;

                success = cmpxchg64(&rm_core[core].rm_state.raw,
                                        old.raw, new.raw) == old.raw;
                if (success) {
                        /*
                         * Make sure that the store to the rm_action is made
                         * visible before we return to caller (and the
                         * subsequent store to rm_data) to synchronize with
                         * the IPI handler.
                         */
                        smp_wmb();
                        return core;
                }
        }

        return -1;
}

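/*
 * Find a host core to post an rm_host_ops action to: search upward
 * from our own core to the last core, then wrap around and search
 * from core 0 up to (but excluding) our own core.
 */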
static inline int find_available_hostcore(int action)
{
        int core;
        int my_core = smp_processor_id() >> threads_shift;
        struct kvmppc_host_rm_core *rm_core = kvmppc_host_rm_ops_hv->rm_core;

        core = grab_next_hostcore(my_core, rm_core, cpu_nr_cores(), action);
        if (core == -1)
                core = grab_next_hostcore(core, rm_core, my_core, action);

        return core;
}

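/*
 * Post an external interrupt to a vcpu.  Kicking ourselves just sets
 * LPCR[MER]; a vcpu whose core is loaded gets a direct IPI; otherwise
 * we redirect the kick to a free host core or, failing that, record a
 * XICS_RM_KICK_VCPU action so the operation completes in virtual mode.
 */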
static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
                                struct kvm_vcpu *this_vcpu)
{
        struct kvmppc_icp *this_icp = this_vcpu->arch.icp;
        int cpu;
        int hcore;

        /* Mark the target VCPU as having an interrupt pending */
        vcpu->stat.queue_intr++;
        set_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);

        /* Kick self ? Just set MER and return */
        if (vcpu == this_vcpu) {
                mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_MER);
                return;
        }

        /*
         * Check if the core is loaded,
         * if not, find an available host core to post to wake the VCPU,
         * if we can't find one, set up state to eventually return too hard.
         */
        cpu = vcpu->arch.thread_cpu;
        if (cpu < 0 || cpu >= nr_cpu_ids) {
                hcore = -1;
                if (kvmppc_host_rm_ops_hv && h_ipi_redirect)
                        hcore = find_available_hostcore(XICS_RM_KICK_VCPU);
                if (hcore != -1) {
                        icp_send_hcore_msg(hcore, vcpu);
                } else {
                        this_icp->rm_action |= XICS_RM_KICK_VCPU;
                        this_icp->rm_kick_target = vcpu;
                }
                return;
        }

        smp_mb();
        kvmhv_rm_send_ipi(cpu);
}

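/* Withdraw the pending external interrupt from the calling vcpu. */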
static void icp_rm_clr_vcpu_irq(struct kvm_vcpu *vcpu)
{
        /* Note: Only called on self ! */
        clear_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL,
                  &vcpu->arch.pending_exceptions);
        mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_MER);
}

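/*
 * Attempt to move the ICP from old_state to new_state in one atomic
 * update, recomputing the interrupt output line (out_ee) on the way.
 * Returns false if another agent changed the state underneath us, in
 * which case the caller must re-sample the state and retry.
 */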
static inline bool icp_rm_try_update(struct kvmppc_icp *icp,
                                     union kvmppc_icp_state old,
                                     union kvmppc_icp_state new)
{
        struct kvm_vcpu *this_vcpu = local_paca->kvm_hstate.kvm_vcpu;
        bool success;

        /* Calculate new output value */
        new.out_ee = (new.xisr && (new.pending_pri < new.cppr));

        /* Attempt atomic update */
        success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
        if (!success)
                goto bail;

        /*
         * Check for output state update
         *
         * Note that this is racy since another processor could be updating
         * the state already. This is why we never clear the interrupt output
         * here, we only ever set it. The clear only happens prior to doing
         * an update and only by the processor itself. Currently we do it
         * in Accept (H_XIRR) and Up_Cppr (H_XPPR).
         *
         * We also do not try to figure out whether the EE state has changed,
         * we unconditionally set it if the new state calls for it. The reason
         * for that is that we opportunistically remove the pending interrupt
         * flag when raising CPPR, so we need to set it back here if an
         * interrupt is still pending.
         */
        if (new.out_ee)
                icp_rm_set_vcpu_irq(icp->vcpu, this_vcpu);

        /* Expose the state change for debug purposes */
        this_vcpu->arch.icp->rm_dbgstate = new;
        this_vcpu->arch.icp->rm_dbgtgt = icp->vcpu;

 bail:
        return success;
}

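/*
 * If anything was deferred to virtual mode (or real-mode debugging is
 * forced), fail the hcall with H_TOO_HARD so it is redone there.
 */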
static inline int check_too_hard(struct kvmppc_xics *xics,
                                 struct kvmppc_icp *icp)
{
        return (xics->real_mode_dbg || icp->rm_action) ? H_TOO_HARD : H_SUCCESS;
}

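/*
 * Retry delivery for every ICS that has interrupts waiting for a
 * resend on this ICP.
 */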
static void icp_rm_check_resend(struct kvmppc_xics *xics,
                                struct kvmppc_icp *icp)
{
        u32 icsid;

        /* Order this load with the test for need_resend in the caller */
        smp_rmb();
        for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
                struct kvmppc_ics *ics = xics->ics[icsid];

                if (!test_and_clear_bit(icsid, icp->resend_map))
                        continue;
                if (!ics)
                        continue;
                ics_rm_check_resend(xics, ics, icp);
        }
}

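/*
 * Try to record irq/priority as the pending interrupt in the ICP.  On
 * success, any less favored interrupt that got bumped is returned in
 * *reject; on failure, need_resend is set as part of the same atomic
 * update.
 */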
static bool icp_rm_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
                                  u32 *reject)
{
        union kvmppc_icp_state old_state, new_state;
        bool success;

        do {
                old_state = new_state = READ_ONCE(icp->state);

                *reject = 0;

                /* See if we can deliver */
                success = new_state.cppr > priority &&
                        new_state.mfrr > priority &&
                        new_state.pending_pri > priority;

                /*
                 * If we can, check for a rejection and perform the
                 * delivery
                 */
                if (success) {
                        *reject = new_state.xisr;
                        new_state.xisr = irq;
                        new_state.pending_pri = priority;
                } else {
                        /*
                         * If we failed to deliver we set need_resend
                         * so a subsequent CPPR state change causes us
                         * to try a new delivery.
                         */
                        new_state.need_resend = true;
                }

        } while (!icp_rm_try_update(icp, old_state, new_state));

        return success;
}

static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
                            u32 new_irq)
{
        struct ics_irq_state *state;
        struct kvmppc_ics *ics;
        u32 reject;
        u16 src;

        /*
         * This is used both for initial delivery of an interrupt and
         * for subsequent rejection.
         *
         * Rejection can be racy vs. resends. We have evaluated the
         * rejection in an atomic ICP transaction which is now complete,
         * so potentially the ICP can already accept the interrupt again.
         *
         * So we need to retry the delivery. Essentially the reject path
         * boils down to a failed delivery. Always.
         *
         * Now the interrupt could also have moved to a different target,
         * thus we may need to re-do the ICP lookup as well
         */
 again:
        /* Get the ICS state and lock it */
        ics = kvmppc_xics_find_ics(xics, new_irq, &src);
        if (!ics) {
                /* Unsafe increment, but this does not need to be accurate */
                xics->err_noics++;
                return;
        }
        state = &ics->irq_state[src];

        /* Get a lock on the ICS */
        arch_spin_lock(&ics->lock);

        /* Get our server */
        if (!icp || state->server != icp->server_num) {
                icp = kvmppc_xics_find_server(xics->kvm, state->server);
                if (!icp) {
                        /* Unsafe increment again */
                        xics->err_noicp++;
                        goto out;
                }
        }

        /* Clear the resend bit of that interrupt */
        state->resend = 0;

        /*
         * If masked, bail out
         *
         * Note: PAPR doesn't mention anything about masked pending
         * when doing a resend, only when doing a delivery.
         *
         * However that would have the effect of losing a masked
         * interrupt that was rejected and isn't consistent with
         * the whole masked_pending business which is about not
         * losing interrupts that occur while masked.
         *
         * I don't differentiate normal deliveries and resends, this
         * implementation will differ from PAPR and not lose such
         * interrupts.
         */
        if (state->priority == MASKED) {
                state->masked_pending = 1;
                goto out;
        }

        /*
         * Try the delivery, this will set the need_resend flag
         * in the ICP as part of the atomic transaction if the
         * delivery is not possible.
         *
         * Note that if successful, the new delivery might have itself
         * rejected an interrupt that was "delivered" before we took the
         * ics spin lock.
         *
         * In this case we do the whole sequence all over again for the
         * new guy. We cannot assume that the rejected interrupt is less
         * favored than the new one, and thus doesn't need to be delivered,
         * because by the time we exit icp_rm_try_to_deliver() the target
         * processor may well have already consumed & completed it, and thus
         * the rejected interrupt might actually be already acceptable.
         */
        if (icp_rm_try_to_deliver(icp, new_irq, state->priority, &reject)) {
                /*
                 * Delivery was successful, did we reject somebody else ?
                 */
                if (reject && reject != XICS_IPI) {
                        arch_spin_unlock(&ics->lock);
                        new_irq = reject;
                        goto again;
                }
        } else {
                /*
                 * We failed to deliver the interrupt we need to set the
                 * resend map bit and mark the ICS state as needing a resend
                 */
                set_bit(ics->icsid, icp->resend_map);
                state->resend = 1;

                /*
                 * If the need_resend flag got cleared in the ICP some time
                 * between icp_rm_try_to_deliver() atomic update and now, then
                 * we know it might have missed the resend_map bit. So we
                 * retry
                 */
                smp_mb();
                if (!icp->state.need_resend) {
                        arch_spin_unlock(&ics->lock);
                        goto again;
                }
        }
 out:
        arch_spin_unlock(&ics->lock);
}

static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
                             u8 new_cppr)
{
        union kvmppc_icp_state old_state, new_state;
        bool resend;

        /*
         * This handles several related states in one operation:
         *
         * ICP State: Down_CPPR
         *
         * Load CPPR with new value and if the XISR is 0
         * then check for resends:
         *
         * ICP State: Resend
         *
         * If MFRR is more favored than CPPR, check for IPIs
         * and notify ICS of a potential resend. This is done
         * asynchronously (when used in real mode, we will have
         * to exit here).
         *
         * We do not handle the complete Check_IPI as documented
         * here. In the PAPR, this state will be used for both
         * Set_MFRR and Down_CPPR. However, we know that we aren't
         * changing the MFRR state here so we don't need to handle
         * the case of an MFRR causing a reject of a pending irq,
         * this will have been handled when the MFRR was set in the
         * first place.
         *
         * Thus we don't have to handle rejects, only resends.
         *
         * When implementing real mode for HV KVM, resend will lead to
         * a H_TOO_HARD return and the whole transaction will be handled
         * in virtual mode.
         */
        do {
                old_state = new_state = READ_ONCE(icp->state);

                /* Down_CPPR */
                new_state.cppr = new_cppr;

                /*
                 * Cut down Resend / Check_IPI / IPI
                 *
                 * The logic is that we cannot have a pending interrupt
                 * trumped by an IPI at this point (see above), so we
                 * know that either the pending interrupt is already an
                 * IPI (in which case we don't care to override it) or
                 * it's either more favored than us or non existent
                 */
                if (new_state.mfrr < new_cppr &&
                    new_state.mfrr <= new_state.pending_pri) {
                        new_state.pending_pri = new_state.mfrr;
                        new_state.xisr = XICS_IPI;
                }

                /* Latch/clear resend bit */
                resend = new_state.need_resend;
                new_state.need_resend = 0;

        } while (!icp_rm_try_update(icp, old_state, new_state));

        /*
         * Now handle resend checks. Those are asynchronous to the ICP
         * state update in HW (ie bus transactions) so we can handle them
         * separately here as well.
         */
        if (resend) {
                icp->n_check_resend++;
                icp_rm_check_resend(xics, icp);
        }
}

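/*
 * Real mode hcall handlers.  Each returns H_SUCCESS when fully handled
 * here, or H_TOO_HARD to have the hcall redone in virtual mode.
 */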
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
{
        union kvmppc_icp_state old_state, new_state;
        struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
        struct kvmppc_icp *icp = vcpu->arch.icp;
        u32 xirr;

        if (!xics || !xics->real_mode)
                return H_TOO_HARD;

        /* First clear the interrupt */
        icp_rm_clr_vcpu_irq(icp->vcpu);

        /*
         * ICP State: Accept_Interrupt
         *
         * Return the pending interrupt (if any) along with the
         * current CPPR, then clear the XISR & set CPPR to the
         * pending priority
         */
        do {
                old_state = new_state = READ_ONCE(icp->state);

                xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
                if (!old_state.xisr)
                        break;
                new_state.cppr = new_state.pending_pri;
                new_state.pending_pri = 0xff;
                new_state.xisr = 0;

        } while (!icp_rm_try_update(icp, old_state, new_state));

        /* Return the result in GPR4 */
        vcpu->arch.gpr[4] = xirr;

        return check_too_hard(xics, icp);
}

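/*
 * H_IPI: set the MFRR of the addressed server, rejecting a less
 * favored pending interrupt or triggering a resend as required.
 */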
int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
                    unsigned long mfrr)
{
        union kvmppc_icp_state old_state, new_state;
        struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
        struct kvmppc_icp *icp, *this_icp = vcpu->arch.icp;
        u32 reject;
        bool resend;
        bool local;

        if (!xics || !xics->real_mode)
                return H_TOO_HARD;

        local = this_icp->server_num == server;
        if (local)
                icp = this_icp;
        else {
                icp = kvmppc_xics_find_server(vcpu->kvm, server);
                if (!icp)
                        return H_PARAMETER;
        }

        /*
         * ICP state: Set_MFRR
         *
         * If the CPPR is more favored than the new MFRR, then
         * nothing needs to be done as there can be no XISR to
         * reject.
         *
         * ICP state: Check_IPI
         *
         * If the CPPR is less favored, then we might be replacing
         * an interrupt, and thus need to possibly reject it.
         *
         * ICP State: IPI
         *
         * Besides rejecting any pending interrupts, we also
         * update XISR and pending_pri to mark IPI as pending.
         *
         * PAPR does not describe this state, but if the MFRR is being
         * made less favored than its earlier value, there might be
         * a previously-rejected interrupt needing to be resent.
         * Ideally, we would want to resend only if
         *      prio(pending_interrupt) < mfrr &&
         *      prio(pending_interrupt) < cppr
         * where pending interrupt is the one that was rejected. But
         * we don't have that state, so we simply trigger a resend
         * whenever the MFRR is made less favored.
         */
        do {
                old_state = new_state = READ_ONCE(icp->state);

                /* Set_MFRR */
                new_state.mfrr = mfrr;

                /* Check_IPI */
                reject = 0;
                resend = false;
                if (mfrr < new_state.cppr) {
                        /* Reject a pending interrupt if not an IPI */
                        if (mfrr <= new_state.pending_pri) {
                                reject = new_state.xisr;
                                new_state.pending_pri = mfrr;
                                new_state.xisr = XICS_IPI;
                        }
                }

                if (mfrr > old_state.mfrr) {
                        resend = new_state.need_resend;
                        new_state.need_resend = 0;
                }
        } while (!icp_rm_try_update(icp, old_state, new_state));

        /* Handle reject in real mode */
        if (reject && reject != XICS_IPI) {
                this_icp->n_reject++;
                icp_rm_deliver_irq(xics, icp, reject);
        }

        /* Handle resends in real mode */
        if (resend) {
                this_icp->n_check_resend++;
                icp_rm_check_resend(xics, icp);
        }

        return check_too_hard(xics, this_icp);
}

int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
        union kvmppc_icp_state old_state, new_state;
        struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
        struct kvmppc_icp *icp = vcpu->arch.icp;
        u32 reject;

        if (!xics || !xics->real_mode)
                return H_TOO_HARD;

        /*
         * ICP State: Set_CPPR
         *
         * We can safely compare the new value with the current
         * value outside of the transaction as the CPPR is only
         * ever changed by the processor on itself
         */
        if (cppr > icp->state.cppr) {
                icp_rm_down_cppr(xics, icp, cppr);
                goto bail;
        } else if (cppr == icp->state.cppr)
                return H_SUCCESS;

        /*
         * ICP State: Up_CPPR
         *
         * The processor is raising its priority, this can result
         * in a rejection of a pending interrupt:
         *
         * ICP State: Reject_Current
         *
         * We can remove EE from the current processor, the update
         * transaction will set it again if needed
         */
        icp_rm_clr_vcpu_irq(icp->vcpu);

        do {
                old_state = new_state = READ_ONCE(icp->state);

                reject = 0;
                new_state.cppr = cppr;

                if (cppr <= new_state.pending_pri) {
                        reject = new_state.xisr;
                        new_state.xisr = 0;
                        new_state.pending_pri = 0xff;
                }

        } while (!icp_rm_try_update(icp, old_state, new_state));

        /*
         * Check for rejects. They are handled by doing a new delivery
         * attempt (see comments in icp_rm_deliver_irq).
         */
        if (reject && reject != XICS_IPI) {
                icp->n_reject++;
                icp_rm_deliver_irq(xics, icp, reject);
        }
 bail:
        return check_too_hard(xics, icp);
}

int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
        struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
        struct kvmppc_icp *icp = vcpu->arch.icp;
        struct kvmppc_ics *ics;
        struct ics_irq_state *state;
        u32 irq = xirr & 0x00ffffff;
        u16 src;

        if (!xics || !xics->real_mode)
                return H_TOO_HARD;

        /*
         * ICP State: EOI
         *
         * Note: If EOI is incorrectly used by SW to lower the CPPR
         * value (ie more favored), we do not check for rejection of
         * a pending interrupt, this is a SW error and PAPR specifies
         * that we don't have to deal with it.
         *
         * The sending of an EOI to the ICS is handled after the
         * CPPR update
         *
         * ICP State: Down_CPPR which we handle
         * in a separate function as it's shared with H_CPPR.
         */
        icp_rm_down_cppr(xics, icp, xirr >> 24);

        /* IPIs have no EOI */
        if (irq == XICS_IPI)
                goto bail;
        /*
         * EOI handling: If the interrupt is still asserted, we need to
         * resend it. We can take a lockless "peek" at the ICS state here.
         *
         * "Message" interrupts will never have "asserted" set
         */
        ics = kvmppc_xics_find_ics(xics, irq, &src);
        if (!ics)
                goto bail;
        state = &ics->irq_state[src];

        /* Still asserted, resend it */
        if (state->asserted) {
                icp->n_reject++;
                icp_rm_deliver_irq(xics, icp, irq);
        }

        if (!hlist_empty(&vcpu->kvm->irq_ack_notifier_list)) {
                icp->rm_action |= XICS_RM_NOTIFY_EOI;
                icp->rm_eoied_irq = irq;
        }
 bail:
        return check_too_hard(xics, icp);
}

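/*
 * EOI a passed-through interrupt at the host: have OPAL complete the
 * MSI EOI at the PHB first, then write the xirr back to the real XICS
 * to restore the hardware CPPR.
 */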
unsigned long eoi_rc;

static void icp_eoi(struct irq_chip *c, u32 hwirq, u32 xirr)
{
        unsigned long xics_phys;
        int64_t rc;

        rc = pnv_opal_pci_msi_eoi(c, hwirq);
        if (rc)
                eoi_rc = rc;

        iosync();

        /* EOI it */
        xics_phys = local_paca->kvm_hstate.xics_phys;
        _stwcix(xics_phys + XICS_XIRR, xirr);
}

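/*
 * Deliver a host interrupt that is mapped to a guest irq directly to
 * the guest's ICP from real mode, then EOI it at the host.
 */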
long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu,
                                 u32 xirr,
                                 struct kvmppc_irq_map *irq_map,
                                 struct kvmppc_passthru_irqmap *pimap)
{
        struct kvmppc_xics *xics;
        struct kvmppc_icp *icp;
        u32 irq;

        irq = irq_map->v_hwirq;
        xics = vcpu->kvm->arch.xics;
        icp = vcpu->arch.icp;

        icp_rm_deliver_irq(xics, icp, irq);

        /* EOI the interrupt */
        icp_eoi(irq_desc_get_chip(irq_map->desc), irq_map->r_hwirq, xirr);

        if (check_too_hard(xics, icp) == H_TOO_HARD)
                return 2;
        else
                return -2;
}

/* --- Non-real mode XICS-related built-in routines --- */

/*
 * Host Operations poked by RM KVM
 */
static void rm_host_ipi_action(int action, void *data)
{
        switch (action) {
        case XICS_RM_KICK_VCPU:
                kvmppc_host_rm_ops_hv->vcpu_kick(data);
                break;
        default:
                WARN(1, "Unexpected rm_action=%d data=%p\n", action, data);
                break;
        }
}

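/*
 * IPI handler run on the host core that real mode KVM selected to
 * carry out a deferred rm_host_ops action (currently only vcpu kicks).
 */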
void kvmppc_xics_ipi_action(void)
{
        int core;
        unsigned int cpu = smp_processor_id();
        struct kvmppc_host_rm_core *rm_corep;

        core = cpu >> threads_shift;
        rm_corep = &kvmppc_host_rm_ops_hv->rm_core[core];

        if (rm_corep->rm_data) {
                rm_host_ipi_action(rm_corep->rm_state.rm_action,
                                                rm_corep->rm_data);
                /* Order these stores against the real mode KVM */
                rm_corep->rm_data = NULL;
                smp_wmb();
                rm_corep->rm_state.rm_action = 0;
        }
}