/*
 * Copyright 2012 Michael Ellerman, IBM Corporation.
 * Copyright 2012 Benjamin Herrenschmidt, IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>

#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xics.h>
#include <asm/debug.h>
#include <asm/synch.h>
#include <asm/cputhreads.h>
#include <asm/ppc-opcode.h>
#include <asm/pnv-pci.h>

#include "book3s_xics.h"

int h_ipi_redirect = 1;
EXPORT_SYMBOL(h_ipi_redirect);

static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq);

/* -- ICS routines -- */
static void ics_rm_check_resend(struct kvmppc_xics *xics,
				struct kvmppc_ics *ics, struct kvmppc_icp *icp)
{
	int i;

	arch_spin_lock(&ics->lock);

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct ics_irq_state *state = &ics->irq_state[i];

		if (!state->resend)
			continue;

		/* Drop the ICS lock across the delivery, then retake it */
		arch_spin_unlock(&ics->lock);
		icp_rm_deliver_irq(xics, icp, state->number);
		arch_spin_lock(&ics->lock);
	}

	arch_spin_unlock(&ics->lock);
}

/* -- ICP routines -- */

#ifdef CONFIG_SMP
static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu)
{
	int hcpu;

	hcpu = hcore << threads_shift;
	kvmppc_host_rm_ops_hv->rm_core[hcore].rm_data = vcpu;
	smp_muxed_ipi_set_message(hcpu, PPC_MSG_RM_HOST_ACTION);
	icp_native_cause_ipi_rm(hcpu);
}
#else
static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu) { }
#endif

/*
 * We start the search from our current CPU Id in the core map
 * and go in a circle until we get back to our ID looking for a
 * core that is running in host context and that hasn't already
 * been targeted for another rm_host_ops.
 *
 * In the future, we could consider using a fairer algorithm (one
 * that distributes the IPIs better).
 *
 * Returns -1 if no suitable core could be found in the host.
 * Otherwise, returns a core Id which has been reserved for use.
 */
static inline int grab_next_hostcore(int start,
		struct kvmppc_host_rm_core *rm_core, int max, int action)
{
	bool success;
	int core;
	union kvmppc_rm_state old, new;

	for (core = start + 1; core < max; core++) {
		old = new = READ_ONCE(rm_core[core].rm_state);

		/* Skip cores not in host context or already targeted */
		if (!old.in_host || old.rm_action)
			continue;

		/* Try to grab this host core if not taken already. */
		new.rm_action = action;

		success = cmpxchg64(&rm_core[core].rm_state.raw,
				    old.raw, new.raw) == old.raw;
		if (success) {
			/*
			 * Make sure that the store to the rm_action is made
			 * visible before we return to caller (and the
			 * subsequent store to rm_data) to synchronize with
			 * the IPI handler.
			 */
			smp_wmb();
			return core;
		}
	}

	return -1;
}

static inline int find_available_hostcore(int action)
{
	int core;
	int my_core = smp_processor_id() >> threads_shift;
	struct kvmppc_host_rm_core *rm_core = kvmppc_host_rm_ops_hv->rm_core;

	/* Search above our own core first, then wrap around from core 0 */
	core = grab_next_hostcore(my_core, rm_core, cpu_nr_cores(), action);
	if (core == -1)
		core = grab_next_hostcore(core, rm_core, my_core, action);

	return core;
}

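/*
 * Flag the target VCPU as having an external interrupt pending, then make
 * sure something notices: the local thread (via LPCR[MER]), an IPI to the
 * core the VCPU is running on, a redirected message to a free host core,
 * or, as a last resort, a kick deferred to virtual mode via rm_action.
 */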
static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
				struct kvm_vcpu *this_vcpu)
{
	struct kvmppc_icp *this_icp = this_vcpu->arch.icp;
	int cpu;
	int hcore;

	/* Mark the target VCPU as having an interrupt pending */
	vcpu->stat.queue_intr++;
	set_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);

	/* Kick self ? Just set MER and return */
	if (vcpu == this_vcpu) {
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_MER);
		return;
	}

	/*
	 * Check if the core is loaded,
	 * if not, find an available host core to post to wake the VCPU,
	 * if we can't find one, set up state to eventually return too hard.
	 */
	cpu = vcpu->arch.thread_cpu;
	if (cpu < 0 || cpu >= nr_cpu_ids) {
		hcore = -1;
		if (kvmppc_host_rm_ops_hv && h_ipi_redirect)
			hcore = find_available_hostcore(XICS_RM_KICK_VCPU);
		if (hcore != -1) {
			icp_send_hcore_msg(hcore, vcpu);
		} else {
			this_icp->rm_action |= XICS_RM_KICK_VCPU;
			this_icp->rm_kick_target = vcpu;
		}
		return;
	}

	smp_mb();
	kvmhv_rm_send_ipi(cpu);
}

static void icp_rm_clr_vcpu_irq(struct kvm_vcpu *vcpu)
{
	/* Note: Only called on self ! */
	clear_bit(BOOK3S_IRQPRIO_EXTERNAL_LEVEL,
		  &vcpu->arch.pending_exceptions);
	mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_MER);
}

static inline bool icp_rm_try_update(struct kvmppc_icp *icp,
				     union kvmppc_icp_state old,
				     union kvmppc_icp_state new)
{
	struct kvm_vcpu *this_vcpu = local_paca->kvm_hstate.kvm_vcpu;
	bool success;

	/* Calculate new output value */
	new.out_ee = (new.xisr && (new.pending_pri < new.cppr));

	/*
	 * Attempt atomic update: the whole ICP state packs into the single
	 * 64-bit word icp->state.raw (see union kvmppc_icp_state in
	 * book3s_xics.h), so one cmpxchg64() is the whole transaction.
	 */
	success = cmpxchg64(&icp->state.raw, old.raw, new.raw) == old.raw;
	if (!success)
		goto bail;

	/*
	 * Check for output state update
	 *
	 * Note that this is racy since another processor could be updating
	 * the state already. This is why we never clear the interrupt output
	 * here, we only ever set it. The clear only happens prior to doing
	 * an update and only by the processor itself. Currently we do it
	 * in Accept (H_XIRR) and Up_Cppr (H_CPPR).
	 *
	 * We also do not try to figure out whether the EE state has changed,
	 * we unconditionally set it if the new state calls for it. The reason
	 * for that is that we opportunistically remove the pending interrupt
	 * flag when raising CPPR, so we need to set it back here if an
	 * interrupt is still pending.
	 */
	if (new.out_ee)
		icp_rm_set_vcpu_irq(icp->vcpu, this_vcpu);

	/* Expose the state change for debug purposes */
	this_vcpu->arch.icp->rm_dbgstate = new;
	this_vcpu->arch.icp->rm_dbgtgt = icp->vcpu;

 bail:
	return success;
}

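/*
 * If this CPU has queued any work for virtual mode (rm_action) or if
 * real-mode debugging is requested, make the hcall fail with H_TOO_HARD
 * so that it is redone, and the queued work handled, in virtual mode.
 */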
static inline int check_too_hard(struct kvmppc_xics *xics,
				 struct kvmppc_icp *icp)
{
	return (xics->real_mode_dbg || icp->rm_action) ? H_TOO_HARD : H_SUCCESS;
}

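/*
 * Walk the per-ICP resend map and have every ICS with a pending resend
 * retry delivery of its flagged interrupts.
 */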
static void icp_rm_check_resend(struct kvmppc_xics *xics,
				struct kvmppc_icp *icp)
{
	u32 icsid;

	/* Order this load with the test for need_resend in the caller */
	smp_rmb();
	for_each_set_bit(icsid, icp->resend_map, xics->max_icsid + 1) {
		struct kvmppc_ics *ics = xics->ics[icsid];

		if (!test_and_clear_bit(icsid, icp->resend_map))
			continue;
		if (!ics)
			continue;
		ics_rm_check_resend(xics, ics, icp);
	}
}

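/*
 * Try to accept irq at the given priority into the ICP state. Returns
 * true if the interrupt was accepted; any previously pending interrupt
 * that got displaced is reported through *reject.
 */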
static bool icp_rm_try_to_deliver(struct kvmppc_icp *icp, u32 irq, u8 priority,
				  u32 *reject)
{
	union kvmppc_icp_state old_state, new_state;
	bool success;

	do {
		old_state = new_state = READ_ONCE(icp->state);

		*reject = 0;

		/* See if we can deliver */
		success = new_state.cppr > priority &&
			new_state.mfrr > priority &&
			new_state.pending_pri > priority;

		/*
		 * If we can, check for a rejection and perform the
		 * delivery
		 */
		if (success) {
			*reject = new_state.xisr;
			new_state.xisr = irq;
			new_state.pending_pri = priority;
		} else {
			/*
			 * If we failed to deliver we set need_resend
			 * so a subsequent CPPR state change causes us
			 * to try a new delivery.
			 */
			new_state.need_resend = true;
		}

	} while (!icp_rm_try_update(icp, old_state, new_state));

	return success;
}

static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			    u32 new_irq)
{
	struct ics_irq_state *state;
	struct kvmppc_ics *ics;
	u32 reject;
	u16 src;

	/*
	 * This is used both for initial delivery of an interrupt and
	 * for subsequent rejection.
	 *
	 * Rejection can be racy vs. resends. We have evaluated the
	 * rejection in an atomic ICP transaction which is now complete,
	 * so potentially the ICP can already accept the interrupt again.
	 *
	 * So we need to retry the delivery. Essentially the reject path
	 * boils down to a failed delivery. Always.
	 *
	 * Now the interrupt could also have moved to a different target,
	 * thus we may need to re-do the ICP lookup as well.
	 */
 again:
	/* Get the ICS state and lock it */
	ics = kvmppc_xics_find_ics(xics, new_irq, &src);
	if (!ics) {
		/* Unsafe increment, but this does not need to be accurate */
		xics->err_noics++;
		return;
	}
	state = &ics->irq_state[src];

	/* Get a lock on the ICS */
	arch_spin_lock(&ics->lock);

	/* Get our server */
	if (!icp || state->server != icp->server_num) {
		icp = kvmppc_xics_find_server(xics->kvm, state->server);
		if (!icp) {
			/* Unsafe increment again */
			xics->err_noicp++;
			goto out;
		}
	}

	/* Clear the resend bit of that interrupt */
	state->resend = 0;

	/*
	 * If masked, bail out
	 *
	 * Note: PAPR doesn't mention anything about masked pending
	 * when doing a resend, only when doing a delivery.
	 *
	 * However that would have the effect of losing a masked
	 * interrupt that was rejected and isn't consistent with
	 * the whole masked_pending business which is about not
	 * losing interrupts that occur while masked.
	 *
	 * I don't differentiate normal deliveries and resends, this
	 * implementation will differ from PAPR and not lose such
	 * interrupts.
	 */
	if (state->priority == MASKED) {
		state->masked_pending = 1;
		goto out;
	}

	/*
	 * Try the delivery, this will set the need_resend flag
	 * in the ICP as part of the atomic transaction if the
	 * delivery is not possible.
	 *
	 * Note that if successful, the new delivery might have itself
	 * rejected an interrupt that was "delivered" before we took the
	 * ics spin lock.
	 *
	 * In this case we do the whole sequence all over again for the
	 * new guy. We cannot assume that the rejected interrupt is less
	 * favored than the new one, and thus doesn't need to be delivered,
	 * because by the time we exit icp_rm_try_to_deliver() the target
	 * processor may well have already consumed & completed it, and thus
	 * the rejected interrupt might actually be already acceptable.
	 */
	if (icp_rm_try_to_deliver(icp, new_irq, state->priority, &reject)) {
		/*
		 * Delivery was successful, did we reject somebody else ?
		 */
		if (reject && reject != XICS_IPI) {
			arch_spin_unlock(&ics->lock);
			new_irq = reject;
			goto again;
		}
	} else {
		/*
		 * We failed to deliver the interrupt we need to set the
		 * resend map bit and mark the ICS state as needing a resend
		 */
		set_bit(ics->icsid, icp->resend_map);
		state->resend = 1;

		/*
		 * If the need_resend flag got cleared in the ICP some time
		 * between icp_rm_try_to_deliver() atomic update and now, then
		 * we know it might have missed the resend_map bit. So we
		 * retry
		 */
		smp_mb();
		if (!icp->state.need_resend) {
			arch_spin_unlock(&ics->lock);
			goto again;
		}
	}
 out:
	arch_spin_unlock(&ics->lock);
}

static void icp_rm_down_cppr(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
			     u8 new_cppr)
{
	union kvmppc_icp_state old_state, new_state;
	bool resend;

	/*
	 * This handles several related states in one operation:
	 *
	 * ICP State: Down_CPPR
	 *
	 * Load CPPR with new value and if the XISR is 0
	 * then check for resends:
	 *
	 * ICP State: Resend
	 *
	 * If MFRR is more favored than CPPR, check for IPIs
	 * and notify ICS of a potential resend. This is done
	 * asynchronously (when used in real mode, we will have
	 * to exit here).
	 *
	 * We do not handle the complete Check_IPI as documented
	 * here. In the PAPR, this state will be used for both
	 * Set_MFRR and Down_CPPR. However, we know that we aren't
	 * changing the MFRR state here so we don't need to handle
	 * the case of an MFRR causing a reject of a pending irq,
	 * this will have been handled when the MFRR was set in the
	 * first place.
	 *
	 * Thus we don't have to handle rejects, only resends.
	 *
	 * When implementing real mode for HV KVM, resend will lead to
	 * a H_TOO_HARD return and the whole transaction will be handled
	 * in virtual mode.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Down_CPPR */
		new_state.cppr = new_cppr;

		/*
		 * Cut down Resend / Check_IPI / IPI
		 *
		 * The logic is that we cannot have a pending interrupt
		 * trumped by an IPI at this point (see above), so we
		 * know that either the pending interrupt is already an
		 * IPI (in which case we don't care to override it) or
		 * it's either more favored than us or non existent
		 */
		if (new_state.mfrr < new_cppr &&
		    new_state.mfrr <= new_state.pending_pri) {
			new_state.pending_pri = new_state.mfrr;
			new_state.xisr = XICS_IPI;
		}

		/* Latch/clear resend bit */
		resend = new_state.need_resend;
		new_state.need_resend = 0;

	} while (!icp_rm_try_update(icp, old_state, new_state));

	/*
	 * Now handle resend checks. Those are asynchronous to the ICP
	 * state update in HW (ie bus transactions) so we can handle them
	 * separately here as well.
	 */
	if (resend) {
		icp->n_check_resend++;
		icp_rm_check_resend(xics, icp);
	}
}

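/*
 * Real-mode handler for the H_XIRR hcall (ICP Accept_Interrupt). The
 * 32-bit XIRR value (CPPR in the top byte, XISR below it) is handed
 * back to the guest in GPR4.
 */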
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 xirr;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	/* First clear the interrupt */
	icp_rm_clr_vcpu_irq(icp->vcpu);

	/*
	 * ICP State: Accept_Interrupt
	 *
	 * Return the pending interrupt (if any) along with the
	 * current CPPR, then clear the XISR & set CPPR to the
	 * pending priority
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		xirr = old_state.xisr | (((u32)old_state.cppr) << 24);
		if (!old_state.xisr)
			break;
		new_state.cppr = new_state.pending_pri;
		new_state.pending_pri = 0xff;
		new_state.xisr = 0;

	} while (!icp_rm_try_update(icp, old_state, new_state));

	/* Return the result in GPR4 */
	vcpu->arch.gpr[4] = xirr;

	return check_too_hard(xics, icp);
}

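/*
 * Real-mode handler for the H_IPI hcall (ICP Set_MFRR): set the target
 * server's MFRR and, if it is now the most favored, make an IPI pending
 * there, possibly rejecting the interrupt it displaces.
 */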
int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
		    unsigned long mfrr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp, *this_icp = vcpu->arch.icp;
	u32 reject;
	bool resend;
	bool local;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	local = this_icp->server_num == server;
	if (local)
		icp = this_icp;
	else {
		icp = kvmppc_xics_find_server(vcpu->kvm, server);
		if (!icp)
			return H_PARAMETER;
	}

	/*
	 * ICP state: Set_MFRR
	 *
	 * If the CPPR is more favored than the new MFRR, then
	 * nothing needs to be done as there can be no XISR to
	 * reject.
	 *
	 * ICP state: Check_IPI
	 *
	 * If the CPPR is less favored, then we might be replacing
	 * an interrupt, and thus need to possibly reject it.
	 *
	 * ICP State: IPI
	 *
	 * Besides rejecting any pending interrupts, we also
	 * update XISR and pending_pri to mark IPI as pending.
	 *
	 * PAPR does not describe this state, but if the MFRR is being
	 * made less favored than its earlier value, there might be
	 * a previously-rejected interrupt needing to be resent.
	 * Ideally, we would want to resend only if
	 *	prio(pending_interrupt) < mfrr &&
	 *	prio(pending_interrupt) < cppr
	 * where pending interrupt is the one that was rejected. But
	 * we don't have that state, so we simply trigger a resend
	 * whenever the MFRR is made less favored.
	 */
	do {
		old_state = new_state = READ_ONCE(icp->state);

		/* Set_MFRR */
		new_state.mfrr = mfrr;

		/* Check_IPI */
		reject = 0;
		resend = false;
		if (mfrr < new_state.cppr) {
			/* Reject a pending interrupt if not an IPI */
			if (mfrr <= new_state.pending_pri) {
				reject = new_state.xisr;
				new_state.pending_pri = mfrr;
				new_state.xisr = XICS_IPI;
			}
		}

		if (mfrr > old_state.mfrr) {
			resend = new_state.need_resend;
			new_state.need_resend = 0;
		}
	} while (!icp_rm_try_update(icp, old_state, new_state));

	/* Handle reject in real mode */
	if (reject && reject != XICS_IPI) {
		this_icp->n_reject++;
		icp_rm_deliver_irq(xics, icp, reject);
	}

	/* Handle resends in real mode */
	if (resend) {
		this_icp->n_check_resend++;
		icp_rm_check_resend(xics, icp);
	}

	return check_too_hard(xics, this_icp);
}

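/*
 * Real-mode handler for the H_CPPR hcall (ICP Set_CPPR): raise or lower
 * this processor's CPPR, rejecting the pending interrupt if the new
 * priority no longer allows it.
 */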
int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	union kvmppc_icp_state old_state, new_state;
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	u32 reject;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	/*
	 * ICP State: Set_CPPR
	 *
	 * We can safely compare the new value with the current
	 * value outside of the transaction as the CPPR is only
	 * ever changed by the processor on itself
	 */
	if (cppr > icp->state.cppr) {
		icp_rm_down_cppr(xics, icp, cppr);
		goto bail;
	} else if (cppr == icp->state.cppr)
		return H_SUCCESS;

	/*
	 * The processor is raising its priority, this can result
	 * in a rejection of a pending interrupt:
	 *
	 * ICP State: Reject_Current
	 *
	 * We can remove EE from the current processor, the update
	 * transaction will set it again if needed
	 */
	icp_rm_clr_vcpu_irq(icp->vcpu);

	do {
		old_state = new_state = READ_ONCE(icp->state);

		reject = 0;
		new_state.cppr = cppr;

		if (cppr <= new_state.pending_pri) {
			reject = new_state.xisr;
			new_state.xisr = 0;
			new_state.pending_pri = 0xff;
		}

	} while (!icp_rm_try_update(icp, old_state, new_state));

	/*
	 * Check for rejects. They are handled by doing a new delivery
	 * attempt (see comments in icp_rm_deliver_irq).
	 */
	if (reject && reject != XICS_IPI) {
		icp->n_reject++;
		icp_rm_deliver_irq(xics, icp, reject);
	}
 bail:
	return check_too_hard(xics, icp);
}

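/*
 * Real-mode handler for the H_EOI hcall: lower the CPPR back to the
 * value carried in the XIRR being EOIed and, for sources that are still
 * asserted, redeliver the interrupt.
 */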
int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	struct kvmppc_xics *xics = vcpu->kvm->arch.xics;
	struct kvmppc_icp *icp = vcpu->arch.icp;
	struct kvmppc_ics *ics;
	struct ics_irq_state *state;
	u32 irq = xirr & 0x00ffffff;
	u16 src;

	if (!xics || !xics->real_mode)
		return H_TOO_HARD;

	/*
	 * ICP State: EOI
	 *
	 * Note: If EOI is incorrectly used by SW to lower the CPPR
	 * value (ie more favored), we do not check for rejection of
	 * a pending interrupt, this is a SW error and PAPR specifies
	 * that we don't have to deal with it.
	 *
	 * The sending of an EOI to the ICS is handled after the
	 * CPPR update
	 *
	 * ICP State: Down_CPPR which we handle
	 * in a separate function as it's shared with H_CPPR.
	 */
	icp_rm_down_cppr(xics, icp, xirr >> 24);

	/* IPIs have no EOI */
	if (irq == XICS_IPI)
		goto bail;
	/*
	 * EOI handling: If the interrupt is still asserted, we need to
	 * resend it. We can take a lockless "peek" at the ICS state here.
	 *
	 * "Message" interrupts will never have "asserted" set
	 */
	ics = kvmppc_xics_find_ics(xics, irq, &src);
	if (!ics)
		goto bail;
	state = &ics->irq_state[src];

	/* Still asserted, resend it */
	if (state->asserted) {
		icp->n_reject++;
		icp_rm_deliver_irq(xics, icp, irq);
	}

	if (!hlist_empty(&vcpu->kvm->irq_ack_notifier_list)) {
		icp->rm_action |= XICS_RM_NOTIFY_EOI;
		icp->rm_eoied_irq = irq;
	}
 bail:
	return check_too_hard(xics, icp);
}

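/*
 * EOI a passed-through interrupt at the real hardware: first at the
 * source controller via OPAL, then at the XICS presentation layer with
 * a cache-inhibited store to the real-mode XIRR register. The last OPAL
 * failure code is kept in eoi_rc for debugging.
 */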
unsigned long eoi_rc;

static void icp_eoi(struct irq_chip *c, u32 hwirq, u32 xirr)
{
	unsigned long xics_phys;
	int64_t rc;

	rc = pnv_opal_pci_msi_eoi(c, hwirq);

	if (rc)
		eoi_rc = rc;

	iosync();

	/* EOI it */
	xics_phys = local_paca->kvm_hstate.xics_phys;
	_stwcix(xics_phys + XICS_XIRR, xirr);
}

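/*
 * Deliver a passed-through hardware interrupt directly to the guest in
 * real mode: inject the mapped virtual irq into the ICP, then EOI the
 * hardware side so the device can raise its next interrupt.
 */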
long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu,
				 u32 xirr,
				 struct kvmppc_irq_map *irq_map,
				 struct kvmppc_passthru_irqmap *pimap)
{
	struct kvmppc_xics *xics;
	struct kvmppc_icp *icp;
	u32 irq;

	irq = irq_map->v_hwirq;
	xics = vcpu->kvm->arch.xics;
	icp = vcpu->arch.icp;

	icp_rm_deliver_irq(xics, icp, irq);

	/* EOI the interrupt */
	icp_eoi(irq_desc_get_chip(irq_map->desc), irq_map->r_hwirq, xirr);

	if (check_too_hard(xics, icp) == H_TOO_HARD)
		return 2;
	else
		return -2;
}

/*  --- Non-real mode XICS-related built-in routines ---  */

/**
 * Host Operations poked by RM KVM
 */
static void rm_host_ipi_action(int action, void *data)
{
	switch (action) {
	case XICS_RM_KICK_VCPU:
		kvmppc_host_rm_ops_hv->vcpu_kick(data);
		break;
	default:
		WARN(1, "Unexpected rm_action=%d data=%p\n", action, data);
		break;
	}
}

void kvmppc_xics_ipi_action(void)
{
	int core;
	unsigned int cpu = smp_processor_id();
	struct kvmppc_host_rm_core *rm_corep;

	core = cpu >> threads_shift;
	rm_corep = &kvmppc_host_rm_ops_hv->rm_core[core];

	if (rm_corep->rm_data) {
		rm_host_ipi_action(rm_corep->rm_state.rm_action,
							rm_corep->rm_data);
		/* Order these stores against the real mode KVM */
		rm_corep->rm_data = NULL;
		smp_wmb();
		rm_corep->rm_state.rm_action = 0;
	}
}