x86, asmlinkage, xen, kvm: Make {xen,kvm}_lock_spinning global and visible
author     Andi Kleen <ak@linux.intel.com>
           Tue, 22 Oct 2013 16:07:58 +0000 (09:07 -0700)
committer  H. Peter Anvin <hpa@linux.intel.com>
           Thu, 30 Jan 2014 06:17:18 +0000 (22:17 -0800)
These functions are called from inline assembler stubs and thus
need to be global and visible.
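For illustration only (not part of the patch), a minimal sketch of the pattern
being fixed, using hypothetical names (example_lock_spinning, example_stub):
when a C function is referenced only by name from an asm() statement, the
compiler cannot see that reference, so unless the function is global and
marked __visible it may be localized or discarded entirely, especially under LTO.

    #include <linux/compiler.h>		/* __visible */

    /* Referenced only from the asm() stub below: must stay a global,
     * externally visible symbol or the compiler is free to drop it. */
    __visible void example_lock_spinning(void)
    {
    	/* ... slow-path spinning work would go here ... */
    }

    /* Hand-written assembler stub that calls the C function by name. */
    asm(".pushsection .text\n"
        ".globl example_stub\n"
        "example_stub:\n"
        "	call example_lock_spinning\n"
        "	ret\n"
        ".popsection");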

Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Gleb Natapov <gleb@kernel.org>
Cc: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Link: http://lkml.kernel.org/r/1382458079-24450-7-git-send-email-andi@firstfloor.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
arch/x86/kernel/kvm.c
arch/x86/xen/spinlock.c

diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
index 6dd802c..cd1b362 100644
--- a/arch/x86/kernel/kvm.c
+++ b/arch/x86/kernel/kvm.c
@@ -673,7 +673,7 @@ static cpumask_t waiting_cpus;
 /* Track spinlock on which a cpu is waiting */
 static DEFINE_PER_CPU(struct kvm_lock_waiting, klock_waiting);
 
-static void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
+__visible void kvm_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
 {
        struct kvm_lock_waiting *w;
        int cpu;
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index 0e36cde..581521c 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -106,7 +106,7 @@ static DEFINE_PER_CPU(struct xen_lock_waiting, lock_waiting);
 static cpumask_t waiting_cpus;
 
 static bool xen_pvspin = true;
-static void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
+__visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
 {
        int irq = __this_cpu_read(lock_kicker_irq);
        struct xen_lock_waiting *w = &__get_cpu_var(lock_waiting);
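For context (also not part of this patch): the inline assembler stubs in
question are the paravirt callee-save thunks. A rough sketch of how the Xen
side wires up the callback is below; the PV_CALLEE_SAVE_REGS_THUNK and
PV_CALLEE_SAVE macro names reflect kernels of this era and the exact form
should be checked against arch/x86/include/asm/paravirt_types.h in the
matching tree.

    /* The thunk macro emits a global asm stub that saves/restores registers
     * and then calls xen_lock_spinning by name, which is why the C symbol
     * has to be global and __visible. */
    PV_CALLEE_SAVE_REGS_THUNK(xen_lock_spinning);

    void __init xen_init_spinlocks(void)
    {
    	if (!xen_pvspin)
    		return;

    	pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning);
    	pv_lock_ops.unlock_kick = xen_unlock_kick;
    }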