Merge tag 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jgarzik/libata-dev
author	Linus Torvalds <torvalds@linux-foundation.org>
	Thu, 13 Sep 2012 11:07:15 +0000 (19:07 +0800)
committer	Linus Torvalds <torvalds@linux-foundation.org>
	Thu, 13 Sep 2012 11:07:15 +0000 (19:07 +0800)
Pull additional AHCI PCI IDs from Jeff Garzik.

* tag 'upstream-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/jgarzik/libata-dev:
  ahci: Add identifiers for ASM106x devices
  ahci: Add alternate identifier for the 88SE9172
  ahci: Add JMicron 362 device IDs
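
For reference, AHCI ID patches like these take the usual form of new ahci_pci_tbl[] entries in drivers/ata/ahci.c (those hunks are not part of the truncated diff below). A minimal sketch with an illustrative entry; the vendor/device values and board type here are assumptions, not the exact lines from these commits:

    /* Sketch only: how a new AHCI PCI ID is typically registered. */
    static const struct pci_device_id ahci_pci_tbl[] = {
            /* ... existing entries ... */
            { PCI_VDEVICE(JMICRON, 0x2362), board_ahci_ign_iferr }, /* illustrative */
            { }     /* terminating entry */
    };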

49 files changed:
arch/arm/Kconfig.debug
arch/arm/Makefile
arch/arm/boot/compressed/head.S
arch/arm/include/asm/assembler.h
arch/arm/include/asm/memory.h
arch/arm/include/asm/tlb.h
arch/arm/include/asm/uaccess.h
arch/arm/kernel/hw_breakpoint.c
arch/arm/kernel/traps.c
arch/arm/lib/delay.c
arch/arm/lib/getuser.S
arch/arm/lib/putuser.S
arch/arm/mm/context.c
arch/arm/mm/mm.h
arch/arm/mm/mmu.c
arch/blackfin/Kconfig
arch/blackfin/Makefile
arch/blackfin/include/asm/smp.h
arch/blackfin/mach-common/smp.c
arch/x86/kvm/i8259.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
crypto/authenc.c
drivers/crypto/caam/key_gen.c
drivers/scsi/megaraid/megaraid_sas_base.c
drivers/scsi/mpt2sas/mpt2sas_base.c
drivers/scsi/scsi_error.c
drivers/scsi/scsi_lib.c
drivers/scsi/scsi_scan.c
fs/fuse/control.c
fs/fuse/cuse.c
fs/fuse/dev.c
fs/fuse/inode.c
fs/nfs/file.c
fs/nfs/inode.c
fs/nfs/nfs3proc.c
fs/nfs/nfs4file.c
fs/nfs/nfs4proc.c
fs/nfs/nfs4xdr.c
fs/nfs/super.c
include/linux/nfs_fs.h
include/linux/nfs_xdr.h
include/linux/sunrpc/xprt.h
kernel/workqueue.c
lib/digsig.c
net/sunrpc/xprt.c
net/sunrpc/xprtrdma/transport.c
net/sunrpc/xprtsock.c
scripts/link-vmlinux.sh

diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index f15f82b..e968a52 100644
@@ -356,15 +356,15 @@ choice
                  is nothing connected to read from the DCC.
 
        config DEBUG_SEMIHOSTING
-               bool "Kernel low-level debug output via semihosting I"
+               bool "Kernel low-level debug output via semihosting I/O"
                help
                  Semihosting enables code running on an ARM target to use
                  the I/O facilities on a host debugger/emulator through a
-                 simple SVC calls. The host debugger or emulator must have
+                 simple SVC call. The host debugger or emulator must have
                  semihosting enabled for the special svc call to be trapped
                  otherwise the kernel will crash.
 
-                 This is known to work with OpenOCD, as wellas
+                 This is known to work with OpenOCD, as well as
                  ARM's Fast Models, or any other controlling environment
                  that implements semihosting.
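
For context, a semihosting operation loads an operation number into r0 and a parameter (or parameter-block pointer) into r1, then issues the special SVC that the host traps. A minimal sketch of a character write, assuming the standard ARM-state convention (SYS_WRITEC = 0x03, SVC immediate 0x123456); this is illustrative, not code from the patch:

    /* Sketch: emit one character through the host debugger. */
    static void semihost_putc(char c)
    {
            register unsigned long op  asm("r0") = 0x03;            /* SYS_WRITEC */
            register unsigned long arg asm("r1") = (unsigned long)&c;

            asm volatile("svc 0x123456"     /* trapped by OpenOCD, Fast Models, ... */
                         : "+r" (op)
                         : "r" (arg)
                         : "memory", "cc");
    }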
 
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 30eae87..a051dfb 100644
@@ -284,10 +284,10 @@ zImage Image xipImage bootpImage uImage: vmlinux
 zinstall uinstall install: vmlinux
        $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $@
 
-%.dtb:
+%.dtb: scripts
        $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
 
-dtbs:
+dtbs: scripts
        $(Q)$(MAKE) $(build)=$(boot) MACHINE=$(MACHINE) $(boot)/$@
 
 # We use MRPROPER_FILES and CLEAN_FILES now
diff --git a/arch/arm/boot/compressed/head.S b/arch/arm/boot/compressed/head.S
index b8c64b8..81769c1 100644
@@ -659,10 +659,14 @@ __armv7_mmu_cache_on:
 #ifdef CONFIG_CPU_ENDIAN_BE8
                orr     r0, r0, #1 << 25        @ big-endian page tables
 #endif
+               mrcne   p15, 0, r6, c2, c0, 2   @ read ttb control reg
                orrne   r0, r0, #1              @ MMU enabled
                movne   r1, #0xfffffffd         @ domain 0 = client
+               bic     r6, r6, #1 << 31        @ 32-bit translation system
+               bic     r6, r6, #3 << 0         @ use only ttbr0
                mcrne   p15, 0, r3, c2, c0, 0   @ load page table pointer
                mcrne   p15, 0, r1, c3, c0, 0   @ load domain access control
+               mcrne   p15, 0, r6, c2, c0, 2   @ load ttb control
 #endif
                mcr     p15, 0, r0, c7, c5, 4   @ ISB
                mcr     p15, 0, r0, c1, c0, 0   @ load control register
diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h
index 03fb936..5c8b3bf 100644
        .size \name , . - \name
        .endm
 
+       .macro check_uaccess, addr:req, size:req, limit:req, tmp:req, bad:req
+#ifndef CONFIG_CPU_USE_DOMAINS
+       adds    \tmp, \addr, #\size - 1
+       sbcccs  \tmp, \tmp, \limit
+       bcs     \bad
+#endif
+       .endm
+
 #endif /* __ASM_ASSEMBLER_H__ */
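
The adds/sbcccs pair is a branch-free unsigned range check: adds computes the address of the access's last byte and sets carry if the addition wraps, while sbcccs runs only when no wrap occurred and leaves carry set exactly when that last byte is above the limit, so a single bcs catches both failure modes. An equivalent C sketch (the limit passed in is addr_limit - 1, as set up by the uaccess.h callers below):

    /* Sketch: the condition check_uaccess branches to \bad on. */
    static inline int uaccess_bad(unsigned long addr, unsigned long size,
                                  unsigned long limit)
    {
            unsigned long last = addr + size - 1;

            return last < addr /* wrapped */ || last > limit;
    }
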
diff --git a/arch/arm/include/asm/memory.h b/arch/arm/include/asm/memory.h
index e965f1b..5f6ddcc 100644
@@ -187,6 +187,7 @@ static inline unsigned long __phys_to_virt(unsigned long x)
 #define __phys_to_virt(x)      ((x) - PHYS_OFFSET + PAGE_OFFSET)
 #endif
 #endif
+#endif /* __ASSEMBLY__ */
 
 #ifndef PHYS_OFFSET
 #ifdef PLAT_PHYS_OFFSET
@@ -196,6 +197,8 @@ static inline unsigned long __phys_to_virt(unsigned long x)
 #endif
 #endif
 
+#ifndef __ASSEMBLY__
+
 /*
  * PFNs are used to describe any physical page; this means
  * PFN 0 == physical address 0.
diff --git a/arch/arm/include/asm/tlb.h b/arch/arm/include/asm/tlb.h
index 314d466..99a1951 100644
@@ -199,6 +199,9 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
 {
        pgtable_page_dtor(pte);
 
+#ifdef CONFIG_ARM_LPAE
+       tlb_add_flush(tlb, addr);
+#else
        /*
         * With the classic ARM MMU, a pte page has two corresponding pmd
         * entries, each covering 1MB.
@@ -206,6 +209,7 @@ static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
        addr &= PMD_MASK;
        tlb_add_flush(tlb, addr + SZ_1M - PAGE_SIZE);
        tlb_add_flush(tlb, addr + SZ_1M);
+#endif
 
        tlb_remove_page(tlb, pte);
 }
diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
index 479a635..77bd79f 100644
@@ -101,28 +101,39 @@ extern int __get_user_1(void *);
 extern int __get_user_2(void *);
 extern int __get_user_4(void *);
 
-#define __get_user_x(__r2,__p,__e,__s,__i...)                          \
+#define __GUP_CLOBBER_1        "lr", "cc"
+#ifdef CONFIG_CPU_USE_DOMAINS
+#define __GUP_CLOBBER_2        "ip", "lr", "cc"
+#else
+#define __GUP_CLOBBER_2 "lr", "cc"
+#endif
+#define __GUP_CLOBBER_4        "lr", "cc"
+
+#define __get_user_x(__r2,__p,__e,__l,__s)                             \
           __asm__ __volatile__ (                                       \
                __asmeq("%0", "r0") __asmeq("%1", "r2")                 \
+               __asmeq("%3", "r1")                                     \
                "bl     __get_user_" #__s                               \
                : "=&r" (__e), "=r" (__r2)                              \
-               : "0" (__p)                                             \
-               : __i, "cc")
+               : "0" (__p), "r" (__l)                                  \
+               : __GUP_CLOBBER_##__s)
 
-#define get_user(x,p)                                                  \
+#define __get_user_check(x,p)                                                  \
        ({                                                              \
+               unsigned long __limit = current_thread_info()->addr_limit - 1; \
                register const typeof(*(p)) __user *__p asm("r0") = (p);\
                register unsigned long __r2 asm("r2");                  \
+               register unsigned long __l asm("r1") = __limit;         \
                register int __e asm("r0");                             \
                switch (sizeof(*(__p))) {                               \
                case 1:                                                 \
-                       __get_user_x(__r2, __p, __e, 1, "lr");          \
-                       break;                                          \
+                       __get_user_x(__r2, __p, __e, __l, 1);           \
+                       break;                                          \
                case 2:                                                 \
-                       __get_user_x(__r2, __p, __e, 2, "r3", "lr");    \
+                       __get_user_x(__r2, __p, __e, __l, 2);           \
                        break;                                          \
                case 4:                                                 \
-                       __get_user_x(__r2, __p, __e, 4, "lr");          \
+                       __get_user_x(__r2, __p, __e, __l, 4);           \
                        break;                                          \
                default: __e = __get_user_bad(); break;                 \
                }                                                       \
@@ -130,42 +141,57 @@ extern int __get_user_4(void *);
                __e;                                                    \
        })
 
+#define get_user(x,p)                                                  \
+       ({                                                              \
+               might_fault();                                          \
+               __get_user_check(x,p);                                  \
+        })
+
 extern int __put_user_1(void *, unsigned int);
 extern int __put_user_2(void *, unsigned int);
 extern int __put_user_4(void *, unsigned int);
 extern int __put_user_8(void *, unsigned long long);
 
-#define __put_user_x(__r2,__p,__e,__s)                                 \
+#define __put_user_x(__r2,__p,__e,__l,__s)                             \
           __asm__ __volatile__ (                                       \
                __asmeq("%0", "r0") __asmeq("%2", "r2")                 \
+               __asmeq("%3", "r1")                                     \
                "bl     __put_user_" #__s                               \
                : "=&r" (__e)                                           \
-               : "0" (__p), "r" (__r2)                                 \
+               : "0" (__p), "r" (__r2), "r" (__l)                      \
                : "ip", "lr", "cc")
 
-#define put_user(x,p)                                                  \
+#define __put_user_check(x,p)                                                  \
        ({                                                              \
+               unsigned long __limit = current_thread_info()->addr_limit - 1; \
                register const typeof(*(p)) __r2 asm("r2") = (x);       \
                register const typeof(*(p)) __user *__p asm("r0") = (p);\
+               register unsigned long __l asm("r1") = __limit;         \
                register int __e asm("r0");                             \
                switch (sizeof(*(__p))) {                               \
                case 1:                                                 \
-                       __put_user_x(__r2, __p, __e, 1);                \
+                       __put_user_x(__r2, __p, __e, __l, 1);           \
                        break;                                          \
                case 2:                                                 \
-                       __put_user_x(__r2, __p, __e, 2);                \
+                       __put_user_x(__r2, __p, __e, __l, 2);           \
                        break;                                          \
                case 4:                                                 \
-                       __put_user_x(__r2, __p, __e, 4);                \
+                       __put_user_x(__r2, __p, __e, __l, 4);           \
                        break;                                          \
                case 8:                                                 \
-                       __put_user_x(__r2, __p, __e, 8);                \
+                       __put_user_x(__r2, __p, __e, __l, 8);           \
                        break;                                          \
                default: __e = __put_user_bad(); break;                 \
                }                                                       \
                __e;                                                    \
        })
 
+#define put_user(x,p)                                                  \
+       ({                                                              \
+               might_fault();                                          \
+               __put_user_check(x,p);                                  \
+        })
+
 #else /* CONFIG_MMU */
 
 /*
@@ -219,6 +245,7 @@ do {                                                                        \
        unsigned long __gu_addr = (unsigned long)(ptr);                 \
        unsigned long __gu_val;                                         \
        __chk_user_ptr(ptr);                                            \
+       might_fault();                                                  \
        switch (sizeof(*(ptr))) {                                       \
        case 1: __get_user_asm_byte(__gu_val,__gu_addr,err);    break;  \
        case 2: __get_user_asm_half(__gu_val,__gu_addr,err);    break;  \
@@ -300,6 +327,7 @@ do {                                                                        \
        unsigned long __pu_addr = (unsigned long)(ptr);                 \
        __typeof__(*(ptr)) __pu_val = (x);                              \
        __chk_user_ptr(ptr);                                            \
+       might_fault();                                                  \
        switch (sizeof(*(ptr))) {                                       \
        case 1: __put_user_asm_byte(__pu_val,__pu_addr,err);    break;  \
        case 2: __put_user_asm_half(__pu_val,__pu_addr,err);    break;  \
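
With the address limit passed in r1 and verified inside the __get_user/__put_user helpers, an out-of-range pointer now simply makes the accessor return nonzero (-EFAULT), so the ordinary error-checked call pattern also covers kernel addresses handed in from user context (usage sketch, mirroring the traps.c change below):

    u16 instr;

    if (get_user(instr, (u16 __user *)pc))  /* now fails for kernel addresses too */
            return -EFAULT;
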
diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
index ba386bd..281bf33 100644
@@ -159,6 +159,12 @@ static int debug_arch_supported(void)
                arch >= ARM_DEBUG_ARCH_V7_1;
 }
 
+/* Can we determine the watchpoint access type from the fsr? */
+static int debug_exception_updates_fsr(void)
+{
+       return 0;
+}
+
 /* Determine number of WRP registers available. */
 static int get_num_wrp_resources(void)
 {
@@ -604,13 +610,14 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
                /* Aligned */
                break;
        case 1:
-               /* Allow single byte watchpoint. */
-               if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
-                       break;
        case 2:
                /* Allow halfword watchpoints and breakpoints. */
                if (info->ctrl.len == ARM_BREAKPOINT_LEN_2)
                        break;
+       case 3:
+               /* Allow single byte watchpoint. */
+               if (info->ctrl.len == ARM_BREAKPOINT_LEN_1)
+                       break;
        default:
                ret = -EINVAL;
                goto out;
@@ -619,18 +626,35 @@ int arch_validate_hwbkpt_settings(struct perf_event *bp)
        info->address &= ~alignment_mask;
        info->ctrl.len <<= offset;
 
-       /*
-        * Currently we rely on an overflow handler to take
-        * care of single-stepping the breakpoint when it fires.
-        * In the case of userspace breakpoints on a core with V7 debug,
-        * we can use the mismatch feature as a poor-man's hardware
-        * single-step, but this only works for per-task breakpoints.
-        */
-       if (!bp->overflow_handler && (arch_check_bp_in_kernelspace(bp) ||
-           !core_has_mismatch_brps() || !bp->hw.bp_target)) {
-               pr_warning("overflow handler required but none found\n");
-               ret = -EINVAL;
+       if (!bp->overflow_handler) {
+               /*
+                * Mismatch breakpoints are required for single-stepping
+                * breakpoints.
+                */
+               if (!core_has_mismatch_brps())
+                       return -EINVAL;
+
+               /* We don't allow mismatch breakpoints in kernel space. */
+               if (arch_check_bp_in_kernelspace(bp))
+                       return -EPERM;
+
+               /*
+                * Per-cpu breakpoints are not supported by our stepping
+                * mechanism.
+                */
+               if (!bp->hw.bp_target)
+                       return -EINVAL;
+
+               /*
+                * We only support specific access types if the fsr
+                * reports them.
+                */
+               if (!debug_exception_updates_fsr() &&
+                   (info->ctrl.type == ARM_BREAKPOINT_LOAD ||
+                    info->ctrl.type == ARM_BREAKPOINT_STORE))
+                       return -EINVAL;
        }
+
 out:
        return ret;
 }
@@ -706,10 +730,12 @@ static void watchpoint_handler(unsigned long addr, unsigned int fsr,
                                goto unlock;
 
                        /* Check that the access type matches. */
-                       access = (fsr & ARM_FSR_ACCESS_MASK) ? HW_BREAKPOINT_W :
-                                HW_BREAKPOINT_R;
-                       if (!(access & hw_breakpoint_type(wp)))
-                               goto unlock;
+                       if (debug_exception_updates_fsr()) {
+                               access = (fsr & ARM_FSR_ACCESS_MASK) ?
+                                         HW_BREAKPOINT_W : HW_BREAKPOINT_R;
+                               if (!(access & hw_breakpoint_type(wp)))
+                                       goto unlock;
+                       }
 
                        /* We have a winner. */
                        info->trigger = addr;
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index f794521..b0179b8 100644
@@ -420,20 +420,23 @@ asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
 #endif
                        instr = *(u32 *) pc;
        } else if (thumb_mode(regs)) {
-               get_user(instr, (u16 __user *)pc);
+               if (get_user(instr, (u16 __user *)pc))
+                       goto die_sig;
                if (is_wide_instruction(instr)) {
                        unsigned int instr2;
-                       get_user(instr2, (u16 __user *)pc+1);
+                       if (get_user(instr2, (u16 __user *)pc+1))
+                               goto die_sig;
                        instr <<= 16;
                        instr |= instr2;
                }
-       } else {
-               get_user(instr, (u32 __user *)pc);
+       } else if (get_user(instr, (u32 __user *)pc)) {
+               goto die_sig;
        }
 
        if (call_undef_hook(regs, instr) == 0)
                return;
 
+die_sig:
 #ifdef CONFIG_DEBUG_USER
        if (user_debug & UDBG_UNDEFINED) {
                printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",
diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
index d6dacc6..395d5fb 100644
@@ -59,6 +59,7 @@ void __init init_current_timer_delay(unsigned long freq)
 {
        pr_info("Switching to timer-based delay loop\n");
        lpj_fine                        = freq / HZ;
+       loops_per_jiffy                 = lpj_fine;
        arm_delay_ops.delay             = __timer_delay;
        arm_delay_ops.const_udelay      = __timer_const_udelay;
        arm_delay_ops.udelay            = __timer_udelay;
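
The added assignment matters because the const_udelay path scales microseconds by loops_per_jiffy, and with a timer-based loop one "loop" is one timer tick. A worked example with assumed numbers (1 MHz timer, HZ = 100, so lpj_fine = 10000):

    ticks = usecs * loops_per_jiffy * HZ / 1000000
    udelay(100) -> 100 * 10000 * 100 / 1000000 = 100 ticks = 100 us at 1 MHz

Leaving loops_per_jiffy at the old bogomips-derived value would make udelay() wait for the wrong number of timer ticks.
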
diff --git a/arch/arm/lib/getuser.S b/arch/arm/lib/getuser.S
index 11093a7..9b06bb4 100644
@@ -16,8 +16,9 @@
  * __get_user_X
  *
  * Inputs:     r0 contains the address
+ *             r1 contains the address limit, which must be preserved
  * Outputs:    r0 is the error code
- *             r2, r3 contains the zero-extended value
+ *             r2 contains the zero-extended value
  *             lr corrupted
  *
  * No other registers must be altered.  (see <asm/uaccess.h>
  * Note also that it is intended that __get_user_bad is not global.
  */
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 #include <asm/errno.h>
 #include <asm/domain.h>
 
 ENTRY(__get_user_1)
+       check_uaccess r0, 1, r1, r2, __get_user_bad
 1: TUSER(ldrb) r2, [r0]
        mov     r0, #0
        mov     pc, lr
 ENDPROC(__get_user_1)
 
 ENTRY(__get_user_2)
-#ifdef CONFIG_THUMB2_KERNEL
-2: TUSER(ldrb) r2, [r0]
-3: TUSER(ldrb) r3, [r0, #1]
+       check_uaccess r0, 2, r1, r2, __get_user_bad
+#ifdef CONFIG_CPU_USE_DOMAINS
+rb     .req    ip
+2:     ldrbt   r2, [r0], #1
+3:     ldrbt   rb, [r0], #0
 #else
-2: TUSER(ldrb) r2, [r0], #1
-3: TUSER(ldrb) r3, [r0]
+rb     .req    r0
+2:     ldrb    r2, [r0]
+3:     ldrb    rb, [r0, #1]
 #endif
 #ifndef __ARMEB__
-       orr     r2, r2, r3, lsl #8
+       orr     r2, r2, rb, lsl #8
 #else
-       orr     r2, r3, r2, lsl #8
+       orr     r2, rb, r2, lsl #8
 #endif
        mov     r0, #0
        mov     pc, lr
 ENDPROC(__get_user_2)
 
 ENTRY(__get_user_4)
+       check_uaccess r0, 4, r1, r2, __get_user_bad
 4: TUSER(ldr)  r2, [r0]
        mov     r0, #0
        mov     pc, lr
diff --git a/arch/arm/lib/putuser.S b/arch/arm/lib/putuser.S
index 7db2599..3d73dcb 100644
@@ -16,6 +16,7 @@
  * __put_user_X
  *
  * Inputs:     r0 contains the address
+ *             r1 contains the address limit, which must be preserved
  *             r2, r3 contains the value
  * Outputs:    r0 is the error code
  *             lr corrupted
  * Note also that it is intended that __put_user_bad is not global.
  */
 #include <linux/linkage.h>
+#include <asm/assembler.h>
 #include <asm/errno.h>
 #include <asm/domain.h>
 
 ENTRY(__put_user_1)
+       check_uaccess r0, 1, r1, ip, __put_user_bad
 1: TUSER(strb) r2, [r0]
        mov     r0, #0
        mov     pc, lr
 ENDPROC(__put_user_1)
 
 ENTRY(__put_user_2)
+       check_uaccess r0, 2, r1, ip, __put_user_bad
        mov     ip, r2, lsr #8
 #ifdef CONFIG_THUMB2_KERNEL
 #ifndef __ARMEB__
@@ -60,12 +64,14 @@ ENTRY(__put_user_2)
 ENDPROC(__put_user_2)
 
 ENTRY(__put_user_4)
+       check_uaccess r0, 4, r1, ip, __put_user_bad
 4: TUSER(str)  r2, [r0]
        mov     r0, #0
        mov     pc, lr
 ENDPROC(__put_user_4)
 
 ENTRY(__put_user_8)
+       check_uaccess r0, 8, r1, ip, __put_user_bad
 #ifdef CONFIG_THUMB2_KERNEL
 5: TUSER(str)  r2, [r0]
 6: TUSER(str)  r3, [r0, #4]
diff --git a/arch/arm/mm/context.c b/arch/arm/mm/context.c
index 119bc52..4e07eec 100644
@@ -63,10 +63,11 @@ static int contextidr_notifier(struct notifier_block *unused, unsigned long cmd,
        pid = task_pid_nr(thread->task) << ASID_BITS;
        asm volatile(
        "       mrc     p15, 0, %0, c13, c0, 1\n"
-       "       bfi     %1, %0, #0, %2\n"
-       "       mcr     p15, 0, %1, c13, c0, 1\n"
+       "       and     %0, %0, %2\n"
+       "       orr     %0, %0, %1\n"
+       "       mcr     p15, 0, %0, c13, c0, 1\n"
        : "=r" (contextidr), "+r" (pid)
-       : "I" (ASID_BITS));
+       : "I" (~ASID_MASK));
        isb();
 
        return NOTIFY_OK;
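
The rewritten sequence builds the new CONTEXTIDR value explicitly: preserve the hardware ASID field and OR in the pre-shifted PID. An equivalent C sketch (assuming ~ASID_MASK selects the low ASID_BITS of the register):

    /* contextidr = current ASID (low bits) | PID in the PROCID field */
    contextidr = (contextidr & ~ASID_MASK) | pid;   /* pid already << ASID_BITS */
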
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 6776160..a8ee92d 100644
@@ -55,6 +55,9 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
 /* permanent static mappings from iotable_init() */
 #define VM_ARM_STATIC_MAPPING  0x40000000
 
+/* empty mapping */
+#define VM_ARM_EMPTY_MAPPING   0x20000000
+
 /* mapping type (attributes) for permanent static mappings */
 #define VM_ARM_MTYPE(mt)               ((mt) << 20)
 #define VM_ARM_MTYPE_MASK      (0x1f << 20)
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4c2d045..c2fa21d 100644
@@ -807,7 +807,7 @@ static void __init pmd_empty_section_gap(unsigned long addr)
        vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
        vm->addr = (void *)addr;
        vm->size = SECTION_SIZE;
-       vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+       vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
        vm->caller = pmd_empty_section_gap;
        vm_area_add_early(vm);
 }
@@ -820,7 +820,7 @@ static void __init fill_pmd_gaps(void)
 
        /* we're still single threaded hence no lock needed here */
        for (vm = vmlist; vm; vm = vm->next) {
-               if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+               if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)))
                        continue;
                addr = (unsigned long)vm->addr;
                if (addr < next)
@@ -961,8 +961,8 @@ void __init sanity_check_meminfo(void)
                 * Check whether this memory bank would partially overlap
                 * the vmalloc area.
                 */
-               if (__va(bank->start + bank->size) > vmalloc_min ||
-                   __va(bank->start + bank->size) < __va(bank->start)) {
+               if (__va(bank->start + bank->size - 1) >= vmalloc_min ||
+                   __va(bank->start + bank->size - 1) <= __va(bank->start)) {
                        unsigned long newsize = vmalloc_min - __va(bank->start);
                        printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
                               "to -%.8llx (vmalloc region overlap).\n",
diff --git a/arch/blackfin/Kconfig b/arch/blackfin/Kconfig
index f348619..c7092e6 100644
@@ -38,6 +38,7 @@ config BLACKFIN
        select GENERIC_ATOMIC64
        select GENERIC_IRQ_PROBE
        select IRQ_PER_CPU if SMP
+       select USE_GENERIC_SMP_HELPERS if SMP
        select HAVE_NMI_WATCHDOG if NMI_WATCHDOG
        select GENERIC_SMP_IDLE_THREAD
        select ARCH_USES_GETTIMEOFFSET if !GENERIC_CLOCKEVENTS
diff --git a/arch/blackfin/Makefile b/arch/blackfin/Makefile
index d3d7e64..66cf000 100644
@@ -20,7 +20,6 @@ endif
 KBUILD_AFLAGS           += $(call cc-option,-mno-fdpic)
 KBUILD_CFLAGS_MODULE    += -mlong-calls
 LDFLAGS                 += -m elf32bfin
-KALLSYMS         += --symbol-prefix=_
 
 KBUILD_DEFCONFIG := BF537-STAMP_defconfig
 
diff --git a/arch/blackfin/include/asm/smp.h b/arch/blackfin/include/asm/smp.h
index dc3d144..9631598 100644
@@ -18,6 +18,8 @@
 #define raw_smp_processor_id()  blackfin_core_id()
 
 extern void bfin_relocate_coreb_l1_mem(void);
+extern void arch_send_call_function_single_ipi(int cpu);
+extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
 
 #if defined(CONFIG_SMP) && defined(CONFIG_ICACHE_FLUSH_L1)
 asmlinkage void blackfin_icache_flush_range_l1(unsigned long *ptr);
diff --git a/arch/blackfin/mach-common/smp.c b/arch/blackfin/mach-common/smp.c
index 00bbe67..a401513 100644
@@ -48,10 +48,13 @@ unsigned long blackfin_iflush_l1_entry[NR_CPUS];
 
 struct blackfin_initial_pda __cpuinitdata initial_pda_coreb;
 
-#define BFIN_IPI_TIMER       0
-#define BFIN_IPI_RESCHEDULE   1
-#define BFIN_IPI_CALL_FUNC    2
-#define BFIN_IPI_CPU_STOP     3
+enum ipi_message_type {
+       BFIN_IPI_TIMER,
+       BFIN_IPI_RESCHEDULE,
+       BFIN_IPI_CALL_FUNC,
+       BFIN_IPI_CALL_FUNC_SINGLE,
+       BFIN_IPI_CPU_STOP,
+};
 
 struct blackfin_flush_data {
        unsigned long start;
@@ -60,35 +63,20 @@ struct blackfin_flush_data {
 
 void *secondary_stack;
 
-
-struct smp_call_struct {
-       void (*func)(void *info);
-       void *info;
-       int wait;
-       cpumask_t *waitmask;
-};
-
 static struct blackfin_flush_data smp_flush_data;
 
 static DEFINE_SPINLOCK(stop_lock);
 
-struct ipi_message {
-       unsigned long type;
-       struct smp_call_struct call_struct;
-};
-
 /* A magic number - stress test shows this is safe for common cases */
 #define BFIN_IPI_MSGQ_LEN 5
 
 /* Simple FIFO buffer, overflow leads to panic */
-struct ipi_message_queue {
-       spinlock_t lock;
+struct ipi_data {
        unsigned long count;
-       unsigned long head; /* head of the queue */
-       struct ipi_message ipi_message[BFIN_IPI_MSGQ_LEN];
+       unsigned long bits;
 };
 
-static DEFINE_PER_CPU(struct ipi_message_queue, ipi_msg_queue);
+static DEFINE_PER_CPU(struct ipi_data, bfin_ipi);
 
 static void ipi_cpu_stop(unsigned int cpu)
 {
@@ -129,28 +117,6 @@ static void ipi_flush_icache(void *info)
        blackfin_icache_flush_range(fdata->start, fdata->end);
 }
 
-static void ipi_call_function(unsigned int cpu, struct ipi_message *msg)
-{
-       int wait;
-       void (*func)(void *info);
-       void *info;
-       func = msg->call_struct.func;
-       info = msg->call_struct.info;
-       wait = msg->call_struct.wait;
-       func(info);
-       if (wait) {
-#ifdef __ARCH_SYNC_CORE_DCACHE
-               /*
-                * 'wait' usually means synchronization between CPUs.
-                * Invalidate D cache in case shared data was changed
-                * by func() to ensure cache coherence.
-                */
-               resync_core_dcache();
-#endif
-               cpumask_clear_cpu(cpu, msg->call_struct.waitmask);
-       }
-}
-
 /* Use IRQ_SUPPLE_0 to request reschedule.
  * When returning from interrupt to user space,
  * there is chance to reschedule */
@@ -172,152 +138,95 @@ void ipi_timer(void)
 
 static irqreturn_t ipi_handler_int1(int irq, void *dev_instance)
 {
-       struct ipi_message *msg;
-       struct ipi_message_queue *msg_queue;
+       struct ipi_data *bfin_ipi_data;
        unsigned int cpu = smp_processor_id();
-       unsigned long flags;
+       unsigned long pending;
+       unsigned long msg;
 
        platform_clear_ipi(cpu, IRQ_SUPPLE_1);
 
-       msg_queue = &__get_cpu_var(ipi_msg_queue);
-
-       spin_lock_irqsave(&msg_queue->lock, flags);
-
-       while (msg_queue->count) {
-               msg = &msg_queue->ipi_message[msg_queue->head];
-               switch (msg->type) {
-               case BFIN_IPI_TIMER:
-                       ipi_timer();
-                       break;
-               case BFIN_IPI_RESCHEDULE:
-                       scheduler_ipi();
-                       break;
-               case BFIN_IPI_CALL_FUNC:
-                       ipi_call_function(cpu, msg);
-                       break;
-               case BFIN_IPI_CPU_STOP:
-                       ipi_cpu_stop(cpu);
-                       break;
-               default:
-                       printk(KERN_CRIT "CPU%u: Unknown IPI message 0x%lx\n",
-                              cpu, msg->type);
-                       break;
-               }
-               msg_queue->head++;
-               msg_queue->head %= BFIN_IPI_MSGQ_LEN;
-               msg_queue->count--;
+       bfin_ipi_data = &__get_cpu_var(bfin_ipi);
+
+       while ((pending = xchg(&bfin_ipi_data->bits, 0)) != 0) {
+               msg = 0;
+               do {
+                       msg = find_next_bit(&pending, BITS_PER_LONG, msg + 1);
+                       switch (msg) {
+                       case BFIN_IPI_TIMER:
+                               ipi_timer();
+                               break;
+                       case BFIN_IPI_RESCHEDULE:
+                               scheduler_ipi();
+                               break;
+                       case BFIN_IPI_CALL_FUNC:
+                               generic_smp_call_function_interrupt();
+                               break;
+
+                       case BFIN_IPI_CALL_FUNC_SINGLE:
+                               generic_smp_call_function_single_interrupt();
+                               break;
+
+                       case BFIN_IPI_CPU_STOP:
+                               ipi_cpu_stop(cpu);
+                               break;
+                       }
+               } while (msg < BITS_PER_LONG);
+
+               smp_mb();
        }
-       spin_unlock_irqrestore(&msg_queue->lock, flags);
        return IRQ_HANDLED;
 }
 
-static void ipi_queue_init(void)
+static void bfin_ipi_init(void)
 {
        unsigned int cpu;
-       struct ipi_message_queue *msg_queue;
+       struct ipi_data *bfin_ipi_data;
        for_each_possible_cpu(cpu) {
-               msg_queue = &per_cpu(ipi_msg_queue, cpu);
-               spin_lock_init(&msg_queue->lock);
-               msg_queue->count = 0;
-               msg_queue->head = 0;
+               bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
+               bfin_ipi_data->bits = 0;
+               bfin_ipi_data->count = 0;
        }
 }
 
-static inline void smp_send_message(cpumask_t callmap, unsigned long type,
-                                       void (*func) (void *info), void *info, int wait)
+void send_ipi(const struct cpumask *cpumask, enum ipi_message_type msg)
 {
        unsigned int cpu;
-       struct ipi_message_queue *msg_queue;
-       struct ipi_message *msg;
-       unsigned long flags, next_msg;
-       cpumask_t waitmask; /* waitmask is shared by all cpus */
-
-       cpumask_copy(&waitmask, &callmap);
-       for_each_cpu(cpu, &callmap) {
-               msg_queue = &per_cpu(ipi_msg_queue, cpu);
-               spin_lock_irqsave(&msg_queue->lock, flags);
-               if (msg_queue->count < BFIN_IPI_MSGQ_LEN) {
-                       next_msg = (msg_queue->head + msg_queue->count)
-                                       % BFIN_IPI_MSGQ_LEN;
-                       msg = &msg_queue->ipi_message[next_msg];
-                       msg->type = type;
-                       if (type == BFIN_IPI_CALL_FUNC) {
-                               msg->call_struct.func = func;
-                               msg->call_struct.info = info;
-                               msg->call_struct.wait = wait;
-                               msg->call_struct.waitmask = &waitmask;
-                       }
-                       msg_queue->count++;
-               } else
-                       panic("IPI message queue overflow\n");
-               spin_unlock_irqrestore(&msg_queue->lock, flags);
+       struct ipi_data *bfin_ipi_data;
+       unsigned long flags;
+
+       local_irq_save(flags);
+
+       for_each_cpu(cpu, cpumask) {
+               bfin_ipi_data = &per_cpu(bfin_ipi, cpu);
+               smp_mb();
+               set_bit(msg, &bfin_ipi_data->bits);
+               bfin_ipi_data->count++;
                platform_send_ipi_cpu(cpu, IRQ_SUPPLE_1);
        }
 
-       if (wait) {
-               while (!cpumask_empty(&waitmask))
-                       blackfin_dcache_invalidate_range(
-                               (unsigned long)(&waitmask),
-                               (unsigned long)(&waitmask));
-#ifdef __ARCH_SYNC_CORE_DCACHE
-               /*
-                * Invalidate D cache in case shared data was changed by
-                * other processors to ensure cache coherence.
-                */
-               resync_core_dcache();
-#endif
-       }
+       local_irq_restore(flags);
 }
 
-int smp_call_function(void (*func)(void *info), void *info, int wait)
+void arch_send_call_function_single_ipi(int cpu)
 {
-       cpumask_t callmap;
-
-       preempt_disable();
-       cpumask_copy(&callmap, cpu_online_mask);
-       cpumask_clear_cpu(smp_processor_id(), &callmap);
-       if (!cpumask_empty(&callmap))
-               smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
-
-       preempt_enable();
-
-       return 0;
+       send_ipi(cpumask_of(cpu), BFIN_IPI_CALL_FUNC_SINGLE);
 }
-EXPORT_SYMBOL_GPL(smp_call_function);
 
-int smp_call_function_single(int cpuid, void (*func) (void *info), void *info,
-                               int wait)
+void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 {
-       unsigned int cpu = cpuid;
-       cpumask_t callmap;
-
-       if (cpu_is_offline(cpu))
-               return 0;
-       cpumask_clear(&callmap);
-       cpumask_set_cpu(cpu, &callmap);
-
-       smp_send_message(callmap, BFIN_IPI_CALL_FUNC, func, info, wait);
-
-       return 0;
+       send_ipi(mask, BFIN_IPI_CALL_FUNC);
 }
-EXPORT_SYMBOL_GPL(smp_call_function_single);
 
 void smp_send_reschedule(int cpu)
 {
-       cpumask_t callmap;
-       /* simply trigger an ipi */
-
-       cpumask_clear(&callmap);
-       cpumask_set_cpu(cpu, &callmap);
-
-       smp_send_message(callmap, BFIN_IPI_RESCHEDULE, NULL, NULL, 0);
+       send_ipi(cpumask_of(cpu), BFIN_IPI_RESCHEDULE);
 
        return;
 }
 
 void smp_send_msg(const struct cpumask *mask, unsigned long type)
 {
-       smp_send_message(*mask, type, NULL, NULL, 0);
+       send_ipi(mask, type);
 }
 
 void smp_timer_broadcast(const struct cpumask *mask)
@@ -333,7 +242,7 @@ void smp_send_stop(void)
        cpumask_copy(&callmap, cpu_online_mask);
        cpumask_clear_cpu(smp_processor_id(), &callmap);
        if (!cpumask_empty(&callmap))
-               smp_send_message(callmap, BFIN_IPI_CPU_STOP, NULL, NULL, 0);
+               send_ipi(&callmap, BFIN_IPI_CPU_STOP);
 
        preempt_enable();
 
@@ -436,7 +345,7 @@ void __init smp_prepare_boot_cpu(void)
 void __init smp_prepare_cpus(unsigned int max_cpus)
 {
        platform_prepare_cpus(max_cpus);
-       ipi_queue_init();
+       bfin_ipi_init();
        platform_request_ipi(IRQ_SUPPLE_0, ipi_handler_int0);
        platform_request_ipi(IRQ_SUPPLE_1, ipi_handler_int1);
 }
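
The rewrite replaces the spinlocked per-CPU message FIFO, which could overflow and panic, with a per-CPU pending-bit word: senders publish a bit and kick the target CPU, and the handler claims every pending bit atomically. The core pattern, as a sketch:

    /* Sender: publish the message, then raise the supplemental IRQ. */
    smp_mb();
    set_bit(msg, &per_cpu(bfin_ipi, cpu).bits);
    platform_send_ipi_cpu(cpu, IRQ_SUPPLE_1);

    /* Handler: atomically take ownership of all pending messages. */
    unsigned long pending = xchg(&bfin_ipi_data->bits, 0);

A side effect of the bitmask encoding is that duplicate messages of one type coalesce, which is harmless for idempotent IPIs such as reschedule and call-function kicks.
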
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index e498b18..9fc9aa7 100644
@@ -318,7 +318,7 @@ static void pic_ioport_write(void *opaque, u32 addr, u32 val)
                if (val & 0x10) {
                        u8 edge_irr = s->irr & ~s->elcr;
                        int i;
-                       bool found;
+                       bool found = false;
                        struct kvm_vcpu *vcpu;
 
                        s->init4 = val & 1;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c00f03d..b1eb202 100644
@@ -3619,6 +3619,7 @@ static void seg_setup(int seg)
 
 static int alloc_apic_access_page(struct kvm *kvm)
 {
+       struct page *page;
        struct kvm_userspace_memory_region kvm_userspace_mem;
        int r = 0;
 
@@ -3633,7 +3634,13 @@ static int alloc_apic_access_page(struct kvm *kvm)
        if (r)
                goto out;
 
-       kvm->arch.apic_access_page = gfn_to_page(kvm, 0xfee00);
+       page = gfn_to_page(kvm, 0xfee00);
+       if (is_error_page(page)) {
+               r = -EFAULT;
+               goto out;
+       }
+
+       kvm->arch.apic_access_page = page;
 out:
        mutex_unlock(&kvm->slots_lock);
        return r;
@@ -3641,6 +3648,7 @@ out:
 
 static int alloc_identity_pagetable(struct kvm *kvm)
 {
+       struct page *page;
        struct kvm_userspace_memory_region kvm_userspace_mem;
        int r = 0;
 
@@ -3656,8 +3664,13 @@ static int alloc_identity_pagetable(struct kvm *kvm)
        if (r)
                goto out;
 
-       kvm->arch.ept_identity_pagetable = gfn_to_page(kvm,
-                       kvm->arch.ept_identity_map_addr >> PAGE_SHIFT);
+       page = gfn_to_page(kvm, kvm->arch.ept_identity_map_addr >> PAGE_SHIFT);
+       if (is_error_page(page)) {
+               r = -EFAULT;
+               goto out;
+       }
+
+       kvm->arch.ept_identity_pagetable = page;
 out:
        mutex_unlock(&kvm->slots_lock);
        return r;
@@ -6575,7 +6588,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
        /* Exposing INVPCID only when PCID is exposed */
        best = kvm_find_cpuid_entry(vcpu, 0x7, 0);
        if (vmx_invpcid_supported() &&
-           best && (best->ecx & bit(X86_FEATURE_INVPCID)) &&
+           best && (best->ebx & bit(X86_FEATURE_INVPCID)) &&
            guest_cpuid_has_pcid(vcpu)) {
                exec_control |= SECONDARY_EXEC_ENABLE_INVPCID;
                vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
@@ -6585,7 +6598,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
                vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
                             exec_control);
                if (best)
-                       best->ecx &= ~bit(X86_FEATURE_INVPCID);
+                       best->ebx &= ~bit(X86_FEATURE_INVPCID);
        }
 }
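
The ecx -> ebx change reflects where the CPU actually enumerates the feature: INVPCID lives in CPUID.(EAX=07H,ECX=0):EBX, bit 10, so testing the ECX output could never match. Sketch of the corrected test:

    /* INVPCID is a leaf-7 EBX feature (bit 10), not an ECX one. */
    best = kvm_find_cpuid_entry(vcpu, 0x7, 0);
    if (best && (best->ebx & (1u << 10)))   /* bit(X86_FEATURE_INVPCID) */
            /* expose SECONDARY_EXEC_ENABLE_INVPCID */;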
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 148ed66..2966c84 100644
@@ -5113,17 +5113,20 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu)
                        !kvm_event_needs_reinjection(vcpu);
 }
 
-static void vapic_enter(struct kvm_vcpu *vcpu)
+static int vapic_enter(struct kvm_vcpu *vcpu)
 {
        struct kvm_lapic *apic = vcpu->arch.apic;
        struct page *page;
 
        if (!apic || !apic->vapic_addr)
-               return;
+               return 0;
 
        page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT);
+       if (is_error_page(page))
+               return -EFAULT;
 
        vcpu->arch.apic->vapic_page = page;
+       return 0;
 }
 
 static void vapic_exit(struct kvm_vcpu *vcpu)
@@ -5430,7 +5433,11 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
        }
 
        vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
-       vapic_enter(vcpu);
+       r = vapic_enter(vcpu);
+       if (r) {
+               srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
+               return r;
+       }
 
        r = 1;
        while (r > 0) {
diff --git a/crypto/authenc.c b/crypto/authenc.c
index 5ef7ba6..d0583a4 100644
@@ -336,7 +336,7 @@ static int crypto_authenc_genicv(struct aead_request *req, u8 *iv,
                cryptlen += ivsize;
        }
 
-       if (sg_is_last(assoc)) {
+       if (req->assoclen && sg_is_last(assoc)) {
                authenc_ahash_fn = crypto_authenc_ahash;
                sg_init_table(asg, 2);
                sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset);
@@ -490,7 +490,7 @@ static int crypto_authenc_iverify(struct aead_request *req, u8 *iv,
                cryptlen += ivsize;
        }
 
-       if (sg_is_last(assoc)) {
+       if (req->assoclen && sg_is_last(assoc)) {
                authenc_ahash_fn = crypto_authenc_ahash;
                sg_init_table(asg, 2);
                sg_set_page(asg, sg_page(assoc), assoc->length, assoc->offset);
diff --git a/drivers/crypto/caam/key_gen.c b/drivers/crypto/caam/key_gen.c
index 0028881..d216cd3 100644
@@ -120,3 +120,4 @@ u32 gen_split_key(struct device *jrdev, u8 *key_out, int split_key_len,
 
        return ret;
 }
+EXPORT_SYMBOL(gen_split_key);
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index dc27598..ed38454 100644
@@ -4066,7 +4066,6 @@ megasas_probe_one(struct pci_dev *pdev, const struct pci_device_id *id)
        spin_lock_init(&instance->cmd_pool_lock);
        spin_lock_init(&instance->hba_lock);
        spin_lock_init(&instance->completion_lock);
-       spin_lock_init(&poll_aen_lock);
 
        mutex_init(&instance->aen_mutex);
        mutex_init(&instance->reset_mutex);
@@ -5392,6 +5391,8 @@ static int __init megasas_init(void)
        printk(KERN_INFO "megasas: %s %s\n", MEGASAS_VERSION,
               MEGASAS_EXT_VERSION);
 
+       spin_lock_init(&poll_aen_lock);
+
        support_poll_for_event = 2;
        support_device_change = 1;
 
diff --git a/drivers/scsi/mpt2sas/mpt2sas_base.c b/drivers/scsi/mpt2sas/mpt2sas_base.c
index 9d46fcb..b25757d 100644
@@ -2424,10 +2424,13 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc,  int sleep_flag)
        }
 
        /* command line tunables  for max controller queue depth */
-       if (max_queue_depth != -1)
-               max_request_credit = (max_queue_depth < facts->RequestCredit)
-                   ? max_queue_depth : facts->RequestCredit;
-       else
+       if (max_queue_depth != -1 && max_queue_depth != 0) {
+               max_request_credit = min_t(u16, max_queue_depth +
+                       ioc->hi_priority_depth + ioc->internal_depth,
+                       facts->RequestCredit);
+               if (max_request_credit > MAX_HBA_QUEUE_DEPTH)
+                       max_request_credit =  MAX_HBA_QUEUE_DEPTH;
+       } else
                max_request_credit = min_t(u16, facts->RequestCredit,
                    MAX_HBA_QUEUE_DEPTH);
 
@@ -2502,7 +2505,7 @@ _base_allocate_memory_pools(struct MPT2SAS_ADAPTER *ioc,  int sleep_flag)
        /* set the scsi host can_queue depth
         * with some internal commands that could be outstanding
         */
-       ioc->shost->can_queue = ioc->scsiio_depth - (2);
+       ioc->shost->can_queue = ioc->scsiio_depth;
        dinitprintk(ioc, printk(MPT2SAS_INFO_FMT "scsi host: "
            "can_queue depth (%d)\n", ioc->name, ioc->shost->can_queue));
 
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index 4a6381c..de2337f 100644
@@ -42,6 +42,8 @@
 
 #include <trace/events/scsi.h>
 
+static void scsi_eh_done(struct scsi_cmnd *scmd);
+
 #define SENSE_TIMEOUT          (10*HZ)
 
 /*
@@ -241,6 +243,14 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
        if (! scsi_command_normalize_sense(scmd, &sshdr))
                return FAILED;  /* no valid sense data */
 
+       if (scmd->cmnd[0] == TEST_UNIT_READY && scmd->scsi_done != scsi_eh_done)
+               /*
+                * nasty: for mid-layer issued TURs, we need to return the
+                * actual sense data without any recovery attempt.  For eh
+                * issued ones, we need to try to recover and interpret
+                */
+               return SUCCESS;
+
        if (scsi_sense_is_deferred(&sshdr))
                return NEEDS_RETRY;
 
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index ffd7773..faa790f 100644
@@ -776,7 +776,6 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
        }
 
        if (req->cmd_type == REQ_TYPE_BLOCK_PC) { /* SG_IO ioctl from block level */
-               req->errors = result;
                if (result) {
                        if (sense_valid && req->sense) {
                                /*
@@ -792,6 +791,10 @@ void scsi_io_completion(struct scsi_cmnd *cmd, unsigned int good_bytes)
                        if (!sense_deferred)
                                error = __scsi_error_from_host_byte(cmd, result);
                }
+               /*
+                * __scsi_error_from_host_byte may have reset the host_byte
+                */
+               req->errors = cmd->result;
 
                req->resid_len = scsi_get_resid(cmd);
 
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 56a9379..d947ffc 100644
@@ -764,6 +764,16 @@ static int scsi_add_lun(struct scsi_device *sdev, unsigned char *inq_result,
        sdev->model = (char *) (sdev->inquiry + 16);
        sdev->rev = (char *) (sdev->inquiry + 32);
 
+       if (strncmp(sdev->vendor, "ATA     ", 8) == 0) {
+               /*
+                * sata emulation layer device.  This is a hack to work around
+                * the SATL power management specifications which state that
+                * when the SATL detects the device has gone into standby
+                * mode, it shall respond with NOT READY.
+                */
+               sdev->allow_restart = 1;
+       }
+
        if (*bflags & BLIST_ISROM) {
                sdev->type = TYPE_ROM;
                sdev->removable = 1;
diff --git a/fs/fuse/control.c b/fs/fuse/control.c
index 03ff5b1..75a20c0 100644
@@ -117,7 +117,7 @@ static ssize_t fuse_conn_max_background_write(struct file *file,
                                              const char __user *buf,
                                              size_t count, loff_t *ppos)
 {
-       unsigned val;
+       unsigned uninitialized_var(val);
        ssize_t ret;
 
        ret = fuse_conn_limit_write(file, buf, count, ppos, &val,
@@ -154,7 +154,7 @@ static ssize_t fuse_conn_congestion_threshold_write(struct file *file,
                                                    const char __user *buf,
                                                    size_t count, loff_t *ppos)
 {
-       unsigned val;
+       unsigned uninitialized_var(val);
        ssize_t ret;
 
        ret = fuse_conn_limit_write(file, buf, count, ppos, &val,
diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
index 3426521..ee8d550 100644
@@ -396,7 +396,7 @@ err_device:
 err_region:
        unregister_chrdev_region(devt, 1);
 err:
-       fc->conn_error = 1;
+       fuse_conn_kill(fc);
        goto out;
 }
 
@@ -532,8 +532,6 @@ static int cuse_channel_release(struct inode *inode, struct file *file)
                cdev_del(cc->cdev);
        }
 
-       /* kill connection and shutdown channel */
-       fuse_conn_kill(&cc->fc);
        rc = fuse_dev_release(inode, file);     /* puts the base reference */
 
        return rc;
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index 7df2b5e..f4246cf 100644
@@ -1576,6 +1576,7 @@ static int fuse_retrieve(struct fuse_conn *fc, struct inode *inode,
                req->pages[req->num_pages] = page;
                req->num_pages++;
 
+               offset = 0;
                num -= this_num;
                total_len += this_num;
                index++;
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index ce0a283..fca222d 100644
@@ -367,11 +367,6 @@ void fuse_conn_kill(struct fuse_conn *fc)
        wake_up_all(&fc->waitq);
        wake_up_all(&fc->blocked_waitq);
        wake_up_all(&fc->reserved_req_waitq);
-       mutex_lock(&fuse_mutex);
-       list_del(&fc->entry);
-       fuse_ctl_remove_conn(fc);
-       mutex_unlock(&fuse_mutex);
-       fuse_bdi_destroy(fc);
 }
 EXPORT_SYMBOL_GPL(fuse_conn_kill);
 
@@ -380,7 +375,14 @@ static void fuse_put_super(struct super_block *sb)
        struct fuse_conn *fc = get_fuse_conn_super(sb);
 
        fuse_send_destroy(fc);
+
        fuse_conn_kill(fc);
+       mutex_lock(&fuse_mutex);
+       list_del(&fc->entry);
+       fuse_ctl_remove_conn(fc);
+       mutex_unlock(&fuse_mutex);
+       fuse_bdi_destroy(fc);
+
        fuse_conn_put(fc);
 }
 
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 75d6d0a..6a7fcab 100644
@@ -287,10 +287,12 @@ nfs_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
        struct inode *inode = file->f_path.dentry->d_inode;
 
        ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
+       if (ret != 0)
+               goto out;
        mutex_lock(&inode->i_mutex);
        ret = nfs_file_fsync_commit(file, start, end, datasync);
        mutex_unlock(&inode->i_mutex);
-
+out:
        return ret;
 }
 
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index c6e895f..9b47610 100644
@@ -154,7 +154,7 @@ static void nfs_zap_caches_locked(struct inode *inode)
        nfsi->attrtimeo = NFS_MINATTRTIMEO(inode);
        nfsi->attrtimeo_timestamp = jiffies;
 
-       memset(NFS_COOKIEVERF(inode), 0, sizeof(NFS_COOKIEVERF(inode)));
+       memset(NFS_I(inode)->cookieverf, 0, sizeof(NFS_I(inode)->cookieverf));
        if (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode))
                nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL|NFS_INO_REVAL_PAGECACHE;
        else
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index d6b3b5f..6932209 100644
@@ -643,7 +643,7 @@ nfs3_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
                  u64 cookie, struct page **pages, unsigned int count, int plus)
 {
        struct inode            *dir = dentry->d_inode;
-       __be32                  *verf = NFS_COOKIEVERF(dir);
+       __be32                  *verf = NFS_I(dir)->cookieverf;
        struct nfs3_readdirargs arg = {
                .fh             = NFS_FH(dir),
                .cookie         = cookie,
diff --git a/fs/nfs/nfs4file.c b/fs/nfs/nfs4file.c
index acb65e7..eb5eb8e 100644
@@ -96,13 +96,15 @@ nfs4_file_fsync(struct file *file, loff_t start, loff_t end, int datasync)
        struct inode *inode = file->f_path.dentry->d_inode;
 
        ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
+       if (ret != 0)
+               goto out;
        mutex_lock(&inode->i_mutex);
        ret = nfs_file_fsync_commit(file, start, end, datasync);
        if (!ret && !datasync)
                /* application has asked for meta-data sync */
                ret = pnfs_layoutcommit_inode(inode, true);
        mutex_unlock(&inode->i_mutex);
-
+out:
        return ret;
 }
 
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 6352741..1e50326 100644
@@ -3215,11 +3215,11 @@ static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
                        dentry->d_parent->d_name.name,
                        dentry->d_name.name,
                        (unsigned long long)cookie);
-       nfs4_setup_readdir(cookie, NFS_COOKIEVERF(dir), dentry, &args);
+       nfs4_setup_readdir(cookie, NFS_I(dir)->cookieverf, dentry, &args);
        res.pgbase = args.pgbase;
        status = nfs4_call_sync(NFS_SERVER(dir)->client, NFS_SERVER(dir), &msg, &args.seq_args, &res.seq_res, 0);
        if (status >= 0) {
-               memcpy(NFS_COOKIEVERF(dir), res.verifier.data, NFS4_VERIFIER_SIZE);
+               memcpy(NFS_I(dir)->cookieverf, res.verifier.data, NFS4_VERIFIER_SIZE);
                status += args.pgbase;
        }
 
@@ -3653,11 +3653,11 @@ static inline int nfs4_server_supports_acls(struct nfs_server *server)
                && (server->acl_bitmask & ACL4_SUPPORT_DENY_ACL);
 }
 
-/* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_CACHE_SIZE, and that
- * it's OK to put sizeof(void) * (XATTR_SIZE_MAX/PAGE_CACHE_SIZE) bytes on
+/* Assuming that XATTR_SIZE_MAX is a multiple of PAGE_SIZE, and that
+ * it's OK to put sizeof(void) * (XATTR_SIZE_MAX/PAGE_SIZE) bytes on
  * the stack.
  */
-#define NFS4ACL_MAXPAGES (XATTR_SIZE_MAX >> PAGE_CACHE_SHIFT)
+#define NFS4ACL_MAXPAGES DIV_ROUND_UP(XATTR_SIZE_MAX, PAGE_SIZE)
 
 static int buf_to_pages_noslab(const void *buf, size_t buflen,
                struct page **pages, unsigned int *pgbase)
@@ -3668,7 +3668,7 @@ static int buf_to_pages_noslab(const void *buf, size_t buflen,
        spages = pages;
 
        do {
-               len = min_t(size_t, PAGE_CACHE_SIZE, buflen);
+               len = min_t(size_t, PAGE_SIZE, buflen);
                newpage = alloc_page(GFP_KERNEL);
 
                if (newpage == NULL)
@@ -3739,7 +3739,7 @@ static void nfs4_write_cached_acl(struct inode *inode, struct page **pages, size
        struct nfs4_cached_acl *acl;
        size_t buflen = sizeof(*acl) + acl_len;
 
-       if (pages && buflen <= PAGE_SIZE) {
+       if (buflen <= PAGE_SIZE) {
                acl = kmalloc(buflen, GFP_KERNEL);
                if (acl == NULL)
                        goto out;
@@ -3782,17 +3782,15 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
                .rpc_argp = &args,
                .rpc_resp = &res,
        };
-       int ret = -ENOMEM, npages, i;
-       size_t acl_len = 0;
+       unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
+       int ret = -ENOMEM, i;
 
-       npages = (buflen + PAGE_SIZE - 1) >> PAGE_SHIFT;
        /* As long as we're doing a round trip to the server anyway,
         * let's be prepared for a page of acl data. */
        if (npages == 0)
                npages = 1;
-
-       /* Add an extra page to handle the bitmap returned */
-       npages++;
+       if (npages > ARRAY_SIZE(pages))
+               return -ERANGE;
 
        for (i = 0; i < npages; i++) {
                pages[i] = alloc_page(GFP_KERNEL);
@@ -3808,11 +3806,6 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
        args.acl_len = npages * PAGE_SIZE;
        args.acl_pgbase = 0;
 
-       /* Let decode_getfacl know not to fail if the ACL data is larger than
-        * the page we send as a guess */
-       if (buf == NULL)
-               res.acl_flags |= NFS4_ACL_LEN_REQUEST;
-
        dprintk("%s  buf %p buflen %zu npages %d args.acl_len %zu\n",
                __func__, buf, buflen, npages, args.acl_len);
        ret = nfs4_call_sync(NFS_SERVER(inode)->client, NFS_SERVER(inode),
@@ -3820,20 +3813,19 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu
        if (ret)
                goto out_free;
 
-       acl_len = res.acl_len;
-       if (acl_len > args.acl_len)
-               nfs4_write_cached_acl(inode, NULL, 0, acl_len);
-       else
-               nfs4_write_cached_acl(inode, pages, res.acl_data_offset,
-                                     acl_len);
-       if (buf) {
+       /* Handle the case where the passed-in buffer is too short */
+       if (res.acl_flags & NFS4_ACL_TRUNC) {
+               /* Did the user only issue a request for the acl length? */
+               if (buf == NULL)
+                       goto out_ok;
                ret = -ERANGE;
-               if (acl_len > buflen)
-                       goto out_free;
-               _copy_from_pages(buf, pages, res.acl_data_offset,
-                               acl_len);
+               goto out_free;
        }
-       ret = acl_len;
+       nfs4_write_cached_acl(inode, pages, res.acl_data_offset, res.acl_len);
+       if (buf)
+               _copy_from_pages(buf, pages, res.acl_data_offset, res.acl_len);
+out_ok:
+       ret = res.acl_len;
 out_free:
        for (i = 0; i < npages; i++)
                if (pages[i])
@@ -3891,10 +3883,13 @@ static int __nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t bufl
                .rpc_argp       = &arg,
                .rpc_resp       = &res,
        };
+       unsigned int npages = DIV_ROUND_UP(buflen, PAGE_SIZE);
        int ret, i;
 
        if (!nfs4_server_supports_acls(server))
                return -EOPNOTSUPP;
+       if (npages > ARRAY_SIZE(pages))
+               return -ERANGE;
        i = buf_to_pages_noslab(buf, buflen, arg.acl_pages, &arg.acl_pgbase);
        if (i < 0)
                return i;
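
DIV_ROUND_UP makes the page arithmetic explicit and stays correct even if XATTR_SIZE_MAX were not page-aligned, and both the get and set paths now reject buffers that would overrun the fixed pages[] array instead of writing past it. Numeric sketch (assuming XATTR_SIZE_MAX = 65536, PAGE_SIZE = 4096):

    NFS4ACL_MAXPAGES = DIV_ROUND_UP(65536, 4096) = 16
    buflen = 70000  ->  npages = 18 > 16  ->  -ERANGE up front
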
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 1bfbd67..8dba6bd 100644
@@ -5072,18 +5072,14 @@ static int decode_getacl(struct xdr_stream *xdr, struct rpc_rqst *req,
                 * are stored with the acl data to handle the problem of
                 * variable length bitmaps.*/
                res->acl_data_offset = xdr_stream_pos(xdr) - pg_offset;
-
-               /* We ignore &savep and don't do consistency checks on
-                * the attr length.  Let userspace figure it out.... */
                res->acl_len = attrlen;
-               if (attrlen > (xdr->nwords << 2)) {
-                       if (res->acl_flags & NFS4_ACL_LEN_REQUEST) {
-                               /* getxattr interface called with a NULL buf */
-                               goto out;
-                       }
+
+               /* Check for receive buffer overflow */
+               if (res->acl_len > (xdr->nwords << 2) ||
+                   res->acl_len + res->acl_data_offset > xdr->buf->page_len) {
+                       res->acl_flags |= NFS4_ACL_TRUNC;
                        dprintk("NFS: acl reply: attrlen %u > page_len %u\n",
                                        attrlen, xdr->nwords << 2);
-                       return -EINVAL;
                }
        } else
                status = -EOPNOTSUPP;
@@ -6229,7 +6225,8 @@ static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
        status = decode_open(xdr, res);
        if (status)
                goto out;
-       if (decode_getfh(xdr, &res->fh) != 0)
+       status = decode_getfh(xdr, &res->fh);
+       if (status)
                goto out;
        decode_getfattr(xdr, res->f_attr, res->server);
 out:
index 239aff7..b8eda70 100644 (file)
@@ -1867,6 +1867,7 @@ static int nfs23_validate_mount_data(void *options,
 
                memcpy(sap, &data->addr, sizeof(data->addr));
                args->nfs_server.addrlen = sizeof(data->addr);
+               args->nfs_server.port = ntohs(data->addr.sin_port);
                if (!nfs_verify_server_address(sap))
                        goto out_no_address;
 
@@ -2564,6 +2565,7 @@ static int nfs4_validate_mount_data(void *options,
                        return -EFAULT;
                if (!nfs_verify_server_address(sap))
                        goto out_no_address;
+               args->nfs_server.port = ntohs(((struct sockaddr_in *)sap)->sin_port);
 
                if (data->auth_flavourlen) {
                        if (data->auth_flavourlen > 1)
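
Both mount-parsing hunks now record the server port explicitly. The port inside a struct sockaddr_in is kept in network byte order, so it has to go through ntohs() before being stored as a host-order integer, which is exactly what the added lines do. A self-contained illustration:

#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
        struct sockaddr_in sin;

        memset(&sin, 0, sizeof(sin));
        sin.sin_family = AF_INET;
        sin.sin_port = htons(2049);     /* NFS port, network byte order */

        /* What the hunks above do with data->addr.sin_port. */
        unsigned short port = ntohs(sin.sin_port);
        printf("port = %u\n", port);    /* prints 2049 */
        return 0;
}
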
index 1f8fc7f..4b03f56 100644 (file)
@@ -265,11 +265,6 @@ static inline const struct nfs_rpc_ops *NFS_PROTO(const struct inode *inode)
        return NFS_SERVER(inode)->nfs_client->rpc_ops;
 }
 
-static inline __be32 *NFS_COOKIEVERF(const struct inode *inode)
-{
-       return NFS_I(inode)->cookieverf;
-}
-
 static inline unsigned NFS_MINATTRTIMEO(const struct inode *inode)
 {
        struct nfs_server *nfss = NFS_SERVER(inode);
index ac7c8ae..be9cf3c 100644 (file)
@@ -652,7 +652,7 @@ struct nfs_getaclargs {
 };
 
 /* getxattr ACL interface flags */
-#define NFS4_ACL_LEN_REQUEST   0x0001  /* zero length getxattr buffer */
+#define NFS4_ACL_TRUNC         0x0001  /* ACL was truncated */
 struct nfs_getaclres {
        size_t                          acl_len;
        size_t                          acl_data_offset;
index cff40aa..bf8c49f 100644 (file)
@@ -114,6 +114,7 @@ struct rpc_xprt_ops {
        void            (*set_buffer_size)(struct rpc_xprt *xprt, size_t sndsize, size_t rcvsize);
        int             (*reserve_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
        void            (*release_xprt)(struct rpc_xprt *xprt, struct rpc_task *task);
+       void            (*alloc_slot)(struct rpc_xprt *xprt, struct rpc_task *task);
        void            (*rpcbind)(struct rpc_task *task);
        void            (*set_port)(struct rpc_xprt *xprt, unsigned short port);
        void            (*connect)(struct rpc_task *task);
@@ -281,6 +282,8 @@ void                        xprt_connect(struct rpc_task *task);
 void                   xprt_reserve(struct rpc_task *task);
 int                    xprt_reserve_xprt(struct rpc_xprt *xprt, struct rpc_task *task);
 int                    xprt_reserve_xprt_cong(struct rpc_xprt *xprt, struct rpc_task *task);
+void                   xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
+void                   xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task);
 int                    xprt_prepare_transmit(struct rpc_task *task);
 void                   xprt_transmit(struct rpc_task *task);
 void                   xprt_end_transmit(struct rpc_task *task);
index 692d976..1e1373b 100644 (file)
@@ -66,6 +66,7 @@ enum {
 
        /* pool flags */
        POOL_MANAGE_WORKERS     = 1 << 0,       /* need to manage workers */
+       POOL_MANAGING_WORKERS   = 1 << 1,       /* managing workers */
 
        /* worker flags */
        WORKER_STARTED          = 1 << 0,       /* started */
@@ -652,7 +653,7 @@ static bool need_to_manage_workers(struct worker_pool *pool)
 /* Do we have too many workers and should some go away? */
 static bool too_many_workers(struct worker_pool *pool)
 {
-       bool managing = mutex_is_locked(&pool->manager_mutex);
+       bool managing = pool->flags & POOL_MANAGING_WORKERS;
        int nr_idle = pool->nr_idle + managing; /* manager is considered idle */
        int nr_busy = pool->nr_workers - nr_idle;
 
@@ -1326,6 +1327,15 @@ static void idle_worker_rebind(struct worker *worker)
 
        /* we did our part, wait for rebind_workers() to finish up */
        wait_event(gcwq->rebind_hold, !(worker->flags & WORKER_REBIND));
+
+       /*
+        * rebind_workers() shouldn't finish until all workers have passed
+        * the above WORKER_REBIND wait.  Tell it when done.
+        */
+       spin_lock_irq(&worker->pool->gcwq->lock);
+       if (!--worker->idle_rebind->cnt)
+               complete(&worker->idle_rebind->done);
+       spin_unlock_irq(&worker->pool->gcwq->lock);
 }
 
 /*
@@ -1396,12 +1406,15 @@ retry:
        /* set REBIND and kick idle ones, we'll wait for these later */
        for_each_worker_pool(pool, gcwq) {
                list_for_each_entry(worker, &pool->idle_list, entry) {
+                       unsigned long worker_flags = worker->flags;
+
                        if (worker->flags & WORKER_REBIND)
                                continue;
 
-                       /* morph UNBOUND to REBIND */
-                       worker->flags &= ~WORKER_UNBOUND;
-                       worker->flags |= WORKER_REBIND;
+                       /* morph UNBOUND to REBIND atomically */
+                       worker_flags &= ~WORKER_UNBOUND;
+                       worker_flags |= WORKER_REBIND;
+                       ACCESS_ONCE(worker->flags) = worker_flags;
 
                        idle_rebind.cnt++;
                        worker->idle_rebind = &idle_rebind;
@@ -1419,25 +1432,15 @@ retry:
                goto retry;
        }
 
-       /*
-        * All idle workers are rebound and waiting for %WORKER_REBIND to
-        * be cleared inside idle_worker_rebind().  Clear and release.
-        * Clearing %WORKER_REBIND from this foreign context is safe
-        * because these workers are still guaranteed to be idle.
-        */
-       for_each_worker_pool(pool, gcwq)
-               list_for_each_entry(worker, &pool->idle_list, entry)
-                       worker->flags &= ~WORKER_REBIND;
-
-       wake_up_all(&gcwq->rebind_hold);
-
-       /* rebind busy workers */
+       /* all idle workers are rebound, rebind busy workers */
        for_each_busy_worker(worker, i, pos, gcwq) {
                struct work_struct *rebind_work = &worker->rebind_work;
+               unsigned long worker_flags = worker->flags;
 
-               /* morph UNBOUND to REBIND */
-               worker->flags &= ~WORKER_UNBOUND;
-               worker->flags |= WORKER_REBIND;
+               /* morph UNBOUND to REBIND atomically */
+               worker_flags &= ~WORKER_UNBOUND;
+               worker_flags |= WORKER_REBIND;
+               ACCESS_ONCE(worker->flags) = worker_flags;
 
                if (test_and_set_bit(WORK_STRUCT_PENDING_BIT,
                                     work_data_bits(rebind_work)))
@@ -1449,6 +1452,34 @@ retry:
                            worker->scheduled.next,
                            work_color_to_flags(WORK_NO_COLOR));
        }
+
+       /*
+        * All idle workers are rebound and waiting for %WORKER_REBIND to
+        * be cleared inside idle_worker_rebind().  Clear and release.
+        * Clearing %WORKER_REBIND from this foreign context is safe
+        * because these workers are still guaranteed to be idle.
+        *
+        * We need to make sure all idle workers have passed the
+        * WORKER_REBIND wait in idle_worker_rebind() before returning;
+        * otherwise, workers can get stuck at that wait if the hotplug
+        * cycle repeats.
+        */
+       idle_rebind.cnt = 1;
+       INIT_COMPLETION(idle_rebind.done);
+
+       for_each_worker_pool(pool, gcwq) {
+               list_for_each_entry(worker, &pool->idle_list, entry) {
+                       worker->flags &= ~WORKER_REBIND;
+                       idle_rebind.cnt++;
+               }
+       }
+
+       wake_up_all(&gcwq->rebind_hold);
+
+       if (--idle_rebind.cnt) {
+               spin_unlock_irq(&gcwq->lock);
+               wait_for_completion(&idle_rebind.done);
+               spin_lock_irq(&gcwq->lock);
+       }
 }
 
 static struct worker *alloc_worker(void)
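
The two passes over idle_rebind.cnt implement a counted completion: the count starts at 1 so the initiator holds its own reference while registering waiters, every worker drops one reference as it passes the barrier, and whoever drops the last reference signals completion. A rough pthread equivalent of that discipline, with a condition variable standing in for the kernel's struct completion:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
static int cnt = 1;                     /* initiator's own reference */

static void put_ref(void)
{
        pthread_mutex_lock(&lock);
        if (!--cnt)                     /* last one out signals completion */
                pthread_cond_signal(&done);
        pthread_mutex_unlock(&lock);
}

static void *worker(void *arg)
{
        /* ... worker passes its barrier, then tells the initiator ... */
        put_ref();
        return NULL;
}

int main(void)
{
        pthread_t tid[4];

        pthread_mutex_lock(&lock);
        for (int i = 0; i < 4; i++) {
                cnt++;                  /* one reference per worker */
                pthread_create(&tid[i], NULL, worker, NULL);
        }
        pthread_mutex_unlock(&lock);

        put_ref();                      /* drop the initiator's reference */

        pthread_mutex_lock(&lock);
        while (cnt)                     /* wait_for_completion() analogue */
                pthread_cond_wait(&done, &lock);
        pthread_mutex_unlock(&lock);

        for (int i = 0; i < 4; i++)
                pthread_join(tid[i], NULL);
        printf("all workers passed the barrier\n");
        return 0;
}
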
@@ -1794,9 +1825,45 @@ static bool manage_workers(struct worker *worker)
        struct worker_pool *pool = worker->pool;
        bool ret = false;
 
-       if (!mutex_trylock(&pool->manager_mutex))
+       if (pool->flags & POOL_MANAGING_WORKERS)
                return ret;
 
+       pool->flags |= POOL_MANAGING_WORKERS;
+
+       /*
+        * To simplify both worker management and CPU hotplug, hold off
+        * management while hotplug is in progress.  CPU hotplug path can't
+        * grab %POOL_MANAGING_WORKERS to achieve this because that can
+        * lead to idle worker depletion (all become busy thinking someone
+        * else is managing) which in turn can result in deadlock under
+        * extreme circumstances.  Use @pool->manager_mutex to synchronize
+        * manager against CPU hotplug.
+        *
+        * manager_mutex is always free unless CPU hotplug is in
+        * progress, so trylock first without dropping @gcwq->lock.
+        */
+       if (unlikely(!mutex_trylock(&pool->manager_mutex))) {
+               spin_unlock_irq(&pool->gcwq->lock);
+               mutex_lock(&pool->manager_mutex);
+               /*
+                * CPU hotplug could have happened while we were waiting
+                * for manager_mutex.  Hotplug itself can't handle us
+                * because manager isn't either on idle or busy list, and
+                * @gcwq's state and ours could have deviated.
+                *
+                * As hotplug is now excluded via manager_mutex, we can
+                * simply try to bind.  It will succeed or fail depending
+                * on @gcwq's current state.  Try it and adjust
+                * %WORKER_UNBOUND accordingly.
+                */
+               if (worker_maybe_bind_and_lock(worker))
+                       worker->flags &= ~WORKER_UNBOUND;
+               else
+                       worker->flags |= WORKER_UNBOUND;
+
+               ret = true;
+       }
+
        pool->flags &= ~POOL_MANAGE_WORKERS;
 
        /*
@@ -1806,6 +1873,7 @@ static bool manage_workers(struct worker *worker)
        ret |= maybe_destroy_workers(pool);
        ret |= maybe_create_worker(pool);
 
+       pool->flags &= ~POOL_MANAGING_WORKERS;
        mutex_unlock(&pool->manager_mutex);
        return ret;
 }
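
manage_workers() now keeps the old fast path (mutex_trylock() with the gcwq lock held) and adds a slow path that drops the spinlock, sleeps on the mutex, and revalidates state afterwards, since anything may have changed while sleeping. A compressed sketch of that locking shape using pthread primitives; the rebind/revalidation step is reduced to a comment:

#include <pthread.h>
#include <stdbool.h>

static pthread_spinlock_t gcwq_lock;    /* may not sleep while held */
static pthread_mutex_t manager_mutex = PTHREAD_MUTEX_INITIALIZER;

static bool manage(void)
{
        bool ret = false;

        /* Fast path: the mutex is almost always free, so try it
         * without dropping the spinlock. */
        if (pthread_mutex_trylock(&manager_mutex) != 0) {
                /* Slow path: we can't sleep under the spinlock, so
                 * drop it, block on the mutex, re-take the spinlock. */
                pthread_spin_unlock(&gcwq_lock);
                pthread_mutex_lock(&manager_mutex);
                pthread_spin_lock(&gcwq_lock);
                /* State may have changed while we slept: revalidate
                 * here, as manage_workers() does by rebinding. */
                ret = true;
        }

        /* ... do the management work under both locks ... */

        pthread_mutex_unlock(&manager_mutex);
        return ret;
}

int main(void)
{
        pthread_spin_init(&gcwq_lock, PTHREAD_PROCESS_PRIVATE);
        pthread_spin_lock(&gcwq_lock);
        manage();
        pthread_spin_unlock(&gcwq_lock);
        pthread_spin_destroy(&gcwq_lock);
        return 0;
}
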
index 286d558..8c0e629 100644 (file)
@@ -163,9 +163,11 @@ static int digsig_verify_rsa(struct key *key,
        memcpy(out1 + head, p, l);
 
        err = pkcs_1_v1_5_decode_emsa(out1, len, mblen, out2, &len);
+       if (err)
+               goto err;
 
-       if (!err && len == hlen)
-               err = memcmp(out2, h, hlen);
+       if (len != hlen || memcmp(out2, h, hlen))
+               err = -EINVAL;
 
 err:
        mpi_free(in);
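
The old tail had two defects: when the decoded length differed from the digest length, err stayed 0 and verification falsely succeeded, and when the lengths did match, memcmp()'s arbitrary nonzero result leaked out as an error code. The rewrite propagates decode failures first and collapses both mismatch cases to -EINVAL. A standalone sketch of the corrected tail, with stub parameters:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Corrected comparison tail: decode errors propagate, and both a length
 * mismatch and a digest mismatch collapse to -EINVAL. */
static int check_digest(int decode_err, const unsigned char *decoded,
                        size_t len, const unsigned char *h, size_t hlen)
{
        if (decode_err)
                return decode_err;
        if (len != hlen || memcmp(decoded, h, hlen))
                return -EINVAL;
        return 0;
}

int main(void)
{
        unsigned char h[4]    = { 1, 2, 3, 4 };
        unsigned char good[4] = { 1, 2, 3, 4 };
        unsigned char bad[3]  = { 1, 2, 3 };

        /* The old "!err && len == hlen" test let the short buffer pass. */
        printf("good: %d, short: %d\n",
               check_digest(0, good, 4, h, 4),
               check_digest(0, bad, 3, h, 4));
        return 0;
}
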
index a5a402a..5d7f61d 100644 (file)
@@ -969,11 +969,11 @@ static bool xprt_dynamic_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
        return false;
 }
 
-static void xprt_alloc_slot(struct rpc_task *task)
+void xprt_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
 {
-       struct rpc_xprt *xprt = task->tk_xprt;
        struct rpc_rqst *req;
 
+       spin_lock(&xprt->reserve_lock);
        if (!list_empty(&xprt->free)) {
                req = list_entry(xprt->free.next, struct rpc_rqst, rq_list);
                list_del(&req->rq_list);
@@ -994,12 +994,29 @@ static void xprt_alloc_slot(struct rpc_task *task)
        default:
                task->tk_status = -EAGAIN;
        }
+       spin_unlock(&xprt->reserve_lock);
        return;
 out_init_req:
        task->tk_status = 0;
        task->tk_rqstp = req;
        xprt_request_init(task, xprt);
+       spin_unlock(&xprt->reserve_lock);
+}
+EXPORT_SYMBOL_GPL(xprt_alloc_slot);
+
+void xprt_lock_and_alloc_slot(struct rpc_xprt *xprt, struct rpc_task *task)
+{
+       /* Note: grabbing xprt_lock_write() ensures that we throttle
+        * new slot allocation if the transport is congested (i.e. when
+        * reconnecting a stream transport or when out of socket write
+        * buffer space).
+        */
+       if (xprt_lock_write(xprt, task)) {
+               xprt_alloc_slot(xprt, task);
+               xprt_release_write(xprt, task);
+       }
 }
+EXPORT_SYMBOL_GPL(xprt_lock_and_alloc_slot);
 
 static void xprt_free_slot(struct rpc_xprt *xprt, struct rpc_rqst *req)
 {
@@ -1083,20 +1100,9 @@ void xprt_reserve(struct rpc_task *task)
        if (task->tk_rqstp != NULL)
                return;
 
-       /* Note: grabbing the xprt_lock_write() here is not strictly needed,
-        * but ensures that we throttle new slot allocation if the transport
-        * is congested (e.g. if reconnecting or if we're out of socket
-        * write buffer space).
-        */
        task->tk_timeout = 0;
        task->tk_status = -EAGAIN;
-       if (!xprt_lock_write(xprt, task))
-               return;
-
-       spin_lock(&xprt->reserve_lock);
-       xprt_alloc_slot(task);
-       spin_unlock(&xprt->reserve_lock);
-       xprt_release_write(xprt, task);
+       xprt->ops->alloc_slot(xprt, task);
 }
 
 static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
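
After this refactor xprt_reserve() no longer takes the write lock itself; it delegates through the new ->alloc_slot op so each transport can choose its policy (the socket and RDMA hunks below wire up xprt_lock_and_alloc_slot for TCP and the plain xprt_alloc_slot for the others). A userspace sketch of that delegate-through-an-ops-table shape, with a pthread mutex standing in for xprt_lock_write(); all names are illustrative:

#include <pthread.h>
#include <stdio.h>

struct xprt;

struct xprt_ops {
        void (*alloc_slot)(struct xprt *xprt);  /* per-transport policy */
};

struct xprt {
        const struct xprt_ops *ops;
        pthread_mutex_t write_lock;     /* stand-in for xprt_lock_write() */
        int free_slots;
};

static void plain_alloc_slot(struct xprt *xprt)
{
        if (xprt->free_slots > 0)
                xprt->free_slots--;
}

/* Throttled variant: only allocate while holding the write lock. */
static void lock_and_alloc_slot(struct xprt *xprt)
{
        pthread_mutex_lock(&xprt->write_lock);
        plain_alloc_slot(xprt);
        pthread_mutex_unlock(&xprt->write_lock);
}

static void reserve(struct xprt *xprt)
{
        xprt->ops->alloc_slot(xprt);    /* the delegation point */
}

static const struct xprt_ops udp_like = { .alloc_slot = plain_alloc_slot };
static const struct xprt_ops tcp_like = { .alloc_slot = lock_and_alloc_slot };

int main(void)
{
        struct xprt t = { &tcp_like, PTHREAD_MUTEX_INITIALIZER, 2 };
        struct xprt u = { &udp_like, PTHREAD_MUTEX_INITIALIZER, 2 };

        reserve(&t);
        reserve(&u);
        printf("slots left: tcp-like %d, udp-like %d\n",
               t.free_slots, u.free_slots);
        return 0;
}
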
index 06cdbff..5d9202d 100644 (file)
@@ -713,6 +713,7 @@ static void xprt_rdma_print_stats(struct rpc_xprt *xprt, struct seq_file *seq)
 static struct rpc_xprt_ops xprt_rdma_procs = {
        .reserve_xprt           = xprt_rdma_reserve_xprt,
        .release_xprt           = xprt_release_xprt_cong, /* sunrpc/xprt.c */
+       .alloc_slot             = xprt_alloc_slot,
        .release_request        = xprt_release_rqst_cong,       /* ditto */
        .set_retrans_timeout    = xprt_set_retrans_timeout_def, /* ditto */
        .rpcbind                = rpcb_getport_async,   /* sunrpc/rpcb_clnt.c */
index 4005672..a35b8e5 100644 (file)
@@ -2473,6 +2473,7 @@ static void bc_destroy(struct rpc_xprt *xprt)
 static struct rpc_xprt_ops xs_local_ops = {
        .reserve_xprt           = xprt_reserve_xprt,
        .release_xprt           = xs_tcp_release_xprt,
+       .alloc_slot             = xprt_alloc_slot,
        .rpcbind                = xs_local_rpcbind,
        .set_port               = xs_local_set_port,
        .connect                = xs_connect,
@@ -2489,6 +2490,7 @@ static struct rpc_xprt_ops xs_udp_ops = {
        .set_buffer_size        = xs_udp_set_buffer_size,
        .reserve_xprt           = xprt_reserve_xprt_cong,
        .release_xprt           = xprt_release_xprt_cong,
+       .alloc_slot             = xprt_alloc_slot,
        .rpcbind                = rpcb_getport_async,
        .set_port               = xs_set_port,
        .connect                = xs_connect,
@@ -2506,6 +2508,7 @@ static struct rpc_xprt_ops xs_udp_ops = {
 static struct rpc_xprt_ops xs_tcp_ops = {
        .reserve_xprt           = xprt_reserve_xprt,
        .release_xprt           = xs_tcp_release_xprt,
+       .alloc_slot             = xprt_lock_and_alloc_slot,
        .rpcbind                = rpcb_getport_async,
        .set_port               = xs_set_port,
        .connect                = xs_connect,
index 4235a63..b3d907e 100644 (file)
@@ -74,8 +74,13 @@ kallsyms()
        info KSYM ${2}
        local kallsymopt;
 
+       if [ -n "${CONFIG_SYMBOL_PREFIX}" ]; then
+               kallsymopt="${kallsymopt} \
+                           --symbol-prefix=${CONFIG_SYMBOL_PREFIX}"
+       fi
+
        if [ -n "${CONFIG_KALLSYMS_ALL}" ]; then
-               kallsymopt=--all-symbols
+               kallsymopt="${kallsymopt} --all-symbols"
        fi
 
        local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL}               \