Merge branch 'devel-stable' into for-next
author Russell King <rmk+kernel@arm.linux.org.uk>
Fri, 22 Jul 2011 22:09:07 +0000 (23:09 +0100)
committer Russell King <rmk+kernel@arm.linux.org.uk>
Fri, 22 Jul 2011 22:09:07 +0000 (23:09 +0100)
Conflicts:
arch/arm/kernel/entry-armv.S

13 files changed:
arch/arm/Kconfig
arch/arm/kernel/entry-armv.S
arch/arm/kernel/entry-header.S
arch/arm/kernel/perf_event.c
arch/arm/kernel/setup.c
arch/arm/mach-davinci/board-dm365-evm.c
arch/arm/mm/init.c
arch/arm/mm/proc-arm6_7.S
arch/arm/mm/proc-sa1100.S
arch/arm/mm/tlb-fa.S
arch/arm/mm/tlb-v6.S
arch/arm/mm/tlb-v7.S
arch/arm/vfp/vfpmodule.c

Simple merge
@@@ -385,8 -391,8 +385,8 @@@ ENDPROC(__pabt_svc
        @ if it was interrupted in a critical region.  Here we
        @ perform a quick test inline since it should be false
        @ 99.9999% of the time.  The rest is done out of line.
 -      cmp     r2, #TASK_SIZE
 +      cmp     r4, #TASK_SIZE
-       blhs    kuser_cmpxchg_fixup
+       blhs    kuser_cmpxchg64_fixup
  #endif
  #endif
        .endm
@@@ -742,40 -780,95 +723,95 @@@ ENDPROC(__switch_to
  __kuser_helper_start:
  
  /*
-  * Reference prototype:
-  *
-  *    void __kernel_memory_barrier(void)
-  *
-  * Input:
-  *
-  *    lr = return address
-  *
-  * Output:
-  *
-  *    none
-  *
-  * Clobbered:
-  *
-  *    none
-  *
-  * Definition and user space usage example:
-  *
-  *    typedef void (__kernel_dmb_t)(void);
-  *    #define __kernel_dmb (*(__kernel_dmb_t *)0xffff0fa0)
-  *
-  * Apply any needed memory barrier to preserve consistency with data modified
-  * manually and __kuser_cmpxchg usage.
-  *
-  * This could be used as follows:
-  *
-  * #define __kernel_dmb() \
-  *         asm volatile ( "mov r0, #0xffff0fff; mov lr, pc; sub pc, r0, #95" \
-  *            : : : "r0", "lr","cc" )
+  * Due to the length of some sequences, __kuser_cmpxchg64 spans 2 regular
+  * kuser "slots", therefore 0xffff0f80 is not used as a valid entry point.
   */
  
- __kuser_memory_barrier:                               @ 0xffff0fa0
+ __kuser_cmpxchg64:                            @ 0xffff0f60
+ #if defined(CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG)
+       /*
+        * Poor you.  No fast solution possible...
+        * The kernel itself must perform the operation.
+        * A special ghost syscall is used for that (see traps.c).
+        */
+       stmfd   sp!, {r7, lr}
+       ldr     r7, 1f                  @ it's 20 bits
+       swi     __ARM_NR_cmpxchg64
+       ldmfd   sp!, {r7, pc}
+ 1:    .word   __ARM_NR_cmpxchg64
+ #elif defined(CONFIG_CPU_32v6K)
+       stmfd   sp!, {r4, r5, r6, r7}
+       ldrd    r4, r5, [r0]                    @ load old val
+       ldrd    r6, r7, [r1]                    @ load new val
+       smp_dmb arm
+ 1:    ldrexd  r0, r1, [r2]                    @ load current val
+       eors    r3, r0, r4                      @ compare with oldval (1)
+       eoreqs  r3, r1, r5                      @ compare with oldval (2)
+       strexdeq r3, r6, r7, [r2]               @ store newval if eq
+       teqeq   r3, #1                          @ success?
+       beq     1b                              @ if no then retry
        smp_dmb arm
 -      @ r2 = address of interrupted insn (must be preserved).
+       rsbs    r0, r3, #0                      @ set returned val and C flag
+       ldmfd   sp!, {r4, r5, r6, r7}
+       bx      lr
+ #elif !defined(CONFIG_SMP)
+ #ifdef CONFIG_MMU
+       /*
+        * The only thing that can break atomicity in this cmpxchg64
+        * implementation is either an IRQ or a data abort exception
+        * causing another process/thread to be scheduled in the middle of
+        * the critical sequence.  The same strategy as for cmpxchg is used.
+        */
+       stmfd   sp!, {r4, r5, r6, lr}
+       ldmia   r0, {r4, r5}                    @ load old val
+       ldmia   r1, {r6, lr}                    @ load new val
+ 1:    ldmia   r2, {r0, r1}                    @ load current val
+       eors    r3, r0, r4                      @ compare with oldval (1)
+       eoreqs  r3, r1, r5                      @ compare with oldval (2)
+ 2:    stmeqia r2, {r6, lr}                    @ store newval if eq
+       rsbs    r0, r3, #0                      @ set return val and C flag
+       ldmfd   sp!, {r4, r5, r6, pc}
+       .text
+ kuser_cmpxchg64_fixup:
+       @ Called from kuser_cmpxchg_fixup.
 -      @ If r2 >= 1b and r2 <= 2b then saved pc_usr is set to 1b.
++      @ r4 = address of interrupted insn (must be preserved).
+       @ sp = saved regs. r7 and r8 are clobbered.
+       @ 1b = first critical insn, 2b = last critical insn.
 -      subs    r8, r2, r7
++      @ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
+       mov     r7, #0xffff0fff
+       sub     r7, r7, #(0xffff0fff - (0xffff0f60 + (1b - __kuser_cmpxchg64)))
++      subs    r8, r4, r7
+       rsbcss  r8, r8, #(2b - 1b)
+       strcs   r7, [sp, #S_PC]
+ #if __LINUX_ARM_ARCH__ < 6
+       bcc     kuser_cmpxchg32_fixup
+ #endif
+       mov     pc, lr
+       .previous
+ #else
+ #warning "NPTL on non MMU needs fixing"
+       mov     r0, #-1
+       adds    r0, r0, #0
        usr_ret lr
+ #endif
+ #else
+ #error "incoherent kernel configuration"
+ #endif
+       /* pad to next slot */
+       .rept   (16 - (. - __kuser_cmpxchg64)/4)
+       .word   0
+       .endr
  
        .align  5
  
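The reference-prototype comment block that used to document __kernel_dmb is removed above; like the existing 32-bit helper, the new __kuser_cmpxchg64 is reached from user space through a fixed entry point in the vectors page. A minimal usage sketch, assuming the 0xffff0f60 entry point and the pointer-based calling convention this helper implements (the typedef, macro and wrapper names below are illustrative, not kernel identifiers; the authoritative interface description is the kuser helpers document in the kernel tree):

#include <stdint.h>

typedef int (__kernel_cmpxchg64_t)(const int64_t *oldval,
                                   const int64_t *newval,
                                   volatile int64_t *ptr);
/* the helper lives at a fixed address in the vectors page */
#define __kernel_cmpxchg64 (*(__kernel_cmpxchg64_t *)0xffff0f60)

/* Atomically add 'inc' to *counter: retry while another thread wins the store. */
static int64_t example_atomic64_add_return(int64_t inc,
                                           volatile int64_t *counter)
{
        int64_t old, new;

        do {
                old = *counter;
                new = old + inc;
        } while (__kernel_cmpxchg64(&old, &new, counter) != 0); /* 0 = success */

        return new;
}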
@@@ -868,15 -913,15 +856,15 @@@ __kuser_cmpxchg:                                @ 0xffff0fc
        usr_ret lr
  
        .text
- kuser_cmpxchg_fixup:
+ kuser_cmpxchg32_fixup:
        @ Called from kuser_cmpxchg_check macro.
 -      @ r2 = address of interrupted insn (must be preserved).
 +      @ r4 = address of interrupted insn (must be preserved).
        @ sp = saved regs. r7 and r8 are clobbered.
        @ 1b = first critical insn, 2b = last critical insn.
 -      @ If r2 >= 1b and r2 <= 2b then saved pc_usr is set to 1b.
 +      @ If r4 >= 1b and r4 <= 2b then saved pc_usr is set to 1b.
        mov     r7, #0xffff0fff
        sub     r7, r7, #(0xffff0fff - (0xffff0fc0 + (1b - __kuser_cmpxchg)))
 -      subs    r8, r2, r7
 +      subs    r8, r4, r7
        rsbcss  r8, r8, #(2b - 1b)
        strcs   r7, [sp, #S_PC]
        mov     pc, lr
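Both kuser_cmpxchg64_fixup and kuser_cmpxchg32_fixup above apply the same restart strategy triggered by the kuser_cmpxchg_check macro: when an exception lands inside the helper's critical load/compare/store window, the saved user pc is wound back to the first critical instruction so the whole sequence is retried from a clean state after the interruption. A hedged C rendering of that logic (the function and *_critical parameter names are illustrative, not identifiers from entry-armv.S):

#include <asm/ptrace.h>

/* r4 in the assembly corresponds to the interrupted user pc here */
static void kuser_cmpxchg_fixup_sketch(struct pt_regs *regs,
                                       unsigned long first_critical,
                                       unsigned long last_critical)
{
        unsigned long pc = regs->ARM_pc;

        /* the subs/rsbcss pair: is pc within [1b, 2b]? */
        if (pc >= first_critical && pc <= last_critical)
                regs->ARM_pc = first_critical;  /* strcs r7, [sp, #S_PC] */
}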
Simple merge
Simple merge
@@@ -917,8 -915,15 +917,14 @@@ void __init setup_arch(char **cmdline_p
  #endif
        reserve_crashkernel();
  
 -      cpu_init();
        tcm_init();
  
+ #ifdef CONFIG_ZONE_DMA
+       if (mdesc->dma_zone_size) {
+               extern unsigned long arm_dma_zone_size;
+               arm_dma_zone_size = mdesc->dma_zone_size;
+       }
+ #endif
  #ifdef CONFIG_MULTI_IRQ_HANDLER
        handle_arch_irq = mdesc->handle_irq;
  #endif
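The CONFIG_ZONE_DMA hook added above copies a per-machine limit from the machine descriptor into arm_dma_zone_size during setup_arch(). Boards whose peripherals can only address part of RAM are expected to supply that limit in their descriptor; a hedged sketch with a hypothetical machine entry (the machine type, name and size are placeholders, and the field only has an effect when CONFIG_ZONE_DMA is enabled):

#include <asm/sizes.h>          /* SZ_32M; later trees use linux/sizes.h */
#include <asm/mach/arch.h>

/* MACH_TYPE_EXAMPLE_BOARD is a placeholder machine type for illustration */
MACHINE_START(EXAMPLE_BOARD, "Hypothetical board with DMA-limited peripherals")
        /* restrict ZONE_DMA to the first 32MB of RAM on this machine */
        .dma_zone_size  = SZ_32M,
        /* ... .map_io, .init_irq, .timer, .init_machine as usual ... */
MACHINE_END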
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge