Merge branch 'for-linus' of master.kernel.org:/home/rmk/linux-2.6-arm
authorLinus Torvalds <torvalds@linux-foundation.org>
Sun, 24 Jul 2011 17:20:54 +0000 (10:20 -0700)
committerLinus Torvalds <torvalds@linux-foundation.org>
Sun, 24 Jul 2011 17:20:54 +0000 (10:20 -0700)
* 'for-linus' of master.kernel.org:/home/rmk/linux-2.6-arm: (237 commits)
  ARM: 7004/1: fix traps.h compile warnings
  ARM: 6998/2: kernel: use proper memory barriers for bitops
  ARM: 6997/1: ep93xx: increase NR_BANKS to 16 for support of 128MB RAM
  ARM: Fix build errors caused by adding generic macros
  ARM: CPU hotplug: ensure we migrate all IRQs off a downed CPU
  ARM: CPU hotplug: pass in proper affinity mask on IRQ migration
  ARM: GIC: avoid routing interrupts to offline CPUs
  ARM: CPU hotplug: fix abuse of irqdesc->node
  ARM: 6981/2: mmci: adjust calculation of f_min
  ARM: 7000/1: LPAE: Use long long printk format for displaying the pud
  ARM: 6999/1: head, zImage: Always Enter the kernel in ARM state
  ARM: btc: avoid invalidating the branch target cache on kernel TLB maintanence
  ARM: ARM_DMA_ZONE_SIZE is no more
  ARM: mach-shark: move ARM_DMA_ZONE_SIZE to mdesc->dma_zone_size
  ARM: mach-sa1100: move ARM_DMA_ZONE_SIZE to mdesc->dma_zone_size
  ARM: mach-realview: move from ARM_DMA_ZONE_SIZE to mdesc->dma_zone_size
  ARM: mach-pxa: move from ARM_DMA_ZONE_SIZE to mdesc->dma_zone_size
  ARM: mach-ixp4xx: move from ARM_DMA_ZONE_SIZE to mdesc->dma_zone_size
  ARM: mach-h720x: move from ARM_DMA_ZONE_SIZE to mdesc->dma_zone_size
  ARM: mach-davinci: move from ARM_DMA_ZONE_SIZE to mdesc->dma_zone_size
  ...

1  2 
arch/arm/Kconfig
arch/arm/kernel/perf_event_v7.c
arch/arm/kernel/ptrace.c
arch/arm/mach-davinci/board-mityomapl138.c
arch/arm/mm/fault.c
drivers/mmc/host/mmci.c

diff --combined arch/arm/Kconfig
@@@ -10,7 -10,7 +10,7 @@@ config AR
        select GENERIC_ATOMIC64 if (CPU_V6 || !CPU_32v6K || !AEABI)
        select HAVE_OPROFILE if (HAVE_PERF_EVENTS)
        select HAVE_ARCH_KGDB
-       select HAVE_KPROBES if (!XIP_KERNEL && !THUMB2_KERNEL)
+       select HAVE_KPROBES if !XIP_KERNEL
        select HAVE_KRETPROBES if (HAVE_KPROBES)
        select HAVE_FUNCTION_TRACER if (!XIP_KERNEL)
        select HAVE_FTRACE_MCOUNT_RECORD if (!XIP_KERNEL)
@@@ -37,6 -37,9 +37,9 @@@
          Europe.  There is an ARM Linux project with a web page at
          <http://www.arm.linux.org.uk/>.
  
+ config ARM_HAS_SG_CHAIN
+       bool
  config HAVE_PWM
        bool
  
@@@ -642,7 -645,6 +645,7 @@@ config ARCH_SHMOBIL
        select NO_IOPORT
        select SPARSE_IRQ
        select MULTI_IRQ_HANDLER
 +      select PM_GENERIC_DOMAINS if PM
        help
          Support for Renesas's SH-Mobile and R-Mobile ARM platforms.
  
@@@ -1347,7 -1349,6 +1350,6 @@@ config SMP_ON_U
  
  config HAVE_ARM_SCU
        bool
-       depends on SMP
        help
          This option enables support for the ARM system coherency unit
  
@@@ -1716,17 -1717,34 +1718,34 @@@ config ZBOOT_RO
          Say Y here if you intend to execute your compressed kernel image
          (zImage) directly from ROM or flash.  If unsure, say N.
  
+ choice
+       prompt "Include SD/MMC loader in zImage (EXPERIMENTAL)"
+       depends on ZBOOT_ROM && ARCH_SH7372 && EXPERIMENTAL
+       default ZBOOT_ROM_NONE
+       help
+         Include experimental SD/MMC loading code in the ROM-able zImage.
+         With this enabled it is possible to write the ROM-able zImage
+         kernel image to an MMC or SD card and boot the kernel straight
+         from the reset vector. At reset the processor Mask ROM will load
+         the first part of the ROM-able zImage which in turn loads the
+         rest of the kernel image to RAM.
+ config ZBOOT_ROM_NONE
+       bool "No SD/MMC loader in zImage (EXPERIMENTAL)"
+       help
+         Do not load image from SD or MMC
  config ZBOOT_ROM_MMCIF
        bool "Include MMCIF loader in zImage (EXPERIMENTAL)"
-       depends on ZBOOT_ROM && ARCH_SH7372 && EXPERIMENTAL
        help
-         Say Y here to include experimental MMCIF loading code in the
-         ROM-able zImage. With this enabled it is possible to write the
-         the ROM-able zImage kernel image to an MMC card and boot the
-         kernel straight from the reset vector. At reset the processor
-         Mask ROM will load the first part of the the ROM-able zImage
-         which in turn loads the rest the kernel image to RAM using the
-         MMCIF hardware block.
+         Load image from MMCIF hardware block.
+ config ZBOOT_ROM_SH_MOBILE_SDHI
+       bool "Include SuperH Mobile SDHI loader in zImage (EXPERIMENTAL)"
+       help
+         Load image from SDHI hardware block
+ endchoice
  
  config CMDLINE
        string "Default kernel command string"
   */
  
  #ifdef CONFIG_CPU_V7
- /* Common ARMv7 event types */
+ /*
+  * Common ARMv7 event types
+  *
+  * Note: An implementation may not be able to count all of these events
+  * but the encodings are considered to be `reserved' in the case that
+  * they are not available.
+  */
  enum armv7_perf_types {
        ARMV7_PERFCTR_PMNC_SW_INCR              = 0x00,
        ARMV7_PERFCTR_IFETCH_MISS               = 0x01,
        ARMV7_PERFCTR_ITLB_MISS                 = 0x02,
-       ARMV7_PERFCTR_DCACHE_REFILL             = 0x03,
-       ARMV7_PERFCTR_DCACHE_ACCESS             = 0x04,
+       ARMV7_PERFCTR_DCACHE_REFILL             = 0x03, /* L1 */
+       ARMV7_PERFCTR_DCACHE_ACCESS             = 0x04, /* L1 */
        ARMV7_PERFCTR_DTLB_REFILL               = 0x05,
        ARMV7_PERFCTR_DREAD                     = 0x06,
        ARMV7_PERFCTR_DWRITE                    = 0x07,
+       ARMV7_PERFCTR_INSTR_EXECUTED            = 0x08,
        ARMV7_PERFCTR_EXC_TAKEN                 = 0x09,
        ARMV7_PERFCTR_EXC_EXECUTED              = 0x0A,
        ARMV7_PERFCTR_CID_WRITE                 = 0x0B,
         */
        ARMV7_PERFCTR_PC_WRITE                  = 0x0C,
        ARMV7_PERFCTR_PC_IMM_BRANCH             = 0x0D,
+       ARMV7_PERFCTR_PC_PROC_RETURN            = 0x0E,
        ARMV7_PERFCTR_UNALIGNED_ACCESS          = 0x0F,
+       /* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
        ARMV7_PERFCTR_PC_BRANCH_MIS_PRED        = 0x10,
        ARMV7_PERFCTR_CLOCK_CYCLES              = 0x11,
-       ARMV7_PERFCTR_PC_BRANCH_MIS_USED        = 0x12,
+       ARMV7_PERFCTR_PC_BRANCH_PRED            = 0x12,
+       ARMV7_PERFCTR_MEM_ACCESS                = 0x13,
+       ARMV7_PERFCTR_L1_ICACHE_ACCESS          = 0x14,
+       ARMV7_PERFCTR_L1_DCACHE_WB              = 0x15,
+       ARMV7_PERFCTR_L2_DCACHE_ACCESS          = 0x16,
+       ARMV7_PERFCTR_L2_DCACHE_REFILL          = 0x17,
+       ARMV7_PERFCTR_L2_DCACHE_WB              = 0x18,
+       ARMV7_PERFCTR_BUS_ACCESS                = 0x19,
+       ARMV7_PERFCTR_MEMORY_ERROR              = 0x1A,
+       ARMV7_PERFCTR_INSTR_SPEC                = 0x1B,
+       ARMV7_PERFCTR_TTBR_WRITE                = 0x1C,
+       ARMV7_PERFCTR_BUS_CYCLES                = 0x1D,
  
        ARMV7_PERFCTR_CPU_CYCLES                = 0xFF
  };
  
  /* ARMv7 Cortex-A8 specific event types */
  enum armv7_a8_perf_types {
-       ARMV7_PERFCTR_INSTR_EXECUTED            = 0x08,
-       ARMV7_PERFCTR_PC_PROC_RETURN            = 0x0E,
        ARMV7_PERFCTR_WRITE_BUFFER_FULL         = 0x40,
        ARMV7_PERFCTR_L2_STORE_MERGED           = 0x41,
        ARMV7_PERFCTR_L2_STORE_BUFF             = 0x42,
@@@ -138,6 -153,39 +153,39 @@@ enum armv7_a9_perf_types 
        ARMV7_PERFCTR_PLE_RQST_PROG             = 0xA5
  };
  
+ /* ARMv7 Cortex-A5 specific event types */
+ enum armv7_a5_perf_types {
+       ARMV7_PERFCTR_IRQ_TAKEN                 = 0x86,
+       ARMV7_PERFCTR_FIQ_TAKEN                 = 0x87,
+       ARMV7_PERFCTR_EXT_MEM_RQST              = 0xc0,
+       ARMV7_PERFCTR_NC_EXT_MEM_RQST           = 0xc1,
+       ARMV7_PERFCTR_PREFETCH_LINEFILL         = 0xc2,
+       ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP    = 0xc3,
+       ARMV7_PERFCTR_ENTER_READ_ALLOC          = 0xc4,
+       ARMV7_PERFCTR_READ_ALLOC                = 0xc5,
+       ARMV7_PERFCTR_STALL_SB_FULL             = 0xc9,
+ };
+ /* ARMv7 Cortex-A15 specific event types */
+ enum armv7_a15_perf_types {
+       ARMV7_PERFCTR_L1_DCACHE_READ_ACCESS     = 0x40,
+       ARMV7_PERFCTR_L1_DCACHE_WRITE_ACCESS    = 0x41,
+       ARMV7_PERFCTR_L1_DCACHE_READ_REFILL     = 0x42,
+       ARMV7_PERFCTR_L1_DCACHE_WRITE_REFILL    = 0x43,
+       ARMV7_PERFCTR_L1_DTLB_READ_REFILL       = 0x4C,
+       ARMV7_PERFCTR_L1_DTLB_WRITE_REFILL      = 0x4D,
+       ARMV7_PERFCTR_L2_DCACHE_READ_ACCESS     = 0x50,
+       ARMV7_PERFCTR_L2_DCACHE_WRITE_ACCESS    = 0x51,
+       ARMV7_PERFCTR_L2_DCACHE_READ_REFILL     = 0x52,
+       ARMV7_PERFCTR_L2_DCACHE_WRITE_REFILL    = 0x53,
+       ARMV7_PERFCTR_SPEC_PC_WRITE             = 0x76,
+ };
  /*
   * Cortex-A8 HW events mapping
   *
@@@ -207,11 -255,6 +255,6 @@@ static const unsigned armv7_a8_perf_cac
                },
        },
        [C(DTLB)] = {
-               /*
-                * Only ITLB misses and DTLB refills are supported.
-                * If users want the DTLB refills misses a raw counter
-                * must be used.
-                */
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_DTLB_REFILL,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
        },
 +      [C(NODE)] = {
 +              [C(OP_READ)] = {
 +                      [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
 +                      [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
 +              },
 +              [C(OP_WRITE)] = {
 +                      [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
 +                      [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
 +              },
 +              [C(OP_PREFETCH)] = {
 +                      [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
 +                      [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
 +              },
 +      },
  };
  
  /*
@@@ -337,11 -366,6 +380,6 @@@ static const unsigned armv7_a9_perf_cac
                },
        },
        [C(DTLB)] = {
-               /*
-                * Only ITLB misses and DTLB refills are supported.
-                * If users want the DTLB refills misses a raw counter
-                * must be used.
-                */
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]        = ARMV7_PERFCTR_DTLB_REFILL,
                        [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
                },
        },
 +      [C(NODE)] = {
 +              [C(OP_READ)] = {
 +                      [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
 +                      [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
 +              },
 +              [C(OP_WRITE)] = {
 +                      [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
 +                      [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
 +              },
 +              [C(OP_PREFETCH)] = {
 +                      [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
 +                      [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
 +              },
 +      },
  };
  
+ /*
+  * Cortex-A5 HW events mapping
+  */
+ static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
+       [PERF_COUNT_HW_CPU_CYCLES]          = ARMV7_PERFCTR_CPU_CYCLES,
+       [PERF_COUNT_HW_INSTRUCTIONS]        = ARMV7_PERFCTR_INSTR_EXECUTED,
+       [PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_CACHE_MISSES]        = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
+       [PERF_COUNT_HW_BRANCH_MISSES]       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+       [PERF_COUNT_HW_BUS_CYCLES]          = HW_OP_UNSUPPORTED,
+ };
+ static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+                                       [PERF_COUNT_HW_CACHE_OP_MAX]
+                                       [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+       [C(L1D)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]
+                                       = ARMV7_PERFCTR_DCACHE_ACCESS,
+                       [C(RESULT_MISS)]
+                                       = ARMV7_PERFCTR_DCACHE_REFILL,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]
+                                       = ARMV7_PERFCTR_DCACHE_ACCESS,
+                       [C(RESULT_MISS)]
+                                       = ARMV7_PERFCTR_DCACHE_REFILL,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]
+                                       = ARMV7_PERFCTR_PREFETCH_LINEFILL,
+                       [C(RESULT_MISS)]
+                                       = ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP,
+               },
+       },
+       [C(L1I)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_IFETCH_MISS,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_IFETCH_MISS,
+               },
+               /*
+                * The prefetch counters don't differentiate between the I
+                * side and the D side.
+                */
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]
+                                       = ARMV7_PERFCTR_PREFETCH_LINEFILL,
+                       [C(RESULT_MISS)]
+                                       = ARMV7_PERFCTR_PREFETCH_LINEFILL_DROP,
+               },
+       },
+       [C(LL)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(DTLB)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_DTLB_REFILL,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_DTLB_REFILL,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(ITLB)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_ITLB_MISS,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_ITLB_MISS,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(BPU)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_PC_BRANCH_PRED,
+                       [C(RESULT_MISS)]
+                                       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_PC_BRANCH_PRED,
+                       [C(RESULT_MISS)]
+                                       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+ };
+ /*
+  * Cortex-A15 HW events mapping
+  */
+ static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
+       [PERF_COUNT_HW_CPU_CYCLES]          = ARMV7_PERFCTR_CPU_CYCLES,
+       [PERF_COUNT_HW_INSTRUCTIONS]        = ARMV7_PERFCTR_INSTR_EXECUTED,
+       [PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_CACHE_MISSES]        = HW_OP_UNSUPPORTED,
+       [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_SPEC_PC_WRITE,
+       [PERF_COUNT_HW_BRANCH_MISSES]       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+       [PERF_COUNT_HW_BUS_CYCLES]          = ARMV7_PERFCTR_BUS_CYCLES,
+ };
+ static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+                                       [PERF_COUNT_HW_CACHE_OP_MAX]
+                                       [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+       [C(L1D)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]
+                                       = ARMV7_PERFCTR_L1_DCACHE_READ_ACCESS,
+                       [C(RESULT_MISS)]
+                                       = ARMV7_PERFCTR_L1_DCACHE_READ_REFILL,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]
+                                       = ARMV7_PERFCTR_L1_DCACHE_WRITE_ACCESS,
+                       [C(RESULT_MISS)]
+                                       = ARMV7_PERFCTR_L1_DCACHE_WRITE_REFILL,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(L1I)] = {
+               /*
+                * Not all performance counters differentiate between read
+                * and write accesses/misses so we're not always strictly
+                * correct, but it's the best we can do. Writes and reads get
+                * combined in these cases.
+                */
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_IFETCH_MISS,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_IFETCH_MISS,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(LL)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]
+                                       = ARMV7_PERFCTR_L2_DCACHE_READ_ACCESS,
+                       [C(RESULT_MISS)]
+                                       = ARMV7_PERFCTR_L2_DCACHE_READ_REFILL,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]
+                                       = ARMV7_PERFCTR_L2_DCACHE_WRITE_ACCESS,
+                       [C(RESULT_MISS)]
+                                       = ARMV7_PERFCTR_L2_DCACHE_WRITE_REFILL,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(DTLB)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]
+                                       = ARMV7_PERFCTR_L1_DTLB_READ_REFILL,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]
+                                       = ARMV7_PERFCTR_L1_DTLB_WRITE_REFILL,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(ITLB)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_ITLB_MISS,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = ARMV7_PERFCTR_ITLB_MISS,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+       [C(BPU)] = {
+               [C(OP_READ)] = {
+                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_PC_BRANCH_PRED,
+                       [C(RESULT_MISS)]
+                                       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+               },
+               [C(OP_WRITE)] = {
+                       [C(RESULT_ACCESS)]      = ARMV7_PERFCTR_PC_BRANCH_PRED,
+                       [C(RESULT_MISS)]
+                                       = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+               },
+               [C(OP_PREFETCH)] = {
+                       [C(RESULT_ACCESS)]      = CACHE_OP_UNSUPPORTED,
+                       [C(RESULT_MISS)]        = CACHE_OP_UNSUPPORTED,
+               },
+       },
+ };
  /*
   * Perf Events counters
   */
@@@ -815,7 -1061,7 +1089,7 @@@ static irqreturn_t armv7pmu_handle_irq(
                if (!armpmu_event_set_period(event, hwc, idx))
                        continue;
  
 -              if (perf_event_overflow(event, 0, &data, regs))
 +              if (perf_event_overflow(event, &data, regs))
                        armpmu->disable(hwc, idx);
        }
  
@@@ -933,6 -1179,26 +1207,26 @@@ static const struct arm_pmu *__init arm
        armv7pmu.num_events     = armv7_read_num_pmnc_events();
        return &armv7pmu;
  }
+ static const struct arm_pmu *__init armv7_a5_pmu_init(void)
+ {
+       armv7pmu.id             = ARM_PERF_PMU_ID_CA5;
+       armv7pmu.name           = "ARMv7 Cortex-A5";
+       armv7pmu.cache_map      = &armv7_a5_perf_cache_map;
+       armv7pmu.event_map      = &armv7_a5_perf_map;
+       armv7pmu.num_events     = armv7_read_num_pmnc_events();
+       return &armv7pmu;
+ }
+ static const struct arm_pmu *__init armv7_a15_pmu_init(void)
+ {
+       armv7pmu.id             = ARM_PERF_PMU_ID_CA15;
+       armv7pmu.name           = "ARMv7 Cortex-A15";
+       armv7pmu.cache_map      = &armv7_a15_perf_cache_map;
+       armv7pmu.event_map      = &armv7_a15_perf_map;
+       armv7pmu.num_events     = armv7_read_num_pmnc_events();
+       return &armv7pmu;
+ }
  #else
  static const struct arm_pmu *__init armv7_a8_pmu_init(void)
  {
@@@ -943,4 -1209,14 +1237,14 @@@ static const struct arm_pmu *__init arm
  {
        return NULL;
  }
+ static const struct arm_pmu *__init armv7_a5_pmu_init(void)
+ {
+       return NULL;
+ }
+ static const struct arm_pmu *__init armv7_a15_pmu_init(void)
+ {
+       return NULL;
+ }
  #endif        /* CONFIG_CPU_V7 */
diff --combined arch/arm/kernel/ptrace.c
@@@ -228,34 -228,12 +228,12 @@@ static struct undef_hook thumb_break_ho
        .fn             = break_trap,
  };
  
- static int thumb2_break_trap(struct pt_regs *regs, unsigned int instr)
- {
-       unsigned int instr2;
-       void __user *pc;
-       /* Check the second half of the instruction.  */
-       pc = (void __user *)(instruction_pointer(regs) + 2);
-       if (processor_mode(regs) == SVC_MODE) {
-               instr2 = *(u16 *) pc;
-       } else {
-               get_user(instr2, (u16 __user *)pc);
-       }
-       if (instr2 == 0xa000) {
-               ptrace_break(current, regs);
-               return 0;
-       } else {
-               return 1;
-       }
- }
  static struct undef_hook thumb2_break_hook = {
-       .instr_mask     = 0xffff,
-       .instr_val      = 0xf7f0,
+       .instr_mask     = 0xffffffff,
+       .instr_val      = 0xf7f0a000,
        .cpsr_mask      = PSR_T_BIT,
        .cpsr_val       = PSR_T_BIT,
-       .fn             = thumb2_break_trap,
+       .fn             = break_trap,
  };
  
  static int __init ptrace_break_init(void)
@@@ -396,7 -374,7 +374,7 @@@ static long ptrace_hbp_idx_to_num(int i
  /*
   * Handle hitting a HW-breakpoint.
   */
 -static void ptrace_hbptriggered(struct perf_event *bp, int unused,
 +static void ptrace_hbptriggered(struct perf_event *bp,
                                     struct perf_sample_data *data,
                                     struct pt_regs *regs)
  {
@@@ -479,8 -457,7 +457,8 @@@ static struct perf_event *ptrace_hbp_cr
        attr.bp_type    = type;
        attr.disabled   = 1;
  
 -      return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, tsk);
 +      return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL,
 +                                         tsk);
  }
  
  static int ptrace_gethbpregs(struct task_struct *tsk, long num,
@@@ -20,7 -20,6 +20,7 @@@
  #include <linux/spi/spi.h>
  #include <linux/spi/flash.h>
  
 +#include <asm/io.h>
  #include <asm/mach-types.h>
  #include <asm/mach/arch.h>
  #include <mach/common.h>
@@@ -571,4 -570,5 +571,5 @@@ MACHINE_START(MITYOMAPL138, "MityDSP-L1
        .init_irq       = cp_intc_init,
        .timer          = &davinci_timer,
        .init_machine   = mityomapl138_init,
+       .dma_zone_size  = SZ_128M,
  MACHINE_END
diff --combined arch/arm/mm/fault.c
@@@ -94,7 -94,7 +94,7 @@@ void show_pte(struct mm_struct *mm, uns
  
                pud = pud_offset(pgd, addr);
                if (PTRS_PER_PUD != 1)
-                       printk(", *pud=%08lx", pud_val(*pud));
+                       printk(", *pud=%08llx", (long long)pud_val(*pud));
  
                if (pud_none(*pud))
                        break;
@@@ -285,6 -285,10 +285,10 @@@ do_page_fault(unsigned long addr, unsig
        tsk = current;
        mm  = tsk->mm;
  
+       /* Enable interrupts if they were enabled in the parent context. */
+       if (interrupts_enabled(regs))
+               local_irq_enable();
        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
        fault = __do_page_fault(mm, addr, fsr, tsk);
        up_read(&mm->mmap_sem);
  
 -      perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, addr);
 +      perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
        if (fault & VM_FAULT_MAJOR)
 -              perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, regs, addr);
 +              perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, regs, addr);
        else if (fault & VM_FAULT_MINOR)
 -              perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, regs, addr);
 +              perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, regs, addr);
  
        /*
         * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR
diff --combined drivers/mmc/host/mmci.c
@@@ -226,9 -226,6 +226,9 @@@ static void __devinit mmci_dma_setup(st
                return;
        }
  
 +      /* initialize pre request cookie */
 +      host->next_data.cookie = 1;
 +
        /* Try to acquire a generic DMA engine slave channel */
        dma_cap_zero(mask);
        dma_cap_set(DMA_SLAVE, mask);
@@@ -338,8 -335,7 +338,8 @@@ static void mmci_dma_unmap(struct mmci_
                dir = DMA_FROM_DEVICE;
        }
  
 -      dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
 +      if (!data->host_cookie)
 +              dma_unmap_sg(chan->device->dev, data->sg, data->sg_len, dir);
  
        /*
         * Use of DMA with scatter-gather is impossible.
@@@ -357,8 -353,7 +357,8 @@@ static void mmci_dma_data_error(struct 
        dmaengine_terminate_all(host->dma_current);
  }
  
 -static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
 +static int mmci_dma_prep_data(struct mmci_host *host, struct mmc_data *data,
 +                            struct mmci_host_next *next)
  {
        struct variant_data *variant = host->variant;
        struct dma_slave_config conf = {
                .src_maxburst = variant->fifohalfsize >> 2, /* # of words */
                .dst_maxburst = variant->fifohalfsize >> 2, /* # of words */
        };
 -      struct mmc_data *data = host->data;
        struct dma_chan *chan;
        struct dma_device *device;
        struct dma_async_tx_descriptor *desc;
        int nr_sg;
  
 -      host->dma_current = NULL;
 +      /* Check if next job is already prepared */
 +      if (data->host_cookie && !next &&
 +          host->dma_current && host->dma_desc_current)
 +              return 0;
 +
 +      if (!next) {
 +              host->dma_current = NULL;
 +              host->dma_desc_current = NULL;
 +      }
  
        if (data->flags & MMC_DATA_READ) {
                conf.direction = DMA_FROM_DEVICE;
                return -EINVAL;
  
        /* If less than or equal to the fifo size, don't bother with DMA */
 -      if (host->size <= variant->fifosize)
 +      if (data->blksz * data->blocks <= variant->fifosize)
                return -EINVAL;
  
        device = chan->device;
        if (!desc)
                goto unmap_exit;
  
 -      /* Okay, go for it. */
 -      host->dma_current = chan;
 +      if (next) {
 +              next->dma_chan = chan;
 +              next->dma_desc = desc;
 +      } else {
 +              host->dma_current = chan;
 +              host->dma_desc_current = desc;
 +      }
 +
 +      return 0;
  
 + unmap_exit:
 +      if (!next)
 +              dmaengine_terminate_all(chan);
 +      dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
 +      return -ENOMEM;
 +}
 +
 +static int mmci_dma_start_data(struct mmci_host *host, unsigned int datactrl)
 +{
 +      int ret;
 +      struct mmc_data *data = host->data;
 +
 +      ret = mmci_dma_prep_data(host, host->data, NULL);
 +      if (ret)
 +              return ret;
 +
 +      /* Okay, go for it. */
        dev_vdbg(mmc_dev(host->mmc),
                 "Submit MMCI DMA job, sglen %d blksz %04x blks %04x flags %08x\n",
                 data->sg_len, data->blksz, data->blocks, data->flags);
 -      dmaengine_submit(desc);
 -      dma_async_issue_pending(chan);
 +      dmaengine_submit(host->dma_desc_current);
 +      dma_async_issue_pending(host->dma_current);
  
        datactrl |= MCI_DPSM_DMAENABLE;
  
        writel(readl(host->base + MMCIMASK0) | MCI_DATAENDMASK,
               host->base + MMCIMASK0);
        return 0;
 +}
  
 -unmap_exit:
 -      dmaengine_terminate_all(chan);
 -      dma_unmap_sg(device->dev, data->sg, data->sg_len, conf.direction);
 -      return -ENOMEM;
 +static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
 +{
 +      struct mmci_host_next *next = &host->next_data;
 +
 +      if (data->host_cookie && data->host_cookie != next->cookie) {
 +              printk(KERN_WARNING "[%s] invalid cookie: data->host_cookie %d"
 +                     " host->next_data.cookie %d\n",
 +                     __func__, data->host_cookie, host->next_data.cookie);
 +              data->host_cookie = 0;
 +      }
 +
 +      if (!data->host_cookie)
 +              return;
 +
 +      host->dma_desc_current = next->dma_desc;
 +      host->dma_current = next->dma_chan;
 +
 +      next->dma_desc = NULL;
 +      next->dma_chan = NULL;
  }
 +
 +static void mmci_pre_request(struct mmc_host *mmc, struct mmc_request *mrq,
 +                           bool is_first_req)
 +{
 +      struct mmci_host *host = mmc_priv(mmc);
 +      struct mmc_data *data = mrq->data;
 +      struct mmci_host_next *nd = &host->next_data;
 +
 +      if (!data)
 +              return;
 +
 +      if (data->host_cookie) {
 +              data->host_cookie = 0;
 +              return;
 +      }
 +
 +      /* if config for dma */
 +      if (((data->flags & MMC_DATA_WRITE) && host->dma_tx_channel) ||
 +          ((data->flags & MMC_DATA_READ) && host->dma_rx_channel)) {
 +              if (mmci_dma_prep_data(host, data, nd))
 +                      data->host_cookie = 0;
 +              else
 +                      data->host_cookie = ++nd->cookie < 0 ? 1 : nd->cookie;
 +      }
 +}
 +
 +static void mmci_post_request(struct mmc_host *mmc, struct mmc_request *mrq,
 +                            int err)
 +{
 +      struct mmci_host *host = mmc_priv(mmc);
 +      struct mmc_data *data = mrq->data;
 +      struct dma_chan *chan;
 +      enum dma_data_direction dir;
 +
 +      if (!data)
 +              return;
 +
 +      if (data->flags & MMC_DATA_READ) {
 +              dir = DMA_FROM_DEVICE;
 +              chan = host->dma_rx_channel;
 +      } else {
 +              dir = DMA_TO_DEVICE;
 +              chan = host->dma_tx_channel;
 +      }
 +
 +
 +      /* if config for dma */
 +      if (chan) {
 +              if (err)
 +                      dmaengine_terminate_all(chan);
 +              if (err || data->host_cookie)
 +                      dma_unmap_sg(mmc_dev(host->mmc), data->sg,
 +                                   data->sg_len, dir);
 +              mrq->data->host_cookie = 0;
 +      }
 +}
 +
  #else
  /* Blank functions if the DMA engine is not available */
 +static void mmci_get_next_data(struct mmci_host *host, struct mmc_data *data)
 +{
 +}
  static inline void mmci_dma_setup(struct mmci_host *host)
  {
  }
@@@ -561,10 -449,6 +561,10 @@@ static inline int mmci_dma_start_data(s
  {
        return -ENOSYS;
  }
 +
 +#define mmci_pre_request NULL
 +#define mmci_post_request NULL
 +
  #endif
  
  static void mmci_start_data(struct mmci_host *host, struct mmc_data *data)
@@@ -988,9 -872,6 +988,9 @@@ static void mmci_request(struct mmc_hos
  
        host->mrq = mrq;
  
 +      if (mrq->data)
 +              mmci_get_next_data(host, mrq->data);
 +
        if (mrq->data && mrq->data->flags & MMC_DATA_READ)
                mmci_start_data(host, mrq->data);
  
@@@ -1105,8 -986,6 +1105,8 @@@ static irqreturn_t mmci_cd_irq(int irq
  
  static const struct mmc_host_ops mmci_ops = {
        .request        = mmci_request,
 +      .pre_req        = mmci_pre_request,
 +      .post_req       = mmci_post_request,
        .set_ios        = mmci_set_ios,
        .get_ro         = mmci_get_ro,
        .get_cd         = mmci_get_cd,
@@@ -1184,7 -1063,15 +1184,15 @@@ static int __devinit mmci_probe(struct 
        }
  
        mmc->ops = &mmci_ops;
-       mmc->f_min = (host->mclk + 511) / 512;
+       /*
+        * The ARM and ST versions of the block have slightly different
+        * clock divider equations which means that the minimum divider
+        * differs too.
+        */
+       if (variant->st_clkdiv)
+               mmc->f_min = DIV_ROUND_UP(host->mclk, 257);
+       else
+               mmc->f_min = DIV_ROUND_UP(host->mclk, 512);
        /*
         * If the platform data supplies a maximum operating
         * frequency, this takes precedence. Else, we fall back