perf_events, x86: Split PMU definitions into separate files
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 0bd23d0..641ccb9 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -23,6 +23,7 @@
 #include <linux/uaccess.h>
 #include <linux/highmem.h>
 #include <linux/cpu.h>
+#include <linux/bitops.h>
 
 #include <asm/apic.h>
 #include <asm/stacktrace.h>
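The only new include. <linux/bitops.h> supplies the bitmap sizing and population-count helpers that the reworked constraint code below leans on, chiefly BITS_TO_LONGS() and the compile-time HWEIGHT() macro. As a rough sketch of what they compute (userspace rendering with hypothetical _X names, not the kernel definitions):

	/* BITS_TO_LONGS(nr): how many unsigned longs a nr-bit bitmap needs */
	#define BITS_PER_LONG_X		(8 * sizeof(unsigned long))
	#define BITS_TO_LONGS_X(nr) \
		(((nr) + BITS_PER_LONG_X - 1) / BITS_PER_LONG_X)

	/* HWEIGHT(n): set-bit count of a constant n, evaluated at build
	 * time; __builtin_popcountll(n) is the closest userspace analogue. */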
@@ -69,12 +70,21 @@ struct debug_store {
        u64     pebs_event_reset[MAX_PEBS_EVENTS];
 };
 
-#define BITS_TO_U64(nr) DIV_ROUND_UP(nr, BITS_PER_BYTE * sizeof(u64))
-
 struct event_constraint {
-       u64     idxmsk[BITS_TO_U64(X86_PMC_IDX_MAX)];
+       union {
+               unsigned long   idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+               u64             idxmsk64[1];
+       };
        int     code;
        int     cmask;
+       int     weight;
+};
+
+struct amd_nb {
+       int nb_id;  /* NorthBridge id */
+       int refcnt; /* reference count */
+       struct perf_event *owners[X86_PMC_IDX_MAX];
+       struct event_constraint event_constraints[X86_PMC_IDX_MAX];
 };
 
 struct cpu_hw_events {
@@ -87,18 +97,31 @@ struct cpu_hw_events {
        int                     n_events;
        int                     n_added;
        int                     assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
+       u64                     tags[X86_PMC_IDX_MAX];
        struct perf_event       *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
+       struct amd_nb           *amd_nb;
 };
 
-#define EVENT_CONSTRAINT(c, n, m) { \
-       .code = (c),    \
-       .cmask = (m),   \
-       .idxmsk[0] = (n) }
+#define __EVENT_CONSTRAINT(c, n, m, w) {\
+       { .idxmsk64[0] = (n) },         \
+       .code = (c),                    \
+       .cmask = (m),                   \
+       .weight = (w),                  \
+}
+
+#define EVENT_CONSTRAINT(c, n, m)      \
+       __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))
+
+#define INTEL_EVENT_CONSTRAINT(c, n)   \
+       EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVTSEL_MASK)
+
+#define FIXED_EVENT_CONSTRAINT(c, n)   \
+       EVENT_CONSTRAINT(c, n, INTEL_ARCH_FIXED_MASK)
 
-#define EVENT_CONSTRAINT_END \
-       { .code = 0, .cmask = 0, .idxmsk[0] = 0 }
+#define EVENT_CONSTRAINT_END           \
+       EVENT_CONSTRAINT(0, 0, 0)
 
-#define for_each_event_constraint(e, c) \
+#define for_each_event_constraint(e, c)        \
        for ((e) = (c); (e)->cmask; (e)++)
 
 /*
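Two changes to struct event_constraint are worth spelling out. The mask becomes an anonymous union: idxmsk is a real unsigned long bitmap, so test_bit() and for_each_bit() can walk it with no casts, while idxmsk64 lets the initializer macros write the whole mask as one 64-bit literal. (The aliasing assumes the first unsigned long overlays the low bits of the u64, which holds on little-endian x86.) And the popcount of the mask is now baked into ->weight at build time via HWEIGHT(), where the old scheduler recomputed it with bitmap_weight() on every pass. A self-contained sketch of the same union trick, with hypothetical names:

	#include <stdio.h>

	#define PMC_IDX_MAX 64	/* stand-in for X86_PMC_IDX_MAX */

	struct constraint {
		union {
			unsigned long mask[PMC_IDX_MAX / (8 * sizeof(unsigned long))];
			unsigned long long mask64[1];
		};
		int weight;
	};

	/* __builtin_popcountll() stands in for the kernel's HWEIGHT() */
	#define CONSTRAINT(m) \
		{ { .mask64[0] = (m) }, .weight = __builtin_popcountll(m) }

	int main(void)
	{
		struct constraint c = CONSTRAINT(0x3ULL | (1ULL << 32));
		printf("weight=%d\n", c.weight);	/* prints 3 */
		return 0;
	}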
@@ -126,9 +149,14 @@ struct x86_pmu {
        u64             intel_ctrl;
        void            (*enable_bts)(u64 config);
        void            (*disable_bts)(void);
-       void            (*get_event_constraints)(struct cpu_hw_events *cpuc, struct perf_event *event, u64 *idxmsk);
-       void            (*put_event_constraints)(struct cpu_hw_events *cpuc, struct perf_event *event);
-       const struct event_constraint *event_constraints;
+
+       struct event_constraint *
+                       (*get_event_constraints)(struct cpu_hw_events *cpuc,
+                                                struct perf_event *event);
+
+       void            (*put_event_constraints)(struct cpu_hw_events *cpuc,
+                                                struct perf_event *event);
+       struct event_constraint *event_constraints;
 };
 
 static struct x86_pmu x86_pmu __read_mostly;
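On the struct x86_pmu side, get_event_constraints() now returns a pointer to a constraint object, typically one of the static tables, instead of copying a bitmap into caller-provided storage, and event_constraints drops its const so init code can adjust it per model. Events that match no table entry get a shared `unconstrained` sentinel, defined later in the patch. A userspace sketch of the pointer-returning convention (hypothetical names throughout):

	#include <stdio.h>

	struct constraint {
		unsigned long long mask;
		int weight;
	};

	static struct constraint table[] = {
		{ 0x3ULL, 2 },			/* a constrained event */
	};
	static struct constraint unconstrained = { ~0ULL, 64 };

	static struct constraint *get_constraints(int config)
	{
		/* one shared object per constraint; no per-call copies */
		return config == 0 ? &table[0] : &unconstrained;
	}

	int main(void)
	{
		printf("weight=%d\n", get_constraints(1)->weight); /* 64 */
		return 0;
	}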
@@ -140,121 +168,6 @@ static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
 static int x86_perf_event_set_period(struct perf_event *event,
                             struct hw_perf_event *hwc, int idx);
 
-/*
- * Not sure about some of these
- */
-static const u64 p6_perfmon_event_map[] =
-{
-  [PERF_COUNT_HW_CPU_CYCLES]           = 0x0079,
-  [PERF_COUNT_HW_INSTRUCTIONS]         = 0x00c0,
-  [PERF_COUNT_HW_CACHE_REFERENCES]     = 0x0f2e,
-  [PERF_COUNT_HW_CACHE_MISSES]         = 0x012e,
-  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]  = 0x00c4,
-  [PERF_COUNT_HW_BRANCH_MISSES]                = 0x00c5,
-  [PERF_COUNT_HW_BUS_CYCLES]           = 0x0062,
-};
-
-static u64 p6_pmu_event_map(int hw_event)
-{
-       return p6_perfmon_event_map[hw_event];
-}
-
-/*
- * Event setting that is specified not to count anything.
- * We use this to effectively disable a counter.
- *
- * L2_RQSTS with 0 MESI unit mask.
- */
-#define P6_NOP_EVENT                   0x0000002EULL
-
-static u64 p6_pmu_raw_event(u64 hw_event)
-{
-#define P6_EVNTSEL_EVENT_MASK          0x000000FFULL
-#define P6_EVNTSEL_UNIT_MASK           0x0000FF00ULL
-#define P6_EVNTSEL_EDGE_MASK           0x00040000ULL
-#define P6_EVNTSEL_INV_MASK            0x00800000ULL
-#define P6_EVNTSEL_REG_MASK            0xFF000000ULL
-
-#define P6_EVNTSEL_MASK                        \
-       (P6_EVNTSEL_EVENT_MASK |        \
-        P6_EVNTSEL_UNIT_MASK  |        \
-        P6_EVNTSEL_EDGE_MASK  |        \
-        P6_EVNTSEL_INV_MASK   |        \
-        P6_EVNTSEL_REG_MASK)
-
-       return hw_event & P6_EVNTSEL_MASK;
-}
-
-static struct event_constraint intel_p6_event_constraints[] =
-{
-       EVENT_CONSTRAINT(0xc1, 0x1, INTEL_ARCH_EVENT_MASK),     /* FLOPS */
-       EVENT_CONSTRAINT(0x10, 0x1, INTEL_ARCH_EVENT_MASK),     /* FP_COMP_OPS_EXE */
-       EVENT_CONSTRAINT(0x11, 0x1, INTEL_ARCH_EVENT_MASK),     /* FP_ASSIST */
-       EVENT_CONSTRAINT(0x12, 0x2, INTEL_ARCH_EVENT_MASK),     /* MUL */
-       EVENT_CONSTRAINT(0x13, 0x2, INTEL_ARCH_EVENT_MASK),     /* DIV */
-       EVENT_CONSTRAINT(0x14, 0x1, INTEL_ARCH_EVENT_MASK),     /* CYCLES_DIV_BUSY */
-       EVENT_CONSTRAINT_END
-};
-
-/*
- * Intel PerfMon v3. Used on Core2 and later.
- */
-static const u64 intel_perfmon_event_map[] =
-{
-  [PERF_COUNT_HW_CPU_CYCLES]           = 0x003c,
-  [PERF_COUNT_HW_INSTRUCTIONS]         = 0x00c0,
-  [PERF_COUNT_HW_CACHE_REFERENCES]     = 0x4f2e,
-  [PERF_COUNT_HW_CACHE_MISSES]         = 0x412e,
-  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]  = 0x00c4,
-  [PERF_COUNT_HW_BRANCH_MISSES]                = 0x00c5,
-  [PERF_COUNT_HW_BUS_CYCLES]           = 0x013c,
-};
-
-static struct event_constraint intel_core_event_constraints[] =
-{
-       EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32)), INTEL_ARCH_FIXED_MASK), /* INSTRUCTIONS_RETIRED */
-       EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33)), INTEL_ARCH_FIXED_MASK), /* UNHALTED_CORE_CYCLES */
-       EVENT_CONSTRAINT(0x10, 0x1, INTEL_ARCH_EVENT_MASK), /* FP_COMP_OPS_EXE */
-       EVENT_CONSTRAINT(0x11, 0x2, INTEL_ARCH_EVENT_MASK), /* FP_ASSIST */
-       EVENT_CONSTRAINT(0x12, 0x2, INTEL_ARCH_EVENT_MASK), /* MUL */
-       EVENT_CONSTRAINT(0x13, 0x2, INTEL_ARCH_EVENT_MASK), /* DIV */
-       EVENT_CONSTRAINT(0x14, 0x1, INTEL_ARCH_EVENT_MASK), /* CYCLES_DIV_BUSY */
-       EVENT_CONSTRAINT(0x18, 0x1, INTEL_ARCH_EVENT_MASK), /* IDLE_DURING_DIV */
-       EVENT_CONSTRAINT(0x19, 0x2, INTEL_ARCH_EVENT_MASK), /* DELAYED_BYPASS */
-       EVENT_CONSTRAINT(0xa1, 0x1, INTEL_ARCH_EVENT_MASK), /* RS_UOPS_DISPATCH_CYCLES */
-       EVENT_CONSTRAINT(0xcb, 0x1, INTEL_ARCH_EVENT_MASK), /* MEM_LOAD_RETIRED */
-       EVENT_CONSTRAINT_END
-};
-
-static struct event_constraint intel_nehalem_event_constraints[] =
-{
-       EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32)), INTEL_ARCH_FIXED_MASK), /* INSTRUCTIONS_RETIRED */
-       EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33)), INTEL_ARCH_FIXED_MASK), /* UNHALTED_CORE_CYCLES */
-       EVENT_CONSTRAINT(0x40, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_CACHE_LD */
-       EVENT_CONSTRAINT(0x41, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_CACHE_ST */
-       EVENT_CONSTRAINT(0x42, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_CACHE_LOCK */
-       EVENT_CONSTRAINT(0x43, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_ALL_REF */
-       EVENT_CONSTRAINT(0x4e, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_PREFETCH */
-       EVENT_CONSTRAINT(0x4c, 0x3, INTEL_ARCH_EVENT_MASK), /* LOAD_HIT_PRE */
-       EVENT_CONSTRAINT(0x51, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D */
-       EVENT_CONSTRAINT(0x52, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_CACHE_PREFETCH_LOCK_FB_HIT */
-       EVENT_CONSTRAINT(0x53, 0x3, INTEL_ARCH_EVENT_MASK), /* L1D_CACHE_LOCK_FB_HIT */
-       EVENT_CONSTRAINT(0xc5, 0x3, INTEL_ARCH_EVENT_MASK), /* CACHE_LOCK_CYCLES */
-       EVENT_CONSTRAINT_END
-};
-
-static struct event_constraint intel_gen_event_constraints[] =
-{
-       EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32)), INTEL_ARCH_FIXED_MASK), /* INSTRUCTIONS_RETIRED */
-       EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33)), INTEL_ARCH_FIXED_MASK), /* UNHALTED_CORE_CYCLES */
-       EVENT_CONSTRAINT_END
-};
-
-static u64 intel_pmu_event_map(int hw_event)
-{
-       return intel_perfmon_event_map[hw_event];
-}
-
 /*
  * Generalized hw caching related hw_event table, filled
  * in on a per model basis. A value of 0 means
@@ -270,424 +183,6 @@ static u64 __read_mostly hw_cache_event_ids
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX];
 
-static __initconst u64 nehalem_hw_cache_event_ids
-                               [PERF_COUNT_HW_CACHE_MAX]
-                               [PERF_COUNT_HW_CACHE_OP_MAX]
-                               [PERF_COUNT_HW_CACHE_RESULT_MAX] =
-{
- [ C(L1D) ] = {
-       [ C(OP_READ) ] = {
-               [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI            */
-               [ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE         */
-       },
-       [ C(OP_WRITE) ] = {
-               [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI            */
-               [ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE         */
-       },
-       [ C(OP_PREFETCH) ] = {
-               [ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS        */
-               [ C(RESULT_MISS)   ] = 0x024e, /* L1D_PREFETCH.MISS            */
-       },
- },
- [ C(L1I ) ] = {
-       [ C(OP_READ) ] = {
-               [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                    */
-               [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                   */
-       },
-       [ C(OP_WRITE) ] = {
-               [ C(RESULT_ACCESS) ] = -1,
-               [ C(RESULT_MISS)   ] = -1,
-       },
-       [ C(OP_PREFETCH) ] = {
-               [ C(RESULT_ACCESS) ] = 0x0,
-               [ C(RESULT_MISS)   ] = 0x0,
-       },
- },
- [ C(LL  ) ] = {
-       [ C(OP_READ) ] = {
-               [ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS               */
-               [ C(RESULT_MISS)   ] = 0x0224, /* L2_RQSTS.LD_MISS             */
-       },
-       [ C(OP_WRITE) ] = {
-               [ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS                */
-               [ C(RESULT_MISS)   ] = 0x0824, /* L2_RQSTS.RFO_MISS            */
-       },
-       [ C(OP_PREFETCH) ] = {
-               [ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference                */
-               [ C(RESULT_MISS)   ] = 0x412e, /* LLC Misses                   */
-       },
- },
- [ C(DTLB) ] = {
-       [ C(OP_READ) ] = {
-               [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI   (alias)  */
-               [ C(RESULT_MISS)   ] = 0x0108, /* DTLB_LOAD_MISSES.ANY         */
-       },
-       [ C(OP_WRITE) ] = {
-               [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI   (alias)  */
-               [ C(RESULT_MISS)   ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS  */
-       },
-       [ C(OP_PREFETCH) ] = {
-               [ C(RESULT_ACCESS) ] = 0x0,
-               [ C(RESULT_MISS)   ] = 0x0,
-       },
- },
- [ C(ITLB) ] = {
-       [ C(OP_READ) ] = {
-               [ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P           */
-               [ C(RESULT_MISS)   ] = 0x20c8, /* ITLB_MISS_RETIRED            */
-       },
-       [ C(OP_WRITE) ] = {
-               [ C(RESULT_ACCESS) ] = -1,
-               [ C(RESULT_MISS)   ] = -1,
-       },
-       [ C(OP_PREFETCH) ] = {
-               [ C(RESULT_ACCESS) ] = -1,
-               [ C(RESULT_MISS)   ] = -1,
-       },
- },
- [ C(BPU ) ] = {
-       [ C(OP_READ) ] = {
-               [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
-               [ C(RESULT_MISS)   ] = 0x03e8, /* BPU_CLEARS.ANY               */
-       },
-       [ C(OP_WRITE) ] = {
-               [ C(RESULT_ACCESS) ] = -1,
-               [ C(RESULT_MISS)   ] = -1,
-       },
-       [ C(OP_PREFETCH) ] = {
-               [ C(RESULT_ACCESS) ] = -1,
-               [ C(RESULT_MISS)   ] = -1,
-       },
- },
-};
-
-static __initconst u64 core2_hw_cache_event_ids
-                               [PERF_COUNT_HW_CACHE_MAX]
-                               [PERF_COUNT_HW_CACHE_OP_MAX]
-                               [PERF_COUNT_HW_CACHE_RESULT_MAX] =
-{
- [ C(L1D) ] = {
-       [ C(OP_READ) ] = {
-               [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI          */
-               [ C(RESULT_MISS)   ] = 0x0140, /* L1D_CACHE_LD.I_STATE       */
-       },
-       [ C(OP_WRITE) ] = {
-               [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI          */
-               [ C(RESULT_MISS)   ] = 0x0141, /* L1D_CACHE_ST.I_STATE       */
-       },
-       [ C(OP_PREFETCH) ] = {
-               [ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS      */
-               [ C(RESULT_MISS)   ] = 0,
-       },
- },
- [ C(L1I ) ] = {
-       [ C(OP_READ) ] = {
-               [ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS                  */
-               [ C(RESULT_MISS)   ] = 0x0081, /* L1I.MISSES                 */
-       },
-       [ C(OP_WRITE) ] = {
-               [ C(RESULT_ACCESS) ] = -1,
-               [ C(RESULT_MISS)   ] = -1,
-       },
-       [ C(OP_PREFETCH) ] = {
-               [ C(RESULT_ACCESS) ] = 0,
-               [ C(RESULT_MISS)   ] = 0,
-       },
- },
- [ C(LL  ) ] = {
-       [ C(OP_READ) ] = {
-               [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
-               [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
-       },
-       [ C(OP_WRITE) ] = {
-               [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
-               [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
-       },
-       [ C(OP_PREFETCH) ] = {
-               [ C(RESULT_ACCESS) ] = 0,
-               [ C(RESULT_MISS)   ] = 0,
-       },
- },
- [ C(DTLB) ] = {
-       [ C(OP_READ) ] = {
-               [ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI  (alias) */
-               [ C(RESULT_MISS)   ] = 0x0208, /* DTLB_MISSES.MISS_LD        */
-       },
-       [ C(OP_WRITE) ] = {
-               [ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI  (alias) */
-               [ C(RESULT_MISS)   ] = 0x0808, /* DTLB_MISSES.MISS_ST        */
-       },
-       [ C(OP_PREFETCH) ] = {
-               [ C(RESULT_ACCESS) ] = 0,
-               [ C(RESULT_MISS)   ] = 0,
-       },
- },
- [ C(ITLB) ] = {
-       [ C(OP_READ) ] = {
-               [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
-               [ C(RESULT_MISS)   ] = 0x1282, /* ITLBMISSES                 */
-       },
-       [ C(OP_WRITE) ] = {
-               [ C(RESULT_ACCESS) ] = -1,
-               [ C(RESULT_MISS)   ] = -1,
-       },
-       [ C(OP_PREFETCH) ] = {
-               [ C(RESULT_ACCESS) ] = -1,
-               [ C(RESULT_MISS)   ] = -1,
-       },
- },
- [ C(BPU ) ] = {
-       [ C(OP_READ) ] = {
-               [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
-               [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
-       },
-       [ C(OP_WRITE) ] = {
-               [ C(RESULT_ACCESS) ] = -1,
-               [ C(RESULT_MISS)   ] = -1,
-       },
-       [ C(OP_PREFETCH) ] = {
-               [ C(RESULT_ACCESS) ] = -1,
-               [ C(RESULT_MISS)   ] = -1,
-       },
- },
-};
-
-static __initconst u64 atom_hw_cache_event_ids
-                               [PERF_COUNT_HW_CACHE_MAX]
-                               [PERF_COUNT_HW_CACHE_OP_MAX]
-                               [PERF_COUNT_HW_CACHE_RESULT_MAX] =
-{
- [ C(L1D) ] = {
-       [ C(OP_READ) ] = {
-               [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD               */
-               [ C(RESULT_MISS)   ] = 0,
-       },
-       [ C(OP_WRITE) ] = {
-               [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST               */
-               [ C(RESULT_MISS)   ] = 0,
-       },
-       [ C(OP_PREFETCH) ] = {
-               [ C(RESULT_ACCESS) ] = 0x0,
-               [ C(RESULT_MISS)   ] = 0,
-       },
- },
- [ C(L1I ) ] = {
-       [ C(OP_READ) ] = {
-               [ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS                  */
-               [ C(RESULT_MISS)   ] = 0x0280, /* L1I.MISSES                 */
-       },
-       [ C(OP_WRITE) ] = {
-               [ C(RESULT_ACCESS) ] = -1,
-               [ C(RESULT_MISS)   ] = -1,
-       },
-       [ C(OP_PREFETCH) ] = {
-               [ C(RESULT_ACCESS) ] = 0,
-               [ C(RESULT_MISS)   ] = 0,
-       },
- },
- [ C(LL  ) ] = {
-       [ C(OP_READ) ] = {
-               [ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI                 */
-               [ C(RESULT_MISS)   ] = 0x4129, /* L2_LD.ISTATE               */
-       },
-       [ C(OP_WRITE) ] = {
-               [ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI                 */
-               [ C(RESULT_MISS)   ] = 0x412A, /* L2_ST.ISTATE               */
-       },
-       [ C(OP_PREFETCH) ] = {
-               [ C(RESULT_ACCESS) ] = 0,
-               [ C(RESULT_MISS)   ] = 0,
-       },
- },
- [ C(DTLB) ] = {
-       [ C(OP_READ) ] = {
-               [ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI  (alias) */
-               [ C(RESULT_MISS)   ] = 0x0508, /* DTLB_MISSES.MISS_LD        */
-       },
-       [ C(OP_WRITE) ] = {
-               [ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI  (alias) */
-               [ C(RESULT_MISS)   ] = 0x0608, /* DTLB_MISSES.MISS_ST        */
-       },
-       [ C(OP_PREFETCH) ] = {
-               [ C(RESULT_ACCESS) ] = 0,
-               [ C(RESULT_MISS)   ] = 0,
-       },
- },
- [ C(ITLB) ] = {
-       [ C(OP_READ) ] = {
-               [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P         */
-               [ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES                */
-       },
-       [ C(OP_WRITE) ] = {
-               [ C(RESULT_ACCESS) ] = -1,
-               [ C(RESULT_MISS)   ] = -1,
-       },
-       [ C(OP_PREFETCH) ] = {
-               [ C(RESULT_ACCESS) ] = -1,
-               [ C(RESULT_MISS)   ] = -1,
-       },
- },
- [ C(BPU ) ] = {
-       [ C(OP_READ) ] = {
-               [ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY        */
-               [ C(RESULT_MISS)   ] = 0x00c5, /* BP_INST_RETIRED.MISPRED    */
-       },
-       [ C(OP_WRITE) ] = {
-               [ C(RESULT_ACCESS) ] = -1,
-               [ C(RESULT_MISS)   ] = -1,
-       },
-       [ C(OP_PREFETCH) ] = {
-               [ C(RESULT_ACCESS) ] = -1,
-               [ C(RESULT_MISS)   ] = -1,
-       },
- },
-};
-
-static u64 intel_pmu_raw_event(u64 hw_event)
-{
-#define CORE_EVNTSEL_EVENT_MASK                0x000000FFULL
-#define CORE_EVNTSEL_UNIT_MASK         0x0000FF00ULL
-#define CORE_EVNTSEL_EDGE_MASK         0x00040000ULL
-#define CORE_EVNTSEL_INV_MASK          0x00800000ULL
-#define CORE_EVNTSEL_REG_MASK          0xFF000000ULL
-
-#define CORE_EVNTSEL_MASK              \
-       (INTEL_ARCH_EVTSEL_MASK |       \
-        INTEL_ARCH_UNIT_MASK   |       \
-        INTEL_ARCH_EDGE_MASK   |       \
-        INTEL_ARCH_INV_MASK    |       \
-        INTEL_ARCH_CNT_MASK)
-
-       return hw_event & CORE_EVNTSEL_MASK;
-}
-
-static __initconst u64 amd_hw_cache_event_ids
-                               [PERF_COUNT_HW_CACHE_MAX]
-                               [PERF_COUNT_HW_CACHE_OP_MAX]
-                               [PERF_COUNT_HW_CACHE_RESULT_MAX] =
-{
- [ C(L1D) ] = {
-       [ C(OP_READ) ] = {
-               [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
-               [ C(RESULT_MISS)   ] = 0x0041, /* Data Cache Misses          */
-       },
-       [ C(OP_WRITE) ] = {
-               [ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
-               [ C(RESULT_MISS)   ] = 0,
-       },
-       [ C(OP_PREFETCH) ] = {
-               [ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts  */
-               [ C(RESULT_MISS)   ] = 0x0167, /* Data Prefetcher :cancelled */
-       },
- },
- [ C(L1I ) ] = {
-       [ C(OP_READ) ] = {
-               [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches  */
-               [ C(RESULT_MISS)   ] = 0x0081, /* Instruction cache misses   */
-       },
-       [ C(OP_WRITE) ] = {
-               [ C(RESULT_ACCESS) ] = -1,
-               [ C(RESULT_MISS)   ] = -1,
-       },
-       [ C(OP_PREFETCH) ] = {
-               [ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
-               [ C(RESULT_MISS)   ] = 0,
-       },
- },
- [ C(LL  ) ] = {
-       [ C(OP_READ) ] = {
-               [ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
-               [ C(RESULT_MISS)   ] = 0x037E, /* L2 Cache Misses : IC+DC     */
-       },
-       [ C(OP_WRITE) ] = {
-               [ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback           */
-               [ C(RESULT_MISS)   ] = 0,
-       },
-       [ C(OP_PREFETCH) ] = {
-               [ C(RESULT_ACCESS) ] = 0,
-               [ C(RESULT_MISS)   ] = 0,
-       },
- },
- [ C(DTLB) ] = {
-       [ C(OP_READ) ] = {
-               [ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses        */
-               [ C(RESULT_MISS)   ] = 0x0046, /* L1 DTLB and L2 DLTB Miss   */
-       },
-       [ C(OP_WRITE) ] = {
-               [ C(RESULT_ACCESS) ] = 0,
-               [ C(RESULT_MISS)   ] = 0,
-       },
-       [ C(OP_PREFETCH) ] = {
-               [ C(RESULT_ACCESS) ] = 0,
-               [ C(RESULT_MISS)   ] = 0,
-       },
- },
- [ C(ITLB) ] = {
-       [ C(OP_READ) ] = {
-               [ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches        */
-               [ C(RESULT_MISS)   ] = 0x0085, /* Instr. fetch ITLB misses   */
-       },
-       [ C(OP_WRITE) ] = {
-               [ C(RESULT_ACCESS) ] = -1,
-               [ C(RESULT_MISS)   ] = -1,
-       },
-       [ C(OP_PREFETCH) ] = {
-               [ C(RESULT_ACCESS) ] = -1,
-               [ C(RESULT_MISS)   ] = -1,
-       },
- },
- [ C(BPU ) ] = {
-       [ C(OP_READ) ] = {
-               [ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr.      */
-               [ C(RESULT_MISS)   ] = 0x00c3, /* Retired Mispredicted BI    */
-       },
-       [ C(OP_WRITE) ] = {
-               [ C(RESULT_ACCESS) ] = -1,
-               [ C(RESULT_MISS)   ] = -1,
-       },
-       [ C(OP_PREFETCH) ] = {
-               [ C(RESULT_ACCESS) ] = -1,
-               [ C(RESULT_MISS)   ] = -1,
-       },
- },
-};
-
-/*
- * AMD Performance Monitor K7 and later.
- */
-static const u64 amd_perfmon_event_map[] =
-{
-  [PERF_COUNT_HW_CPU_CYCLES]           = 0x0076,
-  [PERF_COUNT_HW_INSTRUCTIONS]         = 0x00c0,
-  [PERF_COUNT_HW_CACHE_REFERENCES]     = 0x0080,
-  [PERF_COUNT_HW_CACHE_MISSES]         = 0x0081,
-  [PERF_COUNT_HW_BRANCH_INSTRUCTIONS]  = 0x00c4,
-  [PERF_COUNT_HW_BRANCH_MISSES]                = 0x00c5,
-};
-
-static u64 amd_pmu_event_map(int hw_event)
-{
-       return amd_perfmon_event_map[hw_event];
-}
-
-static u64 amd_pmu_raw_event(u64 hw_event)
-{
-#define K7_EVNTSEL_EVENT_MASK  0x7000000FFULL
-#define K7_EVNTSEL_UNIT_MASK   0x00000FF00ULL
-#define K7_EVNTSEL_EDGE_MASK   0x000040000ULL
-#define K7_EVNTSEL_INV_MASK    0x000800000ULL
-#define K7_EVNTSEL_REG_MASK    0x0FF000000ULL
-
-#define K7_EVNTSEL_MASK                        \
-       (K7_EVNTSEL_EVENT_MASK |        \
-        K7_EVNTSEL_UNIT_MASK  |        \
-        K7_EVNTSEL_EDGE_MASK  |        \
-        K7_EVNTSEL_INV_MASK   |        \
-        K7_EVNTSEL_REG_MASK)
-
-       return hw_event & K7_EVNTSEL_MASK;
-}
-
 /*
  * Propagate event elapsed time into the generic event.
  * Can only be executed on the CPU where the event is active.
@@ -939,42 +434,6 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
        return 0;
 }
 
-static void intel_pmu_enable_bts(u64 config)
-{
-       unsigned long debugctlmsr;
-
-       debugctlmsr = get_debugctlmsr();
-
-       debugctlmsr |= X86_DEBUGCTL_TR;
-       debugctlmsr |= X86_DEBUGCTL_BTS;
-       debugctlmsr |= X86_DEBUGCTL_BTINT;
-
-       if (!(config & ARCH_PERFMON_EVENTSEL_OS))
-               debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS;
-
-       if (!(config & ARCH_PERFMON_EVENTSEL_USR))
-               debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR;
-
-       update_debugctlmsr(debugctlmsr);
-}
-
-static void intel_pmu_disable_bts(void)
-{
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-       unsigned long debugctlmsr;
-
-       if (!cpuc->ds)
-               return;
-
-       debugctlmsr = get_debugctlmsr();
-
-       debugctlmsr &=
-               ~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT |
-                 X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR);
-
-       update_debugctlmsr(debugctlmsr);
-}
-
 /*
  * Setup the hardware configuration for a given attr_type
  */
@@ -1013,6 +472,8 @@ static int __hw_perf_event_init(struct perf_event *event)
        hwc->config = ARCH_PERFMON_EVENTSEL_INT;
 
        hwc->idx = -1;
+       hwc->last_cpu = -1;
+       hwc->last_tag = ~0ULL;
 
        /*
         * Count user and OS events unless requested not to.
@@ -1081,65 +542,21 @@ static int __hw_perf_event_init(struct perf_event *event)
        return 0;
 }
 
-static void p6_pmu_disable_all(void)
-{
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-       u64 val;
-
-       if (!cpuc->enabled)
-               return;
-
-       cpuc->enabled = 0;
-       barrier();
-
-       /* p6 only has one enable register */
-       rdmsrl(MSR_P6_EVNTSEL0, val);
-       val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
-       wrmsrl(MSR_P6_EVNTSEL0, val);
-}
-
-static void intel_pmu_disable_all(void)
-{
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-
-       if (!cpuc->enabled)
-               return;
-
-       cpuc->enabled = 0;
-       barrier();
-
-       wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
-
-       if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
-               intel_pmu_disable_bts();
-}
-
-static void amd_pmu_disable_all(void)
+static void x86_pmu_disable_all(void)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int idx;
 
-       if (!cpuc->enabled)
-               return;
-
-       cpuc->enabled = 0;
-       /*
-        * ensure we write the disable before we start disabling the
-        * events proper, so that amd_pmu_enable_event() does the
-        * right thing.
-        */
-       barrier();
-
        for (idx = 0; idx < x86_pmu.num_events; idx++) {
                u64 val;
 
                if (!test_bit(idx, cpuc->active_mask))
                        continue;
-               rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
+               rdmsrl(x86_pmu.eventsel + idx, val);
                if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
                        continue;
                val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
-               wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
+               wrmsrl(x86_pmu.eventsel + idx, val);
        }
 }
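This is the heart of the consolidation: the P6, Intel and AMD copies of disable_all differed mainly in which MSR they poked (the BTS and global-control extras migrate to the vendor-specific code), so the common loop is written once against x86_pmu.eventsel, the vendor's first event-select MSR. Clearing ARCH_PERFMON_EVENTSEL0_ENABLE in selector i stops counter i on any of these families. For orientation, the usual x86 MSR bases involved (illustrative values; check the vendor manuals):

	#define MSR_P6_EVNTSEL0			0x00000186
	#define MSR_ARCH_PERFMON_EVENTSEL0	0x00000186	/* same base */
	#define MSR_ARCH_PERFMON_PERFCTR0	0x000000c1
	#define MSR_K7_EVNTSEL0			0xc0010000
	#define MSR_K7_PERFCTR0			0xc0010004
	/* event-select/counter i lives at the respective base + i */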
 
@@ -1150,63 +567,21 @@ void hw_perf_disable(void)
        if (!x86_pmu_initialized())
                return;
 
-       if (cpuc->enabled)
-               cpuc->n_added = 0;
-
-       x86_pmu.disable_all();
-}
-
-static void p6_pmu_enable_all(void)
-{
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-       unsigned long val;
-
-       if (cpuc->enabled)
-               return;
-
-       cpuc->enabled = 1;
-       barrier();
-
-       /* p6 only has one enable register */
-       rdmsrl(MSR_P6_EVNTSEL0, val);
-       val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
-       wrmsrl(MSR_P6_EVNTSEL0, val);
-}
-
-static void intel_pmu_enable_all(void)
-{
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-
-       if (cpuc->enabled)
+       if (!cpuc->enabled)
                return;
 
-       cpuc->enabled = 1;
+       cpuc->n_added = 0;
+       cpuc->enabled = 0;
        barrier();
 
-       wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
-
-       if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
-               struct perf_event *event =
-                       cpuc->events[X86_PMC_IDX_FIXED_BTS];
-
-               if (WARN_ON_ONCE(!event))
-                       return;
-
-               intel_pmu_enable_bts(event->hw.config);
-       }
+       x86_pmu.disable_all();
 }
 
-static void amd_pmu_enable_all(void)
+static void x86_pmu_enable_all(void)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        int idx;
 
-       if (cpuc->enabled)
-               return;
-
-       cpuc->enabled = 1;
-       barrier();
-
        for (idx = 0; idx < x86_pmu.num_events; idx++) {
                struct perf_event *event = cpuc->events[idx];
                u64 val;
@@ -1216,7 +591,7 @@ static void amd_pmu_enable_all(void)
 
                val = event->hw.config;
                val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
-               wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
+               wrmsrl(x86_pmu.eventsel + idx, val);
        }
 }
 
@@ -1229,53 +604,42 @@ static inline int is_x86_event(struct perf_event *event)
 
 static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
 {
-       int i, j , w, num;
-       int weight, wmax;
-       unsigned long *c;
-       u64 constraints[X86_PMC_IDX_MAX][BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+       struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
        unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
+       int i, j, w, wmax, num = 0;
        struct hw_perf_event *hwc;
 
        bitmap_zero(used_mask, X86_PMC_IDX_MAX);
 
        for (i = 0; i < n; i++) {
-               x86_pmu.get_event_constraints(cpuc,
-                                             cpuc->event_list[i],
-                                             constraints[i]);
+               constraints[i] =
+                 x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
        }
 
        /*
         * fastpath, try to reuse previous register
         */
-       for (i = 0, num = n; i < n; i++, num--) {
+       for (i = 0; i < n; i++) {
                hwc = &cpuc->event_list[i]->hw;
-               c = (unsigned long *)constraints[i];
+               c = constraints[i];
 
                /* never assigned */
                if (hwc->idx == -1)
                        break;
 
                /* constraint still honored */
-               if (!test_bit(hwc->idx, c))
+               if (!test_bit(hwc->idx, c->idxmsk))
                        break;
 
                /* not already used */
                if (test_bit(hwc->idx, used_mask))
                        break;
 
-#if 0
-               pr_debug("CPU%d fast config=0x%llx idx=%d assign=%c\n",
-                        smp_processor_id(),
-                        hwc->config,
-                        hwc->idx,
-                        assign ? 'y' : 'n');
-#endif
-
                set_bit(hwc->idx, used_mask);
                if (assign)
                        assign[i] = hwc->idx;
        }
-       if (!num)
+       if (i == n)
                goto done;
 
        /*
@@ -1306,14 +670,13 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
        for (w = 1, num = n; num && w <= wmax; w++) {
                /* for each event */
                for (i = 0; num && i < n; i++) {
-                       c = (unsigned long *)constraints[i];
+                       c = constraints[i];
                        hwc = &cpuc->event_list[i]->hw;
 
-                       weight = bitmap_weight(c, X86_PMC_IDX_MAX);
-                       if (weight != w)
+                       if (c->weight != w)
                                continue;
 
-                       for_each_bit(j, c, X86_PMC_IDX_MAX) {
+                       for_each_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
                                if (!test_bit(j, used_mask))
                                        break;
                        }
@@ -1321,14 +684,6 @@ static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
                        if (j == X86_PMC_IDX_MAX)
                                break;
 
-#if 0
-                       pr_debug("CPU%d slow config=0x%llx idx=%d assign=%c\n",
-                               smp_processor_id(),
-                               hwc->config,
-                               j,
-                               assign ? 'y' : 'n');
-#endif
-
                        set_bit(j, used_mask);
 
                        if (assign)
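Taken together: the fastpath tries to keep every event on the counter it used last time, and succeeds when i reaches n (the old version counted down num instead); otherwise the slowpath runs a greedy, weight-ordered search, placing events that can live on only one counter first, then two-counter events, and so on up to wmax, each taking the first free bit in its mask. Ascending-weight placement is a cheap heuristic rather than an optimal matching. A compact, self-contained model of the slowpath loop (hypothetical, four counters, no fixed-counter or fastpath handling):

	#include <stdio.h>

	#define NCTR 4

	/* mask[i]: which counters event i may use; assign[i] starts at -1 */
	static int schedule(const unsigned int *mask, int n, int *assign)
	{
		unsigned int used = 0;

		for (int w = 1; w <= NCTR; w++) {
			for (int i = 0; i < n; i++) {
				if (assign[i] >= 0 ||
				    __builtin_popcount(mask[i]) != w)
					continue;
				for (int j = 0; j < NCTR; j++) {
					if ((mask[i] & (1u << j)) &&
					    !(used & (1u << j))) {
						used |= 1u << j;
						assign[i] = j;
						break;
					}
				}
				if (assign[i] < 0)
					return -1;	/* over-committed */
			}
		}
		return 0;
	}

	int main(void)
	{
		unsigned int mask[3] = { 0x1, 0x3, 0xf }; /* weights 1,2,4 */
		int assign[3] = { -1, -1, -1 };

		if (!schedule(mask, 3, assign))
			printf("%d %d %d\n", assign[0], assign[1], assign[2]);
		return 0;	/* prints: 0 1 2 */
	}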
@@ -1387,11 +742,14 @@ static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader,
        return n;
 }
 
-
 static inline void x86_assign_hw_event(struct perf_event *event,
-                               struct hw_perf_event *hwc, int idx)
+                               struct cpu_hw_events *cpuc, int i)
 {
-       hwc->idx = idx;
+       struct hw_perf_event *hwc = &event->hw;
+
+       hwc->idx = cpuc->assign[i];
+       hwc->last_cpu = smp_processor_id();
+       hwc->last_tag = ++cpuc->tags[i];
 
        if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
                hwc->config_base = 0;
@@ -1410,6 +768,17 @@ static inline void x86_assign_hw_event(struct perf_event *event,
        }
 }
 
+static inline int match_prev_assignment(struct hw_perf_event *hwc,
+                                       struct cpu_hw_events *cpuc,
+                                       int i)
+{
+       return hwc->idx == cpuc->assign[i] &&
+               hwc->last_cpu == smp_processor_id() &&
+               hwc->last_tag == cpuc->tags[i];
+}
+
+static void x86_pmu_stop(struct perf_event *event);
+
 void hw_perf_enable(void)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
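The reuse test rests on the tagging introduced here: x86_assign_hw_event() stamps the event with the CPU it was programmed on and with last_tag, a copy of a per-slot generation counter that cpuc->tags[i] bumps on every fresh assignment. match_prev_assignment() can then cheaply establish "same counter, same CPU, nobody reused it in between", which is what lets hw_perf_enable() below skip reprogramming. The generation-counter idea in isolation (a sketch):

	#include <stdbool.h>

	struct slot {
		unsigned long long tag;
	};

	/* every new assignment starts a new generation */
	static unsigned long long claim(struct slot *s)
	{
		return ++s->tag;
	}

	/* a cached claim is valid only while no newer claim bumped it */
	static bool still_mine(const struct slot *s, unsigned long long mine)
	{
		return s->tag == mine;
	}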
@@ -1419,6 +788,10 @@ void hw_perf_enable(void)
 
        if (!x86_pmu_initialized())
                return;
+
+       if (cpuc->enabled)
+               return;
+
        if (cpuc->n_added) {
                /*
                 * apply assignment obtained either from
@@ -1432,16 +805,17 @@ void hw_perf_enable(void)
                        event = cpuc->event_list[i];
                        hwc = &event->hw;
 
-                       if (hwc->idx == -1 || hwc->idx == cpuc->assign[i])
+                       /*
+                        * we can avoid reprogramming counter if:
+                        * - assigned same counter as last time
+                        * - running on same CPU as last time
+                        * - no other event has used the counter since
+                        */
+                       if (hwc->idx == -1 ||
+                           match_prev_assignment(hwc, cpuc, i))
                                continue;
 
-                       x86_pmu.disable(hwc, hwc->idx);
-
-                       clear_bit(hwc->idx, cpuc->active_mask);
-                       barrier();
-                       cpuc->events[hwc->idx] = NULL;
-
-                       x86_perf_event_update(event, hwc, hwc->idx);
+                       x86_pmu_stop(event);
 
                        hwc->idx = -1;
                }
@@ -1452,12 +826,12 @@ void hw_perf_enable(void)
                        hwc = &event->hw;
 
                        if (hwc->idx == -1) {
-                               x86_assign_hw_event(event, hwc, cpuc->assign[i]);
+                               x86_assign_hw_event(event, cpuc, i);
                                x86_perf_event_set_period(event, hwc, hwc->idx);
                        }
                        /*
                         * need to mark as active because x86_pmu_disable()
-                        * clear active_mask and eventsp[] yet it preserves
+                        * clears active_mask and events[] yet it preserves
                         * idx
                         */
                        set_bit(hwc->idx, cpuc->active_mask);
@@ -1469,24 +843,14 @@ void hw_perf_enable(void)
                cpuc->n_added = 0;
                perf_events_lapic_init();
        }
-       x86_pmu.enable_all();
-}
-
-static inline u64 intel_pmu_get_status(void)
-{
-       u64 status;
 
-       rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
-
-       return status;
-}
+       cpuc->enabled = 1;
+       barrier();
 
-static inline void intel_pmu_ack_status(u64 ack)
-{
-       wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
+       x86_pmu.enable_all();
 }
 
-static inline void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
+static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
 {
        (void)checking_wrmsrl(hwc->config_base + idx,
                              hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
@@ -1497,53 +861,6 @@ static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
        (void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
 }
 
-static inline void
-intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
-{
-       int idx = __idx - X86_PMC_IDX_FIXED;
-       u64 ctrl_val, mask;
-
-       mask = 0xfULL << (idx * 4);
-
-       rdmsrl(hwc->config_base, ctrl_val);
-       ctrl_val &= ~mask;
-       (void)checking_wrmsrl(hwc->config_base, ctrl_val);
-}
-
-static inline void
-p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
-{
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-       u64 val = P6_NOP_EVENT;
-
-       if (cpuc->enabled)
-               val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
-
-       (void)checking_wrmsrl(hwc->config_base + idx, val);
-}
-
-static inline void
-intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
-{
-       if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
-               intel_pmu_disable_bts();
-               return;
-       }
-
-       if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
-               intel_pmu_disable_fixed(hwc, idx);
-               return;
-       }
-
-       x86_pmu_disable_event(hwc, idx);
-}
-
-static inline void
-amd_pmu_disable_event(struct hw_perf_event *hwc, int idx)
-{
-       x86_pmu_disable_event(hwc, idx);
-}
-
 static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
 
 /*
@@ -1602,69 +919,11 @@ x86_perf_event_set_period(struct perf_event *event,
        return ret;
 }
 
-static inline void
-intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
-{
-       int idx = __idx - X86_PMC_IDX_FIXED;
-       u64 ctrl_val, bits, mask;
-       int err;
-
-       /*
-        * Enable IRQ generation (0x8),
-        * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
-        * if requested:
-        */
-       bits = 0x8ULL;
-       if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
-               bits |= 0x2;
-       if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
-               bits |= 0x1;
-       bits <<= (idx * 4);
-       mask = 0xfULL << (idx * 4);
-
-       rdmsrl(hwc->config_base, ctrl_val);
-       ctrl_val &= ~mask;
-       ctrl_val |= bits;
-       err = checking_wrmsrl(hwc->config_base, ctrl_val);
-}
-
-static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
+static void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-       u64 val;
-
-       val = hwc->config;
        if (cpuc->enabled)
-               val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
-
-       (void)checking_wrmsrl(hwc->config_base + idx, val);
-}
-
-
-static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
-{
-       if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
-               if (!__get_cpu_var(cpu_hw_events).enabled)
-                       return;
-
-               intel_pmu_enable_bts(hwc->config);
-               return;
-       }
-
-       if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
-               intel_pmu_enable_fixed(hwc, idx);
-               return;
-       }
-
-       x86_pmu_enable_event(hwc, idx);
-}
-
-static void amd_pmu_enable_event(struct hw_perf_event *hwc, int idx)
-{
-       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
-
-       if (cpuc->enabled)
-               x86_pmu_enable_event(hwc, idx);
+               __x86_pmu_enable_event(hwc, idx);
 }
 
 /*
@@ -1702,8 +961,18 @@ static int x86_pmu_enable(struct perf_event *event)
        cpuc->n_events = n;
        cpuc->n_added  = n - n0;
 
-       if (hwc->idx != -1)
-               x86_perf_event_set_period(event, hwc, hwc->idx);
+       return 0;
+}
+
+static int x86_pmu_start(struct perf_event *event)
+{
+       struct hw_perf_event *hwc = &event->hw;
+
+       if (hwc->idx == -1)
+               return -EAGAIN;
+
+       x86_perf_event_set_period(event, hwc, hwc->idx);
+       x86_pmu.enable(hwc, hwc->idx);
 
        return 0;
 }
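x86_pmu_enable() is thereby reduced to bookkeeping: collect the event, dry-run the scheduler to prove the enlarged set still fits, bump n_added, and touch no hardware. The new x86_pmu_start() is the half that actually arms a counter once an index exists, refusing with -EAGAIN before scheduling has happened. Sketched as a helper, with names from this patch and every error path elided (the real call sites live in the generic perf core, so treat this as illustration only):

	static void add_and_arm(struct perf_event *event)
	{
		perf_disable();		/* hw_perf_disable(): PMU off   */
		x86_pmu_enable(event);	/* collect + schedule, no MSRs  */
		perf_enable();		/* hw_perf_enable(): assign,    */
					/* set periods, enable_all()    */
	}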
@@ -1771,71 +1040,11 @@ void perf_event_print_debug(void)
        local_irq_restore(flags);
 }
 
-static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc)
-{
-       struct debug_store *ds = cpuc->ds;
-       struct bts_record {
-               u64     from;
-               u64     to;
-               u64     flags;
-       };
-       struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
-       struct bts_record *at, *top;
-       struct perf_output_handle handle;
-       struct perf_event_header header;
-       struct perf_sample_data data;
-       struct pt_regs regs;
-
-       if (!event)
-               return;
-
-       if (!ds)
-               return;
-
-       at  = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
-       top = (struct bts_record *)(unsigned long)ds->bts_index;
-
-       if (top <= at)
-               return;
-
-       ds->bts_index = ds->bts_buffer_base;
-
-
-       data.period     = event->hw.last_period;
-       data.addr       = 0;
-       data.raw        = NULL;
-       regs.ip         = 0;
-
-       /*
-        * Prepare a generic sample, i.e. fill in the invariant fields.
-        * We will overwrite the from and to address before we output
-        * the sample.
-        */
-       perf_prepare_sample(&header, &data, event, &regs);
-
-       if (perf_output_begin(&handle, event,
-                             header.size * (top - at), 1, 1))
-               return;
-
-       for (; at < top; at++) {
-               data.ip         = at->from;
-               data.addr       = at->to;
-
-               perf_output_sample(&handle, &header, &data, event);
-       }
-
-       perf_output_end(&handle);
-
-       /* There's new data available. */
-       event->hw.interrupts++;
-       event->pending_kill = POLL_IN;
-}
-
-static void x86_pmu_disable(struct perf_event *event)
+static void x86_pmu_stop(struct perf_event *event)
 {
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
-       int i, idx = hwc->idx;
+       int idx = hwc->idx;
 
        /*
         * Must be done before we disable, otherwise the nmi handler
@@ -1844,23 +1053,21 @@ static void x86_pmu_disable(struct perf_event *event)
        clear_bit(idx, cpuc->active_mask);
        x86_pmu.disable(hwc, idx);
 
-       /*
-        * Make sure the cleared pointer becomes visible before we
-        * (potentially) free the event:
-        */
-       barrier();
-
        /*
         * Drain the remaining delta count out of an event
         * that we are disabling:
         */
        x86_perf_event_update(event, hwc, idx);
 
-       /* Drain the remaining BTS records. */
-       if (unlikely(idx == X86_PMC_IDX_FIXED_BTS))
-               intel_pmu_drain_bts_buffer(cpuc);
-
        cpuc->events[idx] = NULL;
+}
+
+static void x86_pmu_disable(struct perf_event *event)
+{
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+       int i;
+
+       x86_pmu_stop(event);
 
        for (i = 0; i < cpuc->n_events; i++) {
                if (event == cpuc->event_list[i]) {
@@ -1872,167 +1079,13 @@ static void x86_pmu_disable(struct perf_event *event)
                                cpuc->event_list[i-1] = cpuc->event_list[i];
 
                        --cpuc->n_events;
+                       break;
                }
        }
        perf_event_update_userpage(event);
 }
 
-/*
- * Save and restart an expired event. Called by NMI contexts,
- * so it has to be careful about preempting normal event ops:
- */
-static int intel_pmu_save_and_restart(struct perf_event *event)
-{
-       struct hw_perf_event *hwc = &event->hw;
-       int idx = hwc->idx;
-       int ret;
-
-       x86_perf_event_update(event, hwc, idx);
-       ret = x86_perf_event_set_period(event, hwc, idx);
-
-       if (event->state == PERF_EVENT_STATE_ACTIVE)
-               intel_pmu_enable_event(hwc, idx);
-
-       return ret;
-}
-
-static void intel_pmu_reset(void)
-{
-       struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
-       unsigned long flags;
-       int idx;
-
-       if (!x86_pmu.num_events)
-               return;
-
-       local_irq_save(flags);
-
-       printk("clearing PMU state on CPU#%d\n", smp_processor_id());
-
-       for (idx = 0; idx < x86_pmu.num_events; idx++) {
-               checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
-               checking_wrmsrl(x86_pmu.perfctr  + idx, 0ull);
-       }
-       for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
-               checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
-       }
-       if (ds)
-               ds->bts_index = ds->bts_buffer_base;
-
-       local_irq_restore(flags);
-}
-
-static int p6_pmu_handle_irq(struct pt_regs *regs)
-{
-       struct perf_sample_data data;
-       struct cpu_hw_events *cpuc;
-       struct perf_event *event;
-       struct hw_perf_event *hwc;
-       int idx, handled = 0;
-       u64 val;
-
-       data.addr = 0;
-       data.raw = NULL;
-
-       cpuc = &__get_cpu_var(cpu_hw_events);
-
-       for (idx = 0; idx < x86_pmu.num_events; idx++) {
-               if (!test_bit(idx, cpuc->active_mask))
-                       continue;
-
-               event = cpuc->events[idx];
-               hwc = &event->hw;
-
-               val = x86_perf_event_update(event, hwc, idx);
-               if (val & (1ULL << (x86_pmu.event_bits - 1)))
-                       continue;
-
-               /*
-                * event overflow
-                */
-               handled         = 1;
-               data.period     = event->hw.last_period;
-
-               if (!x86_perf_event_set_period(event, hwc, idx))
-                       continue;
-
-               if (perf_event_overflow(event, 1, &data, regs))
-                       p6_pmu_disable_event(hwc, idx);
-       }
-
-       if (handled)
-               inc_irq_stat(apic_perf_irqs);
-
-       return handled;
-}
-
-/*
- * This handler is triggered by the local APIC, so the APIC IRQ handling
- * rules apply:
- */
-static int intel_pmu_handle_irq(struct pt_regs *regs)
-{
-       struct perf_sample_data data;
-       struct cpu_hw_events *cpuc;
-       int bit, loops;
-       u64 ack, status;
-
-       data.addr = 0;
-       data.raw = NULL;
-
-       cpuc = &__get_cpu_var(cpu_hw_events);
-
-       perf_disable();
-       intel_pmu_drain_bts_buffer(cpuc);
-       status = intel_pmu_get_status();
-       if (!status) {
-               perf_enable();
-               return 0;
-       }
-
-       loops = 0;
-again:
-       if (++loops > 100) {
-               WARN_ONCE(1, "perfevents: irq loop stuck!\n");
-               perf_event_print_debug();
-               intel_pmu_reset();
-               perf_enable();
-               return 1;
-       }
-
-       inc_irq_stat(apic_perf_irqs);
-       ack = status;
-       for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
-               struct perf_event *event = cpuc->events[bit];
-
-               clear_bit(bit, (unsigned long *) &status);
-               if (!test_bit(bit, cpuc->active_mask))
-                       continue;
-
-               if (!intel_pmu_save_and_restart(event))
-                       continue;
-
-               data.period = event->hw.last_period;
-
-               if (perf_event_overflow(event, 1, &data, regs))
-                       intel_pmu_disable_event(&event->hw, bit);
-       }
-
-       intel_pmu_ack_status(ack);
-
-       /*
-        * Repeat if there is more work to be done:
-        */
-       status = intel_pmu_get_status();
-       if (status)
-               goto again;
-
-       perf_enable();
-
-       return 1;
-}
-
-static int amd_pmu_handle_irq(struct pt_regs *regs)
+static int x86_pmu_handle_irq(struct pt_regs *regs)
 {
        struct perf_sample_data data;
        struct cpu_hw_events *cpuc;
@@ -2067,7 +1120,7 @@ static int amd_pmu_handle_irq(struct pt_regs *regs)
                        continue;
 
                if (perf_event_overflow(event, 1, &data, regs))
-                       amd_pmu_disable_event(hwc, idx);
+                       x86_pmu.disable(hwc, idx);
        }
 
        if (handled)
@@ -2144,75 +1197,37 @@ perf_event_nmi_handler(struct notifier_block *self,
        return NOTIFY_STOP;
 }
 
-static struct event_constraint bts_constraint = {
-       .code = 0,
-       .cmask = 0,
-       .idxmsk[0] = 1ULL << X86_PMC_IDX_FIXED_BTS
+static __read_mostly struct notifier_block perf_event_nmi_notifier = {
+       .notifier_call          = perf_event_nmi_handler,
+       .next                   = NULL,
+       .priority               = 1
 };
 
-static int intel_special_constraints(struct perf_event *event,
-                                    u64 *idxmsk)
-{
-       unsigned int hw_event;
-
-       hw_event = event->hw.config & INTEL_ARCH_EVENT_MASK;
-
-       if (unlikely((hw_event ==
-                     x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
-                    (event->hw.sample_period == 1))) {
-
-               bitmap_copy((unsigned long *)idxmsk,
-                           (unsigned long *)bts_constraint.idxmsk,
-                           X86_PMC_IDX_MAX);
-               return 1;
-       }
-       return 0;
-}
+static struct event_constraint unconstrained;
+static struct event_constraint emptyconstraint;
 
-static void intel_get_event_constraints(struct cpu_hw_events *cpuc,
-                                       struct perf_event *event,
-                                       u64 *idxmsk)
+static struct event_constraint *
+x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
 {
-       const struct event_constraint *c;
-
-       /*
-        * cleanup bitmask
-        */
-       bitmap_zero((unsigned long *)idxmsk, X86_PMC_IDX_MAX);
-
-       if (intel_special_constraints(event, idxmsk))
-               return;
+       struct event_constraint *c;
 
        if (x86_pmu.event_constraints) {
                for_each_event_constraint(c, x86_pmu.event_constraints) {
-                       if ((event->hw.config & c->cmask) == c->code) {
-
-                               bitmap_copy((unsigned long *)idxmsk,
-                                           (unsigned long *)c->idxmsk,
-                                           X86_PMC_IDX_MAX);
-                               return;
-                       }
+                       if ((event->hw.config & c->cmask) == c->code)
+                               return c;
                }
        }
-       /* no constraints, means supports all generic counters */
-       bitmap_fill((unsigned long *)idxmsk, x86_pmu.num_events);
-}
 
-static void amd_get_event_constraints(struct cpu_hw_events *cpuc,
-                                     struct perf_event *event,
-                                     u64 *idxmsk)
-{
-       /* no constraints, means supports all generic counters */
-       bitmap_fill((unsigned long *)idxmsk, x86_pmu.num_events);
+       return &unconstrained;
 }
 
 static int x86_event_sched_in(struct perf_event *event,
-                         struct perf_cpu_context *cpuctx, int cpu)
+                         struct perf_cpu_context *cpuctx)
 {
        int ret = 0;
 
        event->state = PERF_EVENT_STATE_ACTIVE;
-       event->oncpu = cpu;
+       event->oncpu = smp_processor_id();
        event->tstamp_running += event->ctx->time - event->tstamp_stopped;
 
        if (!is_x86_event(event))
@@ -2228,7 +1243,7 @@ static int x86_event_sched_in(struct perf_event *event,
 }
 
 static void x86_event_sched_out(struct perf_event *event,
-                           struct perf_cpu_context *cpuctx, int cpu)
+                           struct perf_cpu_context *cpuctx)
 {
        event->state = PERF_EVENT_STATE_INACTIVE;
        event->oncpu = -1;
@@ -2256,9 +1271,9 @@ static void x86_event_sched_out(struct perf_event *event,
  */
 int hw_perf_group_sched_in(struct perf_event *leader,
               struct perf_cpu_context *cpuctx,
-              struct perf_event_context *ctx, int cpu)
+              struct perf_event_context *ctx)
 {
-       struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+       struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct perf_event *sub;
        int assign[X86_PMC_IDX_MAX];
        int n0, n1, ret;
@@ -2272,14 +1287,14 @@ int hw_perf_group_sched_in(struct perf_event *leader,
        if (ret)
                return ret;
 
-       ret = x86_event_sched_in(leader, cpuctx, cpu);
+       ret = x86_event_sched_in(leader, cpuctx);
        if (ret)
                return ret;
 
        n1 = 1;
        list_for_each_entry(sub, &leader->sibling_list, group_entry) {
                if (sub->state > PERF_EVENT_STATE_OFF) {
-                       ret = x86_event_sched_in(sub, cpuctx, cpu);
+                       ret = x86_event_sched_in(sub, cpuctx);
                        if (ret)
                                goto undo;
                        ++n1;
@@ -2304,11 +1319,11 @@ int hw_perf_group_sched_in(struct perf_event *leader,
         */
        return 1;
 undo:
-       x86_event_sched_out(leader, cpuctx, cpu);
+       x86_event_sched_out(leader, cpuctx);
        n0  = 1;
        list_for_each_entry(sub, &leader->sibling_list, group_entry) {
                if (sub->state == PERF_EVENT_STATE_ACTIVE) {
-                       x86_event_sched_out(sub, cpuctx, cpu);
+                       x86_event_sched_out(sub, cpuctx);
                        if (++n0 == n1)
                                break;
                }
@@ -2316,204 +1331,9 @@ undo:
        return ret;
 }
 
-static __read_mostly struct notifier_block perf_event_nmi_notifier = {
-       .notifier_call          = perf_event_nmi_handler,
-       .next                   = NULL,
-       .priority               = 1
-};
-
-static __initconst struct x86_pmu p6_pmu = {
-       .name                   = "p6",
-       .handle_irq             = p6_pmu_handle_irq,
-       .disable_all            = p6_pmu_disable_all,
-       .enable_all             = p6_pmu_enable_all,
-       .enable                 = p6_pmu_enable_event,
-       .disable                = p6_pmu_disable_event,
-       .eventsel               = MSR_P6_EVNTSEL0,
-       .perfctr                = MSR_P6_PERFCTR0,
-       .event_map              = p6_pmu_event_map,
-       .raw_event              = p6_pmu_raw_event,
-       .max_events             = ARRAY_SIZE(p6_perfmon_event_map),
-       .apic                   = 1,
-       .max_period             = (1ULL << 31) - 1,
-       .version                = 0,
-       .num_events             = 2,
-       /*
-        * Events have 40 bits implemented. However they are designed such
-        * that bits [32-39] are sign extensions of bit 31. As such the
-        * effective width of an event for P6-like PMU is 32 bits only.
-        *
-        * See IA-32 Intel Architecture Software developer manual Vol 3B
-        */
-       .event_bits             = 32,
-       .event_mask             = (1ULL << 32) - 1,
-       .get_event_constraints  = intel_get_event_constraints,
-       .event_constraints      = intel_p6_event_constraints
-};
-
-static __initconst struct x86_pmu intel_pmu = {
-       .name                   = "Intel",
-       .handle_irq             = intel_pmu_handle_irq,
-       .disable_all            = intel_pmu_disable_all,
-       .enable_all             = intel_pmu_enable_all,
-       .enable                 = intel_pmu_enable_event,
-       .disable                = intel_pmu_disable_event,
-       .eventsel               = MSR_ARCH_PERFMON_EVENTSEL0,
-       .perfctr                = MSR_ARCH_PERFMON_PERFCTR0,
-       .event_map              = intel_pmu_event_map,
-       .raw_event              = intel_pmu_raw_event,
-       .max_events             = ARRAY_SIZE(intel_perfmon_event_map),
-       .apic                   = 1,
-       /*
-        * Intel PMCs cannot be accessed sanely above 32 bit width,
-        * so we install an artificial 1<<31 period regardless of
-        * the generic event period:
-        */
-       .max_period             = (1ULL << 31) - 1,
-       .enable_bts             = intel_pmu_enable_bts,
-       .disable_bts            = intel_pmu_disable_bts,
-       .get_event_constraints  = intel_get_event_constraints
-};
-
-static __initconst struct x86_pmu amd_pmu = {
-       .name                   = "AMD",
-       .handle_irq             = amd_pmu_handle_irq,
-       .disable_all            = amd_pmu_disable_all,
-       .enable_all             = amd_pmu_enable_all,
-       .enable                 = amd_pmu_enable_event,
-       .disable                = amd_pmu_disable_event,
-       .eventsel               = MSR_K7_EVNTSEL0,
-       .perfctr                = MSR_K7_PERFCTR0,
-       .event_map              = amd_pmu_event_map,
-       .raw_event              = amd_pmu_raw_event,
-       .max_events             = ARRAY_SIZE(amd_perfmon_event_map),
-       .num_events             = 4,
-       .event_bits             = 48,
-       .event_mask             = (1ULL << 48) - 1,
-       .apic                   = 1,
-       /* use highest bit to detect overflow */
-       .max_period             = (1ULL << 47) - 1,
-       .get_event_constraints  = amd_get_event_constraints
-};
-
-static __init int p6_pmu_init(void)
-{
-       switch (boot_cpu_data.x86_model) {
-       case 1:
-       case 3:  /* Pentium Pro */
-       case 5:
-       case 6:  /* Pentium II */
-       case 7:
-       case 8:
-       case 11: /* Pentium III */
-       case 9:
-       case 13:
-               /* Pentium M */
-               break;
-       default:
-               pr_cont("unsupported p6 CPU model %d ",
-                       boot_cpu_data.x86_model);
-               return -ENODEV;
-       }
-
-       x86_pmu = p6_pmu;
-
-       return 0;
-}
-
-static __init int intel_pmu_init(void)
-{
-       union cpuid10_edx edx;
-       union cpuid10_eax eax;
-       unsigned int unused;
-       unsigned int ebx;
-       int version;
-
-       if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
-               /* check for P6 processor family */
-               if (boot_cpu_data.x86 == 6) {
-                       return p6_pmu_init();
-               } else {
-                       return -ENODEV;
-               }
-       }
-
-       /*
-        * Check whether the Architectural PerfMon supports
-        * Branch Misses Retired hw_event or not.
-        */
-       cpuid(10, &eax.full, &ebx, &unused, &edx.full);
-       if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
-               return -ENODEV;
-
-       version = eax.split.version_id;
-       if (version < 2)
-               return -ENODEV;
-
-       x86_pmu                         = intel_pmu;
-       x86_pmu.version                 = version;
-       x86_pmu.num_events              = eax.split.num_events;
-       x86_pmu.event_bits              = eax.split.bit_width;
-       x86_pmu.event_mask              = (1ULL << eax.split.bit_width) - 1;
-
-       /*
-        * Quirk: v2 perfmon does not report fixed-purpose events, so
-        * assume at least 3 events:
-        */
-       x86_pmu.num_events_fixed        = max((int)edx.split.num_events_fixed, 3);
-
-       /*
-        * Install the hw-cache-events table:
-        */
-       switch (boot_cpu_data.x86_model) {
-       case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
-       case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
-       case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
-       case 29: /* six-core 45 nm xeon "Dunnington" */
-               memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
-                      sizeof(hw_cache_event_ids));
-
-               x86_pmu.event_constraints = intel_core_event_constraints;
-               pr_cont("Core2 events, ");
-               break;
-       case 26:
-               memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
-                      sizeof(hw_cache_event_ids));
-
-               x86_pmu.event_constraints = intel_nehalem_event_constraints;
-               pr_cont("Nehalem/Corei7 events, ");
-               break;
-       case 28:
-               memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
-                      sizeof(hw_cache_event_ids));
-
-               x86_pmu.event_constraints = intel_gen_event_constraints;
-               pr_cont("Atom events, ");
-               break;
-       default:
-               /*
-                * default constraints for v2 and up
-                */
-               x86_pmu.event_constraints = intel_gen_event_constraints;
-               pr_cont("generic architected perfmon, ");
-       }
-       return 0;
-}
-
-static __init int amd_pmu_init(void)
-{
-       /* Performance monitoring is supported on K7 and later: */
-       if (boot_cpu_data.x86 < 6)
-               return -ENODEV;
-
-       x86_pmu = amd_pmu;
-
-       /* Events are common to all AMD CPUs */
-       memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
-              sizeof(hw_cache_event_ids));
-
-       return 0;
-}
+#include "perf_event_amd.c"
+#include "perf_event_p6.c"
+#include "perf_event_intel.c"
 
 static void __init pmu_check_apic(void)
 {
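
The roughly 200 removed lines above are not lost: the p6, Intel and AMD struct x86_pmu templates and their *_pmu_init() routines move into the three vendor files that are now textually #included. The core keeps a single active descriptor and copies the matching vendor template into it at boot. Reduced to a hedged standalone sketch (all names here are hypothetical; the real code includes one .c file per vendor instead of defining them inline):

#include <stdio.h>
#include <string.h>

struct pmu_ops {
	const char	*name;
	int		num_events;
	int		(*init)(void);
};

static struct pmu_ops pmu;	/* the single active PMU, like x86_pmu */

/* what would live in a per-vendor file, e.g. perf_event_amd.c: */
static int amd_init(void)	{ return 0; }
static const struct pmu_ops amd_pmu_tmpl   = { "AMD",   4, amd_init };

/* what would live in perf_event_intel.c: */
static int intel_init(void)	{ return 0; }
static const struct pmu_ops intel_pmu_tmpl = { "Intel", 2, intel_init };

static int pmu_probe(const char *vendor)
{
	const struct pmu_ops *tmpl =
		!strcmp(vendor, "AMD") ? &amd_pmu_tmpl : &intel_pmu_tmpl;

	pmu = *tmpl;		/* copy the template, as x86_pmu = amd_pmu does */
	return pmu.init();
}

int main(void)
{
	if (!pmu_probe("AMD"))
		printf("using %s PMU, %d counters\n", pmu.name, pmu.num_events);
	return 0;
}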
@@ -2571,6 +1391,10 @@ void __init init_hw_perf_events(void)
        perf_events_lapic_init();
        register_die_notifier(&perf_event_nmi_notifier);
 
+       unconstrained = (struct event_constraint)
+               __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1,
+                                  0, x86_pmu.num_events);
+
        pr_info("... version:                %d\n",     x86_pmu.version);
        pr_info("... bit width:              %d\n",     x86_pmu.event_bits);
        pr_info("... generic registers:      %d\n",     x86_pmu.num_events);
@@ -2588,6 +1412,8 @@ static inline void x86_pmu_read(struct perf_event *event)
 static const struct pmu pmu = {
        .enable         = x86_pmu_enable,
        .disable        = x86_pmu_disable,
+       .start          = x86_pmu_start,
+       .stop           = x86_pmu_stop,
        .read           = x86_pmu_read,
        .unthrottle     = x86_pmu_unthrottle,
 };
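
The pmu ops table grows .start and .stop callbacks alongside .enable/.disable. A hedged sketch of extending such a function-pointer table; the semantics shown (stop pauses counting in place, start resumes it, as opposed to a full disable/enable cycle) are an assumption for illustration, and all names are hypothetical:

#include <stdio.h>

struct counter_ops {
	void (*enable)(int idx);
	void (*disable)(int idx);
	void (*start)(int idx);		/* resume counting in place */
	void (*stop)(int idx);		/* pause counting, keep assignment */
};

static void op(const char *what, int idx) { printf("%s %d\n", what, idx); }
static void c_enable(int i)  { op("enable", i); }
static void c_disable(int i) { op("disable", i); }
static void c_start(int i)   { op("start", i); }
static void c_stop(int i)    { op("stop", i); }

static const struct counter_ops ops = {
	.enable	= c_enable, .disable	= c_disable,
	.start	= c_start,  .stop	= c_stop,
};

int main(void)
{
	ops.enable(0);
	ops.stop(0);		/* e.g. while adjusting the sample period */
	ops.start(0);
	ops.disable(0);
	return 0;
}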
@@ -2596,9 +1422,9 @@ static const struct pmu pmu = {
  * validate a single event group
  *
 * validation includes:
- *     - check events are compatible with each other
- *     - events do not compete for the same counter
- *     - number of events <= number of counters
+ *     - check events are compatible with each other
+ *     - events do not compete for the same counter
+ *     - number of events <= number of counters
  *
  * validation ensures the group can be loaded onto the
  * PMU if it was the only group available.
@@ -2606,10 +1432,13 @@ static const struct pmu pmu = {
 static int validate_group(struct perf_event *event)
 {
        struct perf_event *leader = event->group_leader;
-       struct cpu_hw_events fake_cpuc;
-       int n;
+       struct cpu_hw_events *fake_cpuc;
+       int ret, n;
 
-       memset(&fake_cpuc, 0, sizeof(fake_cpuc));
+       ret = -ENOMEM;
+       fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
+       if (!fake_cpuc)
+               goto out;
 
        /*
         * the event is not yet connected with its
@@ -2617,18 +1446,24 @@ static int validate_group(struct perf_event *event)
         * existing siblings, then add the new event
         * before we can simulate the scheduling
         */
-       n = collect_events(&fake_cpuc, leader, true);
+       ret = -ENOSPC;
+       n = collect_events(fake_cpuc, leader, true);
        if (n < 0)
-               return -ENOSPC;
+               goto out_free;
 
-       fake_cpuc.n_events = n;
-       n = collect_events(&fake_cpuc, event, false);
+       fake_cpuc->n_events = n;
+       n = collect_events(fake_cpuc, event, false);
        if (n < 0)
-               return -ENOSPC;
+               goto out_free;
+
+       fake_cpuc->n_events = n;
 
-       fake_cpuc.n_events = n;
+       ret = x86_schedule_events(fake_cpuc, n, NULL);
 
-       return x86_schedule_events(&fake_cpuc, n, NULL);
+out_free:
+       kfree(fake_cpuc);
+out:
+       return ret;
 }
 
 const struct pmu *hw_perf_event_init(struct perf_event *event)
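
The validate_group() rework above trades an on-stack cpu_hw_events (which has grown large) for a heap allocation, and that in turn forces the goto-based unwind: every failure path sets ret first, then jumps to a label that frees the scratch structure on the way out. The shape of that error handling as a standalone sketch, with hypothetical stand-ins for collect_events() and x86_schedule_events():

#include <errno.h>
#include <stdlib.h>

struct fake_cpuc { int n_events; };

static int collect(struct fake_cpuc *c)      { return c->n_events + 1; }
static int schedule_all(struct fake_cpuc *c) { (void)c; return 0; }

static int validate(void)
{
	struct fake_cpuc *fake_cpuc;
	int ret, n;

	ret = -ENOMEM;
	fake_cpuc = calloc(1, sizeof(*fake_cpuc)); /* kmalloc + __GFP_ZERO */
	if (!fake_cpuc)
		goto out;

	ret = -ENOSPC;
	n = collect(fake_cpuc);			/* leader + siblings */
	if (n < 0)
		goto out_free;
	fake_cpuc->n_events = n;

	n = collect(fake_cpuc);			/* then the new event */
	if (n < 0)
		goto out_free;
	fake_cpuc->n_events = n;

	ret = schedule_all(fake_cpuc);		/* would the group fit? */
out_free:
	free(fake_cpuc);
out:
	return ret;
}

int main(void) { return validate() ? 1 : 0; }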
@@ -2830,4 +1665,25 @@ struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
 void hw_perf_event_setup_online(int cpu)
 {
        init_debug_store_on_cpu(cpu);
+
+       switch (boot_cpu_data.x86_vendor) {
+       case X86_VENDOR_AMD:
+               amd_pmu_cpu_online(cpu);
+               break;
+       default:
+               return;
+       }
+}
+
+void hw_perf_event_setup_offline(int cpu)
+{
+       init_debug_store_on_cpu(cpu);
+
+       switch (boot_cpu_data.x86_vendor) {
+       case X86_VENDOR_AMD:
+               amd_pmu_cpu_offline(cpu);
+               break;
+       default:
+               return;
+       }
 }
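
The hotplug hooks dispatch by vendor because only AMD needs per-package work here: amd_pmu_cpu_online()/amd_pmu_cpu_offline(), defined in the included perf_event_amd.c, maintain a structure shared by all CPUs on the same northbridge. A hedged guess at the refcounting shape, as a standalone sketch (the names and the fixed node table are hypothetical):

#include <stdio.h>
#include <stdlib.h>

#define MAX_NODES 8

struct node_res {
	int node_id;
	int refcnt;
};

static struct node_res *nodes[MAX_NODES];	/* one slot per node id */

static void cpu_online(int node_id)
{
	if (!nodes[node_id]) {
		nodes[node_id] = calloc(1, sizeof(struct node_res));
		if (!nodes[node_id])
			return;
		nodes[node_id]->node_id = node_id;
	}
	nodes[node_id]->refcnt++;
}

static void cpu_offline(int node_id)
{
	if (nodes[node_id] && --nodes[node_id]->refcnt == 0) {
		free(nodes[node_id]);
		nodes[node_id] = NULL;
	}
}

int main(void)
{
	cpu_online(0);		/* first CPU on node 0 allocates */
	cpu_online(0);		/* second CPU just takes a reference */
	cpu_offline(0);
	cpu_offline(0);		/* last CPU out frees the structure */
	printf("node 0: %s\n", nodes[0] ? "still allocated" : "freed");
	return 0;
}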