Merge branch 'perf/x86' into perf/core, because it's ready
[cascardo/linux.git] / include/linux/perf_event.h
index 2b62198..b16eac5 100644
@@ -53,6 +53,7 @@ struct perf_guest_info_callbacks {
 #include <linux/sysfs.h>
 #include <linux/perf_regs.h>
 #include <linux/workqueue.h>
+#include <linux/cgroup.h>
 #include <asm/local.h>
 
 struct perf_callchain_entry {
@@ -118,10 +119,16 @@ struct hw_perf_event {
                        struct hrtimer  hrtimer;
                };
                struct { /* tracepoint */
-                       struct task_struct      *tp_target;
                        /* for tp_event->class */
                        struct list_head        tp_list;
                };
+               struct { /* intel_cqm */
+                       int                     cqm_state;
+                       int                     cqm_rmid;
+                       struct list_head        cqm_events_entry;
+                       struct list_head        cqm_groups_entry;
+                       struct list_head        cqm_group_entry;
+               };
 #ifdef CONFIG_HAVE_HW_BREAKPOINT
                struct { /* breakpoint */
                        /*
@@ -129,12 +136,12 @@ struct hw_perf_event {
                         * problem hw_breakpoint has with context
                         * creation and event initialization.
                         */
-                       struct task_struct              *bp_target;
                        struct arch_hw_breakpoint       info;
                        struct list_head                bp_list;
                };
 #endif
        };
+       struct task_struct              *target;
        int                             state;
        local64_t                       prev_count;
        u64                             sample_period;
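
The two hw_perf_event hunks above fold the per-subtype task pointers (tp_target for tracepoints, bp_target for breakpoints) into a single target field outside the union, and add the per-event state used by the new intel_cqm (Cache QoS Monitoring) driver. A minimal before/after sketch of what the unification buys callers; the helper names are illustrative, not from the patch:

/* Before: callers had to know the event subtype to find its task. */
static struct task_struct *event_target_old(struct perf_event *event)
{
#ifdef CONFIG_HAVE_HW_BREAKPOINT
	if (event->attr.type == PERF_TYPE_BREAKPOINT)
		return event->hw.bp_target;
#endif
	return event->hw.tp_target;
}

/* After: one field covers every subtype. */
static struct task_struct *event_target_new(struct perf_event *event)
{
	return event->hw.target;
}
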
@@ -262,9 +269,20 @@ struct pmu {
        int (*event_idx)                (struct perf_event *event); /* optional */
 
        /*
-        * flush branch stack on context-switches (needed in cpu-wide mode)
+        * context-switch callback
+        */
+       void (*sched_task)              (struct perf_event_context *ctx,
+                                       bool sched_in);
+       /*
+        * PMU-specific per-task data size
+        */
+       size_t                          task_ctx_size;
+
+
+       /*
+        * Return the count value for a counter.
         */
-       void (*flush_branch_stack)      (void);
+       u64 (*count)                    (struct perf_event *event); /* optional */
 };
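
Taken together, the struct pmu changes replace the single-purpose flush_branch_stack() hook with a generic sched_task() context-switch callback, add task_ctx_size so the core knows how much PMU-private per-task state to allocate, and let a driver override how a counter is read via count(). A sketch of how a driver might wire these up; the driver, its context struct and read_hw_counter() are hypothetical:

/* Hypothetical per-task state, sized by .task_ctx_size below. */
struct my_pmu_task_ctx {
	u64	lbr_snapshot[16];
	int	users;
};

static void my_pmu_sched_task(struct perf_event_context *ctx, bool sched_in)
{
	struct my_pmu_task_ctx *tc = ctx->task_ctx_data;

	if (sched_in)
		tc->users++;	/* placeholder: restore saved per-task state */
	else
		tc->users--;	/* placeholder: save live per-task state */
}

static u64 my_pmu_count(struct perf_event *event)
{
	/*
	 * Read a shared hardware resource instead of the generic
	 * event->count, as intel_cqm does with its RMID-indexed
	 * occupancy counter. read_hw_counter() is a placeholder.
	 */
	return read_hw_counter(event->hw.cqm_rmid);
}

static struct pmu my_pmu = {
	.sched_task	= my_pmu_sched_task,
	.task_ctx_size	= sizeof(struct my_pmu_task_ctx),
	.count		= my_pmu_count,
};
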
 
 /**
@@ -300,6 +318,7 @@ struct swevent_hlist {
 #define PERF_ATTACH_CONTEXT    0x01
 #define PERF_ATTACH_GROUP      0x02
 #define PERF_ATTACH_TASK       0x04
+#define PERF_ATTACH_TASK_DATA  0x08
 
 struct perf_cgroup;
 struct ring_buffer;
@@ -504,7 +523,7 @@ struct perf_event_context {
        u64                             generation;
        int                             pin_count;
        int                             nr_cgroups;      /* cgroup evts */
-       int                             nr_branch_stack; /* branch_stack evt */
+       void                            *task_ctx_data; /* pmu specific data */
        struct rcu_head                 rcu_head;
 
        struct delayed_work             orphans_remove;
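
PERF_ATTACH_TASK_DATA (previous hunk) and the new task_ctx_data pointer work as a pair: an event whose PMU needs per-task state sets the attach bit at init time, and the core then allocates task_ctx_size bytes for the context. The real plumbing lives in kernel/events/core.c, which this header diff does not show; roughly, the two sides look like this (the init condition is illustrative):

/* Driver side: request per-task data when the event is set up. */
static int my_pmu_event_init(struct perf_event *event)
{
	if (event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK)
		event->attach_state |= PERF_ATTACH_TASK_DATA;
	return 0;
}

/* Core side: allocate the PMU-private blob for the task context. */
static int alloc_task_ctx_data(struct perf_event_context *ctx,
			       struct pmu *pmu)
{
	ctx->task_ctx_data = kzalloc(pmu->task_ctx_size, GFP_KERNEL);
	return ctx->task_ctx_data ? 0 : -ENOMEM;
}
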
@@ -540,6 +559,35 @@ struct perf_output_handle {
        int                             page;
 };
 
+#ifdef CONFIG_CGROUP_PERF
+
+/*
+ * perf_cgroup_info keeps track of time_enabled for a cgroup.
+ * This is a per-cpu dynamically allocated data structure.
+ */
+struct perf_cgroup_info {
+       u64                             time;
+       u64                             timestamp;
+};
+
+struct perf_cgroup {
+       struct cgroup_subsys_state      css;
+       struct perf_cgroup_info __percpu *info;
+};
+
+/*
+ * Must ensure cgroup is pinned (css_get) before calling
+ * this function. In other words, we cannot call this function
+ * if there is no cgroup event for the current CPU context.
+ */
+static inline struct perf_cgroup *
+perf_cgroup_from_task(struct task_struct *task)
+{
+       return container_of(task_css(task, perf_event_cgrp_id),
+                           struct perf_cgroup, css);
+}
+#endif /* CONFIG_CGROUP_PERF */
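+
+These definitions move perf_cgroup out of kernel/events/core.c and into the header (hence the new linux/cgroup.h include at the top of the file), so code outside core.c can resolve a task's perf cgroup. A minimal sketch of how the per-cpu info is used to account enabled time; my_update_cgrp_time() is illustrative, modelled on the core's internal update helper, and assumes the caller honours the pinning rule stated in the comment above:

static void my_update_cgrp_time(struct task_struct *task, u64 now)
{
	struct perf_cgroup *cgrp = perf_cgroup_from_task(task);
	struct perf_cgroup_info *info = this_cpu_ptr(cgrp->info);

	info->time += now - info->timestamp;	/* accumulate enabled time */
	info->timestamp = now;
}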
+
 #ifdef CONFIG_PERF_EVENTS
 
 extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
@@ -558,6 +606,8 @@ extern void perf_event_delayed_put(struct task_struct *task);
 extern void perf_event_print_debug(void);
 extern void perf_pmu_disable(struct pmu *pmu);
 extern void perf_pmu_enable(struct pmu *pmu);
+extern void perf_sched_cb_dec(struct pmu *pmu);
+extern void perf_sched_cb_inc(struct pmu *pmu);
 extern int perf_event_task_disable(void);
 extern int perf_event_task_enable(void);
 extern int perf_event_refresh(struct perf_event *event, int refresh);
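
perf_sched_cb_inc()/perf_sched_cb_dec() reference-count the sched_task() callback, so the context-switch path only pays for it while at least one active event needs it (the old flush_branch_stack() was instead driven by the per-context nr_branch_stack count removed above). A sketch of the usage pattern, loosely following the x86 LBR call-stack code in this series; the enable/disable hooks are illustrative:

static void my_pmu_lbr_enable(struct perf_event *event)
{
	/* arm the context-switch callback while this event exists */
	perf_sched_cb_inc(event->ctx->pmu);
}

static void my_pmu_lbr_disable(struct perf_event *event)
{
	perf_sched_cb_dec(event->ctx->pmu);
}
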
@@ -731,6 +781,11 @@ static inline void perf_event_task_sched_out(struct task_struct *prev,
                __perf_event_task_sched_out(prev, next);
 }
 
+static inline u64 __perf_event_count(struct perf_event *event)
+{
+       return local64_read(&event->count) + atomic64_read(&event->child_count);
+}
+
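+
+__perf_event_count() is the generic read: the event's own value plus what its exited children contributed. Exposing it as an inline lets the core fall back to it when a PMU does not override count(). The matching core.c change is not part of this header diff, but presumably reads along these lines:

static u64 perf_event_count(struct perf_event *event)
{
	if (event->pmu->count)
		return event->pmu->count(event);

	return __perf_event_count(event);
}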
 extern void perf_event_mmap(struct vm_area_struct *vma);
 extern struct perf_guest_info_callbacks *perf_guest_cbs;
 extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
@@ -800,6 +855,11 @@ static inline bool has_branch_stack(struct perf_event *event)
        return event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK;
 }
 
+static inline bool needs_branch_stack(struct perf_event *event)
+{
+       return event->attr.branch_sample_type != 0;
+}
+
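+
+has_branch_stack() asks whether branch records must be written into samples; the new needs_branch_stack() asks the weaker question of whether the branch-sampling hardware must be programmed at all. The two diverge for events that use the LBRs purely internally, such as the call-stack mode this series introduces. Illustrative attribute setups, seen from userspace:

struct perf_event_attr a = {
	.sample_type        = PERF_SAMPLE_BRANCH_STACK,
	.branch_sample_type = PERF_SAMPLE_BRANCH_ANY,
};	/* has_branch_stack() and needs_branch_stack() both true */

struct perf_event_attr b = {
	.sample_type        = PERF_SAMPLE_IP,
	.branch_sample_type = PERF_SAMPLE_BRANCH_CALL_STACK,
};	/* needs_branch_stack() only: LBRs are programmed for
	 * call-stack unwinding, never copied into samples */
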
 extern int perf_output_begin(struct perf_output_handle *handle,
                             struct perf_event *event, unsigned int size);
 extern void perf_output_end(struct perf_output_handle *handle);