/*
 * Performance events:
 *
 *    Copyright (C) 2008-2009, Thomas Gleixner <tglx@linutronix.de>
 *    Copyright (C) 2008-2009, Red Hat, Inc., Ingo Molnar
 *    Copyright (C) 2008-2009, Red Hat, Inc., Peter Zijlstra
 *
 * Data type definitions, declarations, prototypes.
 *
 *    Started by: Thomas Gleixner and Ingo Molnar
 *
 * For licensing details see kernel-base/COPYING
 */
#ifndef _LINUX_PERF_EVENT_H
#define _LINUX_PERF_EVENT_H

#include <linux/types.h>
#include <linux/ioctl.h>
#include <asm/byteorder.h>

/*
 * User-space ABI bits:
 */

/*
 * attr.type
 */
enum perf_type_id {
        PERF_TYPE_HARDWARE                      = 0,
        PERF_TYPE_SOFTWARE                      = 1,
        PERF_TYPE_TRACEPOINT                    = 2,
        PERF_TYPE_HW_CACHE                      = 3,
        PERF_TYPE_RAW                           = 4,
        PERF_TYPE_BREAKPOINT                    = 5,

        PERF_TYPE_MAX,                          /* non-ABI */
};

/*
 * Generalized performance event event_id types, used by the
 * attr.config parameter of the sys_perf_event_open()
 * syscall:
 */
enum perf_hw_id {
        /*
         * Common hardware events, generalized by the kernel:
         */
        PERF_COUNT_HW_CPU_CYCLES                = 0,
        PERF_COUNT_HW_INSTRUCTIONS              = 1,
        PERF_COUNT_HW_CACHE_REFERENCES          = 2,
        PERF_COUNT_HW_CACHE_MISSES              = 3,
        PERF_COUNT_HW_BRANCH_INSTRUCTIONS       = 4,
        PERF_COUNT_HW_BRANCH_MISSES             = 5,
        PERF_COUNT_HW_BUS_CYCLES                = 6,

        PERF_COUNT_HW_MAX,                      /* non-ABI */
};

/*
 * Generalized hardware cache events:
 *
 *       { L1-D, L1-I, LLC, ITLB, DTLB, BPU } x
 *       { read, write, prefetch } x
 *       { accesses, misses }
 */
enum perf_hw_cache_id {
        PERF_COUNT_HW_CACHE_L1D                 = 0,
        PERF_COUNT_HW_CACHE_L1I                 = 1,
        PERF_COUNT_HW_CACHE_LL                  = 2,
        PERF_COUNT_HW_CACHE_DTLB                = 3,
        PERF_COUNT_HW_CACHE_ITLB                = 4,
        PERF_COUNT_HW_CACHE_BPU                 = 5,

        PERF_COUNT_HW_CACHE_MAX,                /* non-ABI */
};

enum perf_hw_cache_op_id {
        PERF_COUNT_HW_CACHE_OP_READ             = 0,
        PERF_COUNT_HW_CACHE_OP_WRITE            = 1,
        PERF_COUNT_HW_CACHE_OP_PREFETCH         = 2,

        PERF_COUNT_HW_CACHE_OP_MAX,             /* non-ABI */
};

enum perf_hw_cache_op_result_id {
        PERF_COUNT_HW_CACHE_RESULT_ACCESS       = 0,
        PERF_COUNT_HW_CACHE_RESULT_MISS         = 1,

        PERF_COUNT_HW_CACHE_RESULT_MAX,         /* non-ABI */
};
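
/*
 * For PERF_TYPE_HW_CACHE events the three ids above are conventionally
 * packed into attr.config (shown here for illustration; this mirrors
 * the sys_perf_event_open() ABI as consumed by user-space tooling):
 *
 *      config = (perf_hw_cache_id) |
 *               (perf_hw_cache_op_id << 8) |
 *               (perf_hw_cache_op_result_id << 16);
 *
 * e.g. to count L1-D read misses:
 *
 *      attr.config = PERF_COUNT_HW_CACHE_L1D |
 *                    (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *                    (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 */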

/*
 * Special "software" events provided by the kernel, even if the hardware
 * does not support performance events. These events count various
 * physical and software occurrences inside the kernel (and allow
 * profiling them as well):
 */
enum perf_sw_ids {
        PERF_COUNT_SW_CPU_CLOCK                 = 0,
        PERF_COUNT_SW_TASK_CLOCK                = 1,
        PERF_COUNT_SW_PAGE_FAULTS               = 2,
        PERF_COUNT_SW_CONTEXT_SWITCHES          = 3,
        PERF_COUNT_SW_CPU_MIGRATIONS            = 4,
        PERF_COUNT_SW_PAGE_FAULTS_MIN           = 5,
        PERF_COUNT_SW_PAGE_FAULTS_MAJ           = 6,
        PERF_COUNT_SW_ALIGNMENT_FAULTS          = 7,
        PERF_COUNT_SW_EMULATION_FAULTS          = 8,

        PERF_COUNT_SW_MAX,                      /* non-ABI */
};

/*
 * Bits that can be set in attr.sample_type to request information
 * in the overflow packets.
 */
enum perf_event_sample_format {
        PERF_SAMPLE_IP                          = 1U << 0,
        PERF_SAMPLE_TID                         = 1U << 1,
        PERF_SAMPLE_TIME                        = 1U << 2,
        PERF_SAMPLE_ADDR                        = 1U << 3,
        PERF_SAMPLE_READ                        = 1U << 4,
        PERF_SAMPLE_CALLCHAIN                   = 1U << 5,
        PERF_SAMPLE_ID                          = 1U << 6,
        PERF_SAMPLE_CPU                         = 1U << 7,
        PERF_SAMPLE_PERIOD                      = 1U << 8,
        PERF_SAMPLE_STREAM_ID                   = 1U << 9,
        PERF_SAMPLE_RAW                         = 1U << 10,

        PERF_SAMPLE_MAX = 1U << 11,             /* non-ABI */
};

/*
 * The format of the data returned by read() on a perf event fd,
 * as specified by attr.read_format:
 *
 * struct read_format {
 *      { u64           value;
 *        { u64         time_enabled; } && PERF_FORMAT_ENABLED
 *        { u64         time_running; } && PERF_FORMAT_RUNNING
 *        { u64         id;           } && PERF_FORMAT_ID
 *      } && !PERF_FORMAT_GROUP
 *
 *      { u64           nr;
 *        { u64         time_enabled; } && PERF_FORMAT_ENABLED
 *        { u64         time_running; } && PERF_FORMAT_RUNNING
 *        { u64         value;
 *          { u64       id;           } && PERF_FORMAT_ID
 *        }             cntr[nr];
 *      } && PERF_FORMAT_GROUP
 * };
 */
enum perf_event_read_format {
        PERF_FORMAT_TOTAL_TIME_ENABLED          = 1U << 0,
        PERF_FORMAT_TOTAL_TIME_RUNNING          = 1U << 1,
        PERF_FORMAT_ID                          = 1U << 2,
        PERF_FORMAT_GROUP                       = 1U << 3,

        PERF_FORMAT_MAX = 1U << 4,              /* non-ABI */
};
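
/*
 * Illustrative user-space decode of a PERF_FORMAT_GROUP read() (a
 * sketch, assuming read_format == PERF_FORMAT_ID | PERF_FORMAT_GROUP
 * and no time_enabled/time_running; MAX_COUNTERS and use() are
 * hypothetical names, the layout follows read_format above):
 *
 *      struct {
 *              u64 nr;
 *              struct { u64 value, id; } cntr[MAX_COUNTERS];
 *      } buf;
 *
 *      read(group_fd, &buf, sizeof(buf));
 *      for (i = 0; i < buf.nr; i++)
 *              use(buf.cntr[i].id, buf.cntr[i].value);
 */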

#define PERF_ATTR_SIZE_VER0     64      /* sizeof first published struct */

/*
 * Hardware event_id to monitor via a performance monitoring event:
 */
struct perf_event_attr {

        /*
         * Major type: hardware/software/tracepoint/etc.
         */
        __u32                   type;

        /*
         * Size of the attr structure, for fwd/bwd compat.
         */
        __u32                   size;

        /*
         * Type specific configuration information.
         */
        __u64                   config;

        union {
                __u64           sample_period;
                __u64           sample_freq;
        };

        __u64                   sample_type;
        __u64                   read_format;

        __u64                   disabled       :  1, /* off by default        */
                                inherit        :  1, /* children inherit it   */
                                pinned         :  1, /* must always be on PMU */
                                exclusive      :  1, /* only group on PMU     */
                                exclude_user   :  1, /* don't count user      */
                                exclude_kernel :  1, /* ditto kernel          */
                                exclude_hv     :  1, /* ditto hypervisor      */
                                exclude_idle   :  1, /* don't count when idle */
                                mmap           :  1, /* include mmap data     */
                                comm           :  1, /* include comm data     */
                                freq           :  1, /* use freq, not period  */
                                inherit_stat   :  1, /* per task counts       */
                                enable_on_exec :  1, /* next exec enables     */
                                task           :  1, /* trace fork/exit       */
                                watermark      :  1, /* wakeup_watermark      */
                                /*
                                 * precise_ip:
                                 *
                                 *  0 - SAMPLE_IP can have arbitrary skid
                                 *  1 - SAMPLE_IP must have constant skid
                                 *  2 - SAMPLE_IP requested to have 0 skid
                                 *  3 - SAMPLE_IP must have 0 skid
                                 *
                                 *  See also PERF_RECORD_MISC_EXACT_IP
                                 */
                                precise_ip     :  2, /* skid constraint       */
                                mmap_data      :  1, /* non-exec mmap data    */
                                sample_id_all  :  1, /* sample_type all events */

                                __reserved_1   : 45;

        union {
                __u32           wakeup_events;    /* wakeup every n events */
                __u32           wakeup_watermark; /* bytes before wakeup   */
        };

        __u32                   bp_type;
        __u64                   bp_addr;
        __u64                   bp_len;
};
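
/*
 * Illustrative user-space setup (a sketch, not part of this header's
 * definitions): count instructions retired in the calling thread.
 * Assumes no libc wrapper for the syscall, hence raw syscall(2):
 *
 *      struct perf_event_attr attr;
 *      long long count;
 *      int fd;
 *
 *      memset(&attr, 0, sizeof(attr));
 *      attr.type           = PERF_TYPE_HARDWARE;
 *      attr.size           = sizeof(attr);
 *      attr.config         = PERF_COUNT_HW_INSTRUCTIONS;
 *      attr.disabled       = 1;
 *      attr.exclude_kernel = 1;
 *
 *      fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 *      ...enable, run the measured code, disable...
 *      read(fd, &count, sizeof(count));
 */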

/*
 * Ioctls that can be done on a perf event fd:
 */
#define PERF_EVENT_IOC_ENABLE           _IO ('$', 0)
#define PERF_EVENT_IOC_DISABLE          _IO ('$', 1)
#define PERF_EVENT_IOC_REFRESH          _IO ('$', 2)
#define PERF_EVENT_IOC_RESET            _IO ('$', 3)
#define PERF_EVENT_IOC_PERIOD           _IOW('$', 4, __u64)
#define PERF_EVENT_IOC_SET_OUTPUT       _IO ('$', 5)
#define PERF_EVENT_IOC_SET_FILTER       _IOW('$', 6, char *)

enum perf_event_ioc_flags {
        PERF_IOC_FLAG_GROUP             = 1U << 0,
};
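
/*
 * Typical user-space sequence around a measured region (illustrative):
 *
 *      ioctl(fd, PERF_EVENT_IOC_RESET,  0);
 *      ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
 *      ...measured code...
 *      ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);
 *
 * Passing PERF_IOC_FLAG_GROUP as the argument instead of 0 applies the
 * operation to the whole event group.
 */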

/*
 * Structure of the page that can be mapped via mmap
 */
struct perf_event_mmap_page {
        __u32   version;                /* version number of this structure */
        __u32   compat_version;         /* lowest version this is compat with */

        /*
         * Bits needed to read the hw events in user-space.
         *
         *   u32 seq;
         *   s64 count;
         *
         *   do {
         *     seq = pc->lock;
         *
         *     barrier();
         *     if (pc->index) {
         *       count = pmc_read(pc->index - 1);
         *       count += pc->offset;
         *     } else
         *       goto regular_read;
         *
         *     barrier();
         *   } while (pc->lock != seq);
         *
         * NOTE: for obvious reasons this only works on self-monitoring
         *       processes.
         */
        __u32   lock;                   /* seqlock for synchronization */
        __u32   index;                  /* hardware event identifier */
        __s64   offset;                 /* add to hardware event value */
        __u64   time_enabled;           /* time event active */
        __u64   time_running;           /* time event on cpu */

                /*
                 * Hole for extension of the self monitor capabilities
                 */

        __u64   __reserved[123];        /* align to 1k */

        /*
         * Control data for the mmap() data buffer.
         *
         * User-space reading the @data_head value should issue an rmb(), on
         * SMP capable platforms, after reading this value -- see
         * perf_event_wakeup().
         *
         * When the mapping is PROT_WRITE the @data_tail value should be
         * written by userspace to reflect the last read data. In this case
         * the kernel will not overwrite unread data.
         */
        __u64   data_head;              /* head in the data section */
        __u64   data_tail;              /* user-space written tail */
};
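
/*
 * Illustrative user-space consumer of the mmap()ed data area (a
 * sketch; `base' is the start of the data pages that follow this
 * control page and `size' their total length, a power of two -- both
 * hypothetical names). Note that a record may wrap at the end of the
 * buffer, so a real consumer has to handle the split copy:
 *
 *      u64 tail = pc->data_tail;
 *      u64 head = pc->data_head;
 *      rmb();
 *      while (tail != head) {
 *              struct perf_event_header *hdr = base + (tail & (size - 1));
 *              ...consume hdr->size bytes...
 *              tail += hdr->size;
 *      }
 *      pc->data_tail = tail;   // only honoured for PROT_WRITE mappings
 */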

#define PERF_RECORD_MISC_CPUMODE_MASK           (7 << 0)
#define PERF_RECORD_MISC_CPUMODE_UNKNOWN        (0 << 0)
#define PERF_RECORD_MISC_KERNEL                 (1 << 0)
#define PERF_RECORD_MISC_USER                   (2 << 0)
#define PERF_RECORD_MISC_HYPERVISOR             (3 << 0)
#define PERF_RECORD_MISC_GUEST_KERNEL           (4 << 0)
#define PERF_RECORD_MISC_GUEST_USER             (5 << 0)

/*
 * Indicates that the content of PERF_SAMPLE_IP points to
 * the actual instruction that triggered the event. See also
 * perf_event_attr::precise_ip.
 */
#define PERF_RECORD_MISC_EXACT_IP               (1 << 14)
/*
 * Reserve the last bit to indicate some extended misc field
 */
#define PERF_RECORD_MISC_EXT_RESERVED           (1 << 15)

struct perf_event_header {
        __u32   type;
        __u16   misc;
        __u16   size;
};

enum perf_event_type {

        /*
         * If perf_event_attr.sample_id_all is set then all event types will
         * carry the sample_type selected identity fields describing
         * where/when an event took place (TID, TIME, ID, CPU, STREAM_ID),
         * as described for PERF_RECORD_SAMPLE below. They are stashed after
         * the perf_event_header and the fields already defined for the
         * record type, i.e. at the end of the payload. That way a newer
         * perf.data file will be supported by older perf tools, with these
         * new optional fields simply being ignored.
         *
         * The MMAP events record the PROT_EXEC mappings so that we can
         * correlate userspace IPs to code. They have the following structure:
         *
         * struct {
         *      struct perf_event_header        header;
         *
         *      u32                             pid, tid;
         *      u64                             addr;
         *      u64                             len;
         *      u64                             pgoff;
         *      char                            filename[];
         * };
         */
        PERF_RECORD_MMAP                        = 1,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u64                             id;
         *      u64                             lost;
         * };
         */
        PERF_RECORD_LOST                        = 2,

        /*
         * struct {
         *      struct perf_event_header        header;
         *
         *      u32                             pid, tid;
         *      char                            comm[];
         * };
         */
        PERF_RECORD_COMM                        = 3,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u32                             pid, ppid;
         *      u32                             tid, ptid;
         *      u64                             time;
         * };
         */
        PERF_RECORD_EXIT                        = 4,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u64                             time;
         *      u64                             id;
         *      u64                             stream_id;
         * };
         */
        PERF_RECORD_THROTTLE                    = 5,
        PERF_RECORD_UNTHROTTLE                  = 6,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u32                             pid, ppid;
         *      u32                             tid, ptid;
         *      u64                             time;
         * };
         */
        PERF_RECORD_FORK                        = 7,

        /*
         * struct {
         *      struct perf_event_header        header;
         *      u32                             pid, tid;
         *
         *      struct read_format              values;
         * };
         */
        PERF_RECORD_READ                        = 8,

        /*
         * struct {
         *      struct perf_event_header        header;
         *
         *      { u64                   ip;       } && PERF_SAMPLE_IP
         *      { u32                   pid, tid; } && PERF_SAMPLE_TID
         *      { u64                   time;     } && PERF_SAMPLE_TIME
         *      { u64                   addr;     } && PERF_SAMPLE_ADDR
         *      { u64                   id;       } && PERF_SAMPLE_ID
         *      { u64                   stream_id;} && PERF_SAMPLE_STREAM_ID
         *      { u32                   cpu, res; } && PERF_SAMPLE_CPU
         *      { u64                   period;   } && PERF_SAMPLE_PERIOD
         *
         *      { struct read_format    values;   } && PERF_SAMPLE_READ
         *
         *      { u64                   nr,
         *        u64                   ips[nr];  } && PERF_SAMPLE_CALLCHAIN
         *
         *      #
         *      # The RAW record below is opaque data wrt the ABI
         *      #
         *      # That is, the ABI doesn't make any promises wrt
         *      # the stability of its content; it may vary depending
         *      # on event, hardware, kernel version and phase of
         *      # the moon.
         *      #
         *      # In other words, PERF_SAMPLE_RAW contents are not an ABI.
         *      #
         *
         *      { u32                   size;
         *        char                  data[size]; } && PERF_SAMPLE_RAW
         * };
         */
        PERF_RECORD_SAMPLE                      = 9,

        PERF_RECORD_MAX,                        /* non-ABI */
};
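
/*
 * Illustrative decode of a PERF_RECORD_SAMPLE body (a sketch, assuming
 * sample_type == PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_TIME;
 * only the requested fields are present, in the order listed above):
 *
 *      u64 *p = (u64 *)(hdr + 1);      // past the perf_event_header
 *      u64 ip   = *p++;
 *      u32 pid  = ((u32 *)p)[0];
 *      u32 tid  = ((u32 *)p)[1];
 *      p++;                            // pid/tid share one u64 slot
 *      u64 time = *p++;
 */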

enum perf_callchain_context {
        PERF_CONTEXT_HV                 = (__u64)-32,
        PERF_CONTEXT_KERNEL             = (__u64)-128,
        PERF_CONTEXT_USER               = (__u64)-512,

        PERF_CONTEXT_GUEST              = (__u64)-2048,
        PERF_CONTEXT_GUEST_KERNEL       = (__u64)-2176,
        PERF_CONTEXT_GUEST_USER         = (__u64)-2560,

        PERF_CONTEXT_MAX                = (__u64)-4095,
};
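
/*
 * In PERF_SAMPLE_CALLCHAIN data the markers above are interleaved with
 * real addresses to delimit the hv/kernel/user (and guest) segments.
 * A sketch of the usual consumer-side test (resolve() is hypothetical):
 *
 *      for (i = 0; i < nr; i++) {
 *              if (ips[i] >= PERF_CONTEXT_MAX)
 *                      context = ips[i];          // a context marker
 *              else
 *                      resolve(ips[i], context);  // a real return address
 *      }
 */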

#define PERF_FLAG_FD_NO_GROUP   (1U << 0)
#define PERF_FLAG_FD_OUTPUT     (1U << 1)

#ifdef __KERNEL__
/*
 * Kernel-internal data types and definitions:
 */

#ifdef CONFIG_PERF_EVENTS
# include <asm/perf_event.h>
# include <asm/local64.h>
#endif

struct perf_guest_info_callbacks {
        int (*is_in_guest) (void);
        int (*is_user_mode) (void);
        unsigned long (*get_guest_ip) (void);
};

#ifdef CONFIG_HAVE_HW_BREAKPOINT
#include <asm/hw_breakpoint.h>
#endif

#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/pid_namespace.h>
#include <linux/workqueue.h>
#include <linux/ftrace.h>
#include <linux/cpu.h>
#include <linux/irq_work.h>
#include <linux/jump_label_ref.h>
#include <asm/atomic.h>
#include <asm/local.h>

#define PERF_MAX_STACK_DEPTH            255

struct perf_callchain_entry {
        __u64                           nr;
        __u64                           ip[PERF_MAX_STACK_DEPTH];
};

struct perf_raw_record {
        u32                             size;
        void                            *data;
};

struct perf_branch_entry {
        __u64                           from;
        __u64                           to;
        __u64                           flags;
};

struct perf_branch_stack {
        __u64                           nr;
        struct perf_branch_entry        entries[0];
};

struct task_struct;

/**
 * struct hw_perf_event - performance event hardware details:
 */
struct hw_perf_event {
#ifdef CONFIG_PERF_EVENTS
        union {
                struct { /* hardware */
                        u64             config;
                        u64             last_tag;
                        unsigned long   config_base;
                        unsigned long   event_base;
                        int             idx;
                        int             last_cpu;
                };
                struct { /* software */
                        struct hrtimer  hrtimer;
                };
#ifdef CONFIG_HAVE_HW_BREAKPOINT
                struct { /* breakpoint */
                        struct arch_hw_breakpoint       info;
                        struct list_head                bp_list;
                        /*
                         * Crufty hack to avoid the chicken-and-egg
                         * problem hw_breakpoint has with context
                         * creation and event initialization.
                         */
                        struct task_struct              *bp_target;
                };
#endif
        };
        int                             state;
        local64_t                       prev_count;
        u64                             sample_period;
        u64                             last_period;
        local64_t                       period_left;
        u64                             interrupts;

        u64                             freq_time_stamp;
        u64                             freq_count_stamp;
#endif
};

/*
 * hw_perf_event::state flags
 */
#define PERF_HES_STOPPED        0x01 /* the counter is stopped */
#define PERF_HES_UPTODATE       0x02 /* event->count up-to-date */
#define PERF_HES_ARCH           0x04

struct perf_event;

/*
 * Common implementation detail of pmu::{start,commit,cancel}_txn
 */
#define PERF_EVENT_TXN 0x1

/**
 * struct pmu - generic performance monitoring unit
 */
struct pmu {
        struct list_head                entry;

        int * __percpu                  pmu_disable_count;
        struct perf_cpu_context * __percpu pmu_cpu_context;
        int                             task_ctx_nr;

        /*
         * Fully disable/enable this PMU, can be used to protect from the PMI
         * as well as for lazy/batch writing of the MSRs.
         */
        void (*pmu_enable)              (struct pmu *pmu); /* optional */
        void (*pmu_disable)             (struct pmu *pmu); /* optional */

        /*
         * Try and initialize the event for this PMU.
         * Should return -ENOENT when the @event doesn't match this PMU.
         */
        int (*event_init)               (struct perf_event *event);

#define PERF_EF_START   0x01            /* start the counter when adding    */
#define PERF_EF_RELOAD  0x02            /* reload the counter when starting */
#define PERF_EF_UPDATE  0x04            /* update the counter when stopping */

        /*
         * Adds/Removes a counter to/from the PMU; can be done inside
         * a transaction, see the ->*_txn() methods.
         */
        int  (*add)                     (struct perf_event *event, int flags);
        void (*del)                     (struct perf_event *event, int flags);

        /*
         * Starts/Stops a counter present on the PMU. The PMI handler
         * should stop the counter when perf_event_overflow() returns
         * !0. ->start() will be used to continue.
         */
        void (*start)                   (struct perf_event *event, int flags);
        void (*stop)                    (struct perf_event *event, int flags);

        /*
         * Updates the counter value of the event.
         */
        void (*read)                    (struct perf_event *event);

        /*
         * Group event scheduling is treated as a transaction: add
         * group events as a whole and perform one schedulability test.
         * If the test fails, roll back the whole group.
         *
         * Start the transaction; after this, ->add() doesn't need to
         * do schedulability tests.
         */
        void (*start_txn)       (struct pmu *pmu); /* optional */
        /*
         * If ->start_txn() disabled the ->add() schedulability test
         * then ->commit_txn() is required to perform one. On success
         * the transaction is closed. On error the transaction is kept
         * open until ->cancel_txn() is called.
         */
        int  (*commit_txn)      (struct pmu *pmu); /* optional */
        /*
         * Will cancel the transaction, assumes ->del() is called
         * for each successful ->add() during the transaction.
         */
        void (*cancel_txn)      (struct pmu *pmu); /* optional */
};
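
/*
 * A minimal pmu implementation sketch (illustrative only; the my_*
 * callbacks are hypothetical and actual counter programming is
 * elided). ->event_init() must return -ENOENT for events that belong
 * to some other PMU:
 *
 *      static int my_event_init(struct perf_event *event)
 *      {
 *              if (event->attr.type != PERF_TYPE_RAW)
 *                      return -ENOENT;
 *              return 0;
 *      }
 *
 *      static struct pmu my_pmu = {
 *              .event_init     = my_event_init,
 *              .add            = my_add,       // honour PERF_EF_START
 *              .del            = my_del,
 *              .start          = my_start,
 *              .stop           = my_stop,
 *              .read           = my_read,
 *      };
 *
 *      perf_pmu_register(&my_pmu);
 */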

/**
 * enum perf_event_active_state - the states of an event
 */
enum perf_event_active_state {
        PERF_EVENT_STATE_ERROR          = -2,
        PERF_EVENT_STATE_OFF            = -1,
        PERF_EVENT_STATE_INACTIVE       =  0,
        PERF_EVENT_STATE_ACTIVE         =  1,
};

struct file;

#define PERF_BUFFER_WRITABLE            0x01

struct perf_buffer {
        atomic_t                        refcount;
        struct rcu_head                 rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
        struct work_struct              work;
        int                             page_order;     /* allocation order  */
#endif
        int                             nr_pages;       /* nr of data pages  */
        int                             writable;       /* are we writable   */

        atomic_t                        poll;           /* POLL_ for wakeups */

        local_t                         head;           /* write position    */
        local_t                         nest;           /* nested writers    */
        local_t                         events;         /* event limit       */
        local_t                         wakeup;         /* wakeup stamp      */
        local_t                         lost;           /* nr records lost   */

        long                            watermark;      /* wakeup watermark  */

        struct perf_event_mmap_page     *user_page;
        void                            *data_pages[0];
};

struct perf_sample_data;

typedef void (*perf_overflow_handler_t)(struct perf_event *, int,
                                        struct perf_sample_data *,
                                        struct pt_regs *regs);

enum perf_group_flag {
        PERF_GROUP_SOFTWARE = 0x1,
};

#define SWEVENT_HLIST_BITS      8
#define SWEVENT_HLIST_SIZE      (1 << SWEVENT_HLIST_BITS)

struct swevent_hlist {
        struct hlist_head       heads[SWEVENT_HLIST_SIZE];
        struct rcu_head         rcu_head;
};

#define PERF_ATTACH_CONTEXT     0x01
#define PERF_ATTACH_GROUP       0x02
#define PERF_ATTACH_TASK        0x04

/**
 * struct perf_event - performance event kernel representation:
 */
struct perf_event {
#ifdef CONFIG_PERF_EVENTS
        struct list_head                group_entry;
        struct list_head                event_entry;
        struct list_head                sibling_list;
        struct hlist_node               hlist_entry;
        int                             nr_siblings;
        int                             group_flags;
        struct perf_event               *group_leader;
        struct pmu                      *pmu;

        enum perf_event_active_state    state;
        unsigned int                    attach_state;
        local64_t                       count;
        atomic64_t                      child_count;

        /*
         * These are the total time in nanoseconds that the event
         * has been enabled (i.e. eligible to run, and the task has
         * been scheduled in, if this is a per-task event)
         * and running (scheduled onto the CPU), respectively.
         *
         * They are computed from tstamp_enabled, tstamp_running and
         * tstamp_stopped when the event is in INACTIVE or ACTIVE state.
         */
        u64                             total_time_enabled;
        u64                             total_time_running;

        /*
         * These are timestamps used for computing total_time_enabled
         * and total_time_running when the event is in INACTIVE or
         * ACTIVE state, measured in nanoseconds from an arbitrary point
         * in time.
         * tstamp_enabled: the notional time when the event was enabled
         * tstamp_running: the notional time when the event was scheduled on
         * tstamp_stopped: in INACTIVE state, the notional time when the
         *      event was scheduled off.
         */
        u64                             tstamp_enabled;
        u64                             tstamp_running;
        u64                             tstamp_stopped;

        /*
         * timestamp shadows the actual context timing but it can
         * be safely used in NMI interrupt context. It reflects the
         * context time as it was when the event was last scheduled in.
         *
         * ctx_time already accounts for ctx->timestamp. Therefore to
         * compute ctx_time for a sample, simply add perf_clock().
         */
        u64                             shadow_ctx_time;

        struct perf_event_attr          attr;
        u16                             header_size;
        u16                             id_header_size;
        u16                             read_size;
        struct hw_perf_event            hw;

        struct perf_event_context       *ctx;
        struct file                     *filp;

        /*
         * These accumulate total time (in nanoseconds) that children
         * events have been enabled and running, respectively.
         */
        atomic64_t                      child_total_time_enabled;
        atomic64_t                      child_total_time_running;

        /*
         * Protect attach/detach and child_list:
         */
        struct mutex                    child_mutex;
        struct list_head                child_list;
        struct perf_event               *parent;

        int                             oncpu;
        int                             cpu;

        struct list_head                owner_entry;
        struct task_struct              *owner;

        /* mmap bits */
        struct mutex                    mmap_mutex;
        atomic_t                        mmap_count;
        int                             mmap_locked;
        struct user_struct              *mmap_user;
        struct perf_buffer              *buffer;

        /* poll related */
        wait_queue_head_t               waitq;
        struct fasync_struct            *fasync;

        /* delayed work for NMIs and such */
        int                             pending_wakeup;
        int                             pending_kill;
        int                             pending_disable;
        struct irq_work                 pending;

        atomic_t                        event_limit;

        void (*destroy)(struct perf_event *);
        struct rcu_head                 rcu_head;

        struct pid_namespace            *ns;
        u64                             id;

        perf_overflow_handler_t         overflow_handler;

#ifdef CONFIG_EVENT_TRACING
        struct ftrace_event_call        *tp_event;
        struct event_filter             *filter;
#endif

#endif /* CONFIG_PERF_EVENTS */
};

enum perf_event_context_type {
        task_context,
        cpu_context,
};

/**
 * struct perf_event_context - event context structure
 *
 * Used as a container for task events and CPU events as well:
 */
struct perf_event_context {
        enum perf_event_context_type    type;
        struct pmu                      *pmu;
        /*
         * Protect the states of the events in the list,
         * nr_active, and the list:
         */
        raw_spinlock_t                  lock;
        /*
         * Protect the list of events.  Locking either mutex or lock
         * is sufficient to ensure the list doesn't change; to change
         * the list you need to lock both the mutex and the spinlock.
         */
        struct mutex                    mutex;

        struct list_head                pinned_groups;
        struct list_head                flexible_groups;
        struct list_head                event_list;
        int                             nr_events;
        int                             nr_active;
        int                             is_active;
        int                             nr_stat;
        int                             rotate_disable;
        atomic_t                        refcount;
        struct task_struct              *task;

        /*
         * Context clock, runs when context enabled.
         */
        u64                             time;
        u64                             timestamp;

        /*
         * These fields let us detect when two contexts have both
         * been cloned (inherited) from a common ancestor.
         */
        struct perf_event_context       *parent_ctx;
        u64                             parent_gen;
        u64                             generation;
        int                             pin_count;
        struct rcu_head                 rcu_head;
};

/*
 * Number of contexts where an event can trigger:
 *      task, softirq, hardirq, nmi.
 */
#define PERF_NR_CONTEXTS        4

/**
 * struct perf_cpu_context - per cpu event context structure
 */
struct perf_cpu_context {
        struct perf_event_context       ctx;
        struct perf_event_context       *task_ctx;
        int                             active_oncpu;
        int                             exclusive;
        struct list_head                rotation_list;
        int                             jiffies_interval;
        struct pmu                      *active_pmu;
};

struct perf_output_handle {
        struct perf_event               *event;
        struct perf_buffer              *buffer;
        unsigned long                   wakeup;
        unsigned long                   size;
        void                            *addr;
        int                             page;
        int                             nmi;
        int                             sample;
};

#ifdef CONFIG_PERF_EVENTS

extern int perf_pmu_register(struct pmu *pmu);
extern void perf_pmu_unregister(struct pmu *pmu);

extern int perf_num_counters(void);
extern const char *perf_pmu_name(void);
extern void __perf_event_task_sched_in(struct task_struct *task);
extern void __perf_event_task_sched_out(struct task_struct *task, struct task_struct *next);
extern int perf_event_init_task(struct task_struct *child);
extern void perf_event_exit_task(struct task_struct *child);
extern void perf_event_free_task(struct task_struct *task);
extern void perf_event_delayed_put(struct task_struct *task);
extern void perf_event_print_debug(void);
extern void perf_pmu_disable(struct pmu *pmu);
extern void perf_pmu_enable(struct pmu *pmu);
extern int perf_event_task_disable(void);
extern int perf_event_task_enable(void);
extern void perf_event_update_userpage(struct perf_event *event);
extern int perf_event_release_kernel(struct perf_event *event);
extern struct perf_event *
perf_event_create_kernel_counter(struct perf_event_attr *attr,
                                int cpu,
                                struct task_struct *task,
                                perf_overflow_handler_t callback);
extern u64 perf_event_read_value(struct perf_event *event,
                                 u64 *enabled, u64 *running);

struct perf_sample_data {
        u64                             type;

        u64                             ip;
        struct {
                u32     pid;
                u32     tid;
        }                               tid_entry;
        u64                             time;
        u64                             addr;
        u64                             id;
        u64                             stream_id;
        struct {
                u32     cpu;
                u32     reserved;
        }                               cpu_entry;
        u64                             period;
        struct perf_callchain_entry     *callchain;
        struct perf_raw_record          *raw;
};

static inline
void perf_sample_data_init(struct perf_sample_data *data, u64 addr)
{
        data->addr = addr;
        data->raw  = NULL;
}

extern void perf_output_sample(struct perf_output_handle *handle,
                               struct perf_event_header *header,
                               struct perf_sample_data *data,
                               struct perf_event *event);
extern void perf_prepare_sample(struct perf_event_header *header,
                                struct perf_sample_data *data,
                                struct perf_event *event,
                                struct pt_regs *regs);

extern int perf_event_overflow(struct perf_event *event, int nmi,
                                 struct perf_sample_data *data,
                                 struct pt_regs *regs);

static inline bool is_sampling_event(struct perf_event *event)
{
        return event->attr.sample_period != 0;
}

/*
 * Return 1 for a software event, 0 for a hardware event
 */
static inline int is_software_event(struct perf_event *event)
{
        return event->pmu->task_ctx_nr == perf_sw_context;
}

extern atomic_t perf_swevent_enabled[PERF_COUNT_SW_MAX];

extern void __perf_sw_event(u32, u64, int, struct pt_regs *, u64);

#ifndef perf_arch_fetch_caller_regs
static inline void
perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip) { }
#endif

/*
 * Take a snapshot of the regs. Skip ip and frame pointer to
 * the nth caller. We only need a few of the regs:
 * - ip for PERF_SAMPLE_IP
 * - cs for user_mode() tests
 * - bp for callchains
 * - eflags, for future purposes, just in case
 */
static inline void perf_fetch_caller_regs(struct pt_regs *regs)
{
        memset(regs, 0, sizeof(*regs));

        perf_arch_fetch_caller_regs(regs, CALLER_ADDR0);
}

static __always_inline void
perf_sw_event(u32 event_id, u64 nr, int nmi, struct pt_regs *regs, u64 addr)
{
        struct pt_regs hot_regs;

        JUMP_LABEL(&perf_swevent_enabled[event_id], have_event);
        return;

have_event:
        if (!regs) {
                perf_fetch_caller_regs(&hot_regs);
                regs = &hot_regs;
        }
        __perf_sw_event(event_id, nr, nmi, regs, addr);
}

extern atomic_t perf_task_events;

static inline void perf_event_task_sched_in(struct task_struct *task)
{
        COND_STMT(&perf_task_events, __perf_event_task_sched_in(task));
}

static inline
void perf_event_task_sched_out(struct task_struct *task, struct task_struct *next)
{
        perf_sw_event(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 1, NULL, 0);

        COND_STMT(&perf_task_events, __perf_event_task_sched_out(task, next));
}

extern void perf_event_mmap(struct vm_area_struct *vma);
extern struct perf_guest_info_callbacks *perf_guest_cbs;
extern int perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);
extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks *callbacks);

extern void perf_event_comm(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);

/* Callchains */
DECLARE_PER_CPU(struct perf_callchain_entry, perf_callchain_entry);

extern void perf_callchain_user(struct perf_callchain_entry *entry,
                                struct pt_regs *regs);
extern void perf_callchain_kernel(struct perf_callchain_entry *entry,
                                  struct pt_regs *regs);


static inline void
perf_callchain_store(struct perf_callchain_entry *entry, u64 ip)
{
        if (entry->nr < PERF_MAX_STACK_DEPTH)
                entry->ip[entry->nr++] = ip;
}

extern int sysctl_perf_event_paranoid;
extern int sysctl_perf_event_mlock;
extern int sysctl_perf_event_sample_rate;

static inline bool perf_paranoid_tracepoint_raw(void)
{
        return sysctl_perf_event_paranoid > -1;
}

static inline bool perf_paranoid_cpu(void)
{
        return sysctl_perf_event_paranoid > 0;
}

static inline bool perf_paranoid_kernel(void)
{
        return sysctl_perf_event_paranoid > 1;
}

extern void perf_event_init(void);
extern void perf_tp_event(u64 addr, u64 count, void *record,
                          int entry_size, struct pt_regs *regs,
                          struct hlist_head *head, int rctx);
extern void perf_bp_event(struct perf_event *event, void *data);

#ifndef perf_misc_flags
#define perf_misc_flags(regs)   (user_mode(regs) ? PERF_RECORD_MISC_USER : \
                                 PERF_RECORD_MISC_KERNEL)
#define perf_instruction_pointer(regs)  instruction_pointer(regs)
#endif

extern int perf_output_begin(struct perf_output_handle *handle,
                             struct perf_event *event, unsigned int size,
                             int nmi, int sample);
extern void perf_output_end(struct perf_output_handle *handle);
extern void perf_output_copy(struct perf_output_handle *handle,
                             const void *buf, unsigned int len);
extern int perf_swevent_get_recursion_context(void);
extern void perf_swevent_put_recursion_context(int rctx);
extern void perf_event_enable(struct perf_event *event);
extern void perf_event_disable(struct perf_event *event);
extern void perf_event_task_tick(void);
#else
static inline void
perf_event_task_sched_in(struct task_struct *task)                      { }
static inline void
perf_event_task_sched_out(struct task_struct *task,
                            struct task_struct *next)                   { }
static inline int perf_event_init_task(struct task_struct *child)       { return 0; }
static inline void perf_event_exit_task(struct task_struct *child)      { }
static inline void perf_event_free_task(struct task_struct *task)       { }
static inline void perf_event_delayed_put(struct task_struct *task)     { }
static inline void perf_event_print_debug(void)                         { }
static inline int perf_event_task_disable(void)                         { return -EINVAL; }
static inline int perf_event_task_enable(void)                          { return -EINVAL; }

static inline void
perf_sw_event(u32 event_id, u64 nr, int nmi,
                     struct pt_regs *regs, u64 addr)                    { }
static inline void
perf_bp_event(struct perf_event *event, void *data)                     { }

static inline int perf_register_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks) { return 0; }
static inline int perf_unregister_guest_info_callbacks
(struct perf_guest_info_callbacks *callbacks) { return 0; }

static inline void perf_event_mmap(struct vm_area_struct *vma)          { }
static inline void perf_event_comm(struct task_struct *tsk)             { }
static inline void perf_event_fork(struct task_struct *tsk)             { }
static inline void perf_event_init(void)                                { }
static inline int  perf_swevent_get_recursion_context(void)             { return -1; }
static inline void perf_swevent_put_recursion_context(int rctx)         { }
static inline void perf_event_enable(struct perf_event *event)          { }
static inline void perf_event_disable(struct perf_event *event)         { }
static inline void perf_event_task_tick(void)                           { }
#endif

#define perf_output_put(handle, x) \
        perf_output_copy((handle), &(x), sizeof(x))
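
/*
 * Typical in-kernel output sequence built on the helpers above (an
 * illustrative sketch): reserve space, emit the record, close the
 * handle:
 *
 *      struct perf_output_handle handle;
 *      struct perf_event_header header;
 *
 *      ...fill in header.type/.misc/.size...
 *      if (perf_output_begin(&handle, event, header.size, nmi, 0))
 *              return;
 *      perf_output_put(&handle, header);
 *      ...perf_output_put()/perf_output_copy() the payload...
 *      perf_output_end(&handle);
 */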

/*
 * This has to have a higher priority than migration_notifier in sched.c.
 */
#define perf_cpu_notifier(fn)                                   \
do {                                                            \
        static struct notifier_block fn##_nb __cpuinitdata =    \
                { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
        fn(&fn##_nb, (unsigned long)CPU_UP_PREPARE,             \
                (void *)(unsigned long)smp_processor_id());     \
        fn(&fn##_nb, (unsigned long)CPU_STARTING,               \
                (void *)(unsigned long)smp_processor_id());     \
        fn(&fn##_nb, (unsigned long)CPU_ONLINE,                 \
                (void *)(unsigned long)smp_processor_id());     \
        register_cpu_notifier(&fn##_nb);                        \
} while (0)

#endif /* __KERNEL__ */
#endif /* _LINUX_PERF_EVENT_H */