perf: Support overwrite mode for the AUX area
[cascardo/linux.git] kernel/events/internal.h
#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE            0x01

struct ring_buffer {
        atomic_t                        refcount;
        struct rcu_head                 rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
        struct work_struct              work;
        int                             page_order;     /* allocation order  */
#endif
        int                             nr_pages;       /* nr of data pages  */
        int                             overwrite;      /* can overwrite itself */

        atomic_t                        poll;           /* POLL_ for wakeups */

        local_t                         head;           /* write position    */
        local_t                         nest;           /* nested writers    */
        local_t                         events;         /* event limit       */
        local_t                         wakeup;         /* wakeup stamp      */
        local_t                         lost;           /* nr records lost   */

        long                            watermark;      /* wakeup watermark  */
        /* poll state */
        spinlock_t                      event_lock;
        struct list_head                event_list;

        atomic_t                        mmap_count;
        unsigned long                   mmap_locked;
        struct user_struct              *mmap_user;

        /* AUX area */
        local_t                         aux_head;       /* AUX write position */
        local_t                         aux_nest;       /* nested AUX writers */
        unsigned long                   aux_pgoff;      /* mmap offset of AUX */
        int                             aux_nr_pages;   /* nr of AUX pages   */
        int                             aux_overwrite;  /* can overwrite itself */
        atomic_t                        aux_mmap_count;
        unsigned long                   aux_mmap_locked;
        void                            (*free_aux)(void *); /* frees aux_priv */
        atomic_t                        aux_refcount;
        void                            **aux_pages;    /* AUX page pointers */
        void                            *aux_priv;      /* PMU private data  */

        struct perf_event_mmap_page     *user_page;
        void                            *data_pages[0];
};
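
/*
 * Editorial note, not part of the original header: aux_overwrite
 * mirrors the data buffer's overwrite flag.  When it is set, the
 * kernel keeps wrapping the AUX write head instead of honouring the
 * consumer's aux_tail in the user page, so older records may be
 * overwritten before user space reads them (snapshot-style operation).
 */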

extern void rb_free(struct ring_buffer *rb);
extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
extern int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
                        pgoff_t pgoff, int nr_pages, int flags);
extern void rb_free_aux(struct ring_buffer *rb);
extern struct ring_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct ring_buffer *rb);

static inline bool rb_has_aux(struct ring_buffer *rb)
{
        return !!rb->aux_nr_pages;
}
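
/*
 * Illustrative use only (not from the original source): AUX-specific
 * paths are expected to be guarded by this helper, e.g.
 *
 *      if (rb_has_aux(rb))
 *              rb_free_aux(rb);
 */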

void perf_event_aux_event(struct perf_event *event, unsigned long head,
                          unsigned long size, u64 flags);

extern void
perf_event_header__init_id(struct perf_event_header *header,
                           struct perf_sample_data *data,
                           struct perf_event *event);
extern void
perf_event__output_id_sample(struct perf_event *event,
                             struct perf_output_handle *handle,
                             struct perf_sample_data *sample);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct ring_buffer *rb)
{
        return rb->page_order;
}

#else

static inline int page_order(struct ring_buffer *rb)
{
        return 0;
}
#endif

static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
        return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

static inline unsigned long perf_aux_size(struct ring_buffer *rb)
{
        return rb->aux_nr_pages << PAGE_SHIFT;
}
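
/*
 * Editorial note: both helpers return sizes in bytes.  The data size
 * needs the page_order() shift because, with CONFIG_PERF_USE_VMALLOC,
 * the buffer is one high-order virtually contiguous chunk; the AUX
 * area is always accounted in plain PAGE_SIZE units, so no extra
 * shift is needed there.
 */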

/*
 * Generate a copy routine for perf_output_handle.  memcpy_func() must
 * return the number of bytes it failed to copy (0 on full success),
 * the same convention as copy_from_user().
 */
#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)                      \
static inline unsigned long                                             \
func_name(struct perf_output_handle *handle,                            \
          const void *buf, unsigned long len)                           \
{                                                                       \
        unsigned long size, written;                                    \
                                                                        \
        do {                                                            \
                size    = min(handle->size, len);                       \
                written = memcpy_func(handle->addr, buf, size);         \
                written = size - written;                               \
                                                                        \
                len -= written;                                         \
                handle->addr += written;                                \
                buf += written;                                         \
                handle->size -= written;                                \
                if (!handle->size) {                                    \
                        struct ring_buffer *rb = handle->rb;            \
                                                                        \
                        handle->page++;                                 \
                        handle->page &= rb->nr_pages - 1;               \
                        handle->addr = rb->data_pages[handle->page];    \
                        handle->size = PAGE_SIZE << page_order(rb);     \
                }                                                       \
        } while (len && written == size);                               \
                                                                        \
        return len;                                                     \
}
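
/*
 * For reference (editorial sketch, not in the original header),
 * DEFINE_OUTPUT_COPY(__output_copy, memcpy_common) expands to a
 * function with the signature
 *
 *      static inline unsigned long
 *      __output_copy(struct perf_output_handle *handle,
 *                    const void *buf, unsigned long len);
 *
 * which copies @len bytes at the handle's current position, wrapping
 * across pages (nr_pages is a power of two, hence the mask above),
 * and returns the number of bytes that could NOT be written.
 */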

/* Copy from kernel memory: cannot fail, so always returns 0. */
static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
        memcpy(dst, src, n);
        return 0;
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

/*
 * Deliberately copies nothing; __output_skip() therefore only advances
 * the handle over @n bytes, leaving a gap in the buffer.
 */
static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
        return 0;
}

DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)

#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

/*
 * Default user-memory copy.  It must not fault, since output may run
 * from atomic (even NMI) context; returns the number of bytes NOT
 * copied, matching the DEFINE_OUTPUT_COPY() convention.
 */
static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
        unsigned long ret;

        pagefault_disable();
        ret = __copy_from_user_inatomic(dst, src, n);
        pagefault_enable();

        return ret;
}
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
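
/*
 * Usage sketch (illustrative): with a handle set up by
 * perf_output_begin(), a sample writer can pull in user memory, e.g.
 * to dump a user stack:
 *
 *      rem = __output_copy_user(handle, (void *)user_sp, dump_size);
 *
 * A non-zero rem means the tail of the range was unreadable; user_sp
 * and dump_size are hypothetical names for this example.
 */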

/* Callchain handling */
extern struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs);
extern int get_callchain_buffers(void);
extern void put_callchain_buffers(void);

static inline int get_recursion_context(int *recursion)
{
        int rctx;

        /* One recursion slot per context: 0=task, 1=softirq, 2=hardirq, 3=NMI */
        if (in_nmi())
                rctx = 3;
        else if (in_irq())
                rctx = 2;
        else if (in_softirq())
                rctx = 1;
        else
                rctx = 0;

        /* Already active at this level: refuse to recurse */
        if (recursion[rctx])
                return -1;

        recursion[rctx]++;
        barrier();

        return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
        barrier();
        recursion[rctx]--;
}
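
/*
 * Usage sketch (illustrative; "recursion" stands for a hypothetical
 * per-CPU int[4], one slot per context level):
 *
 *      int rctx = get_recursion_context(this_cpu_ptr(recursion));
 *      if (rctx < 0)
 *              return;
 *      ... work that must not recurse at this context level ...
 *      put_recursion_context(this_cpu_ptr(recursion), rctx);
 */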

#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
        return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
        return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */
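
/*
 * Editorial note: perf_user_stack_pointer(regs) can be used
 * unconditionally; without CONFIG_HAVE_PERF_USER_STACK_DUMP it
 * evaluates to 0, which sampling code treats as "no user stack".
 */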

#endif /* _KERNEL_EVENTS_INTERNAL_H */