/* kernel/events/internal.h */
#ifndef _KERNEL_EVENTS_INTERNAL_H
#define _KERNEL_EVENTS_INTERNAL_H

#include <linux/hardirq.h>
#include <linux/uaccess.h>

/* Buffer handling */

#define RING_BUFFER_WRITABLE            0x01

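/*
 * Per-event ring buffer, shared with user space via perf_mmap(): a
 * control page (struct perf_event_mmap_page) followed by a power-of-two
 * number of data pages, plus an optional AUX area.
 */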
struct ring_buffer {
        atomic_t                        refcount;
        struct rcu_head                 rcu_head;
#ifdef CONFIG_PERF_USE_VMALLOC
        struct work_struct              work;
        int                             page_order;     /* allocation order  */
#endif
        int                             nr_pages;       /* nr of data pages  */
        int                             overwrite;      /* can overwrite itself */
        int                             paused;         /* can write into ring buffer */

        atomic_t                        poll;           /* POLL_ for wakeups */

        local_t                         head;           /* write position    */
        local_t                         nest;           /* nested writers    */
        local_t                         events;         /* event limit       */
        local_t                         wakeup;         /* wakeup stamp      */
        local_t                         lost;           /* nr records lost   */

        long                            watermark;      /* wakeup watermark  */
        long                            aux_watermark;
        /* poll bookkeeping */
        spinlock_t                      event_lock;
        struct list_head                event_list;

        atomic_t                        mmap_count;
        unsigned long                   mmap_locked;
        struct user_struct              *mmap_user;

        /* AUX area */
        local_t                         aux_head;
        local_t                         aux_nest;
        local_t                         aux_wakeup;
        unsigned long                   aux_pgoff;
        int                             aux_nr_pages;
        int                             aux_overwrite;
        atomic_t                        aux_mmap_count;
        unsigned long                   aux_mmap_locked;
        void                            (*free_aux)(void *);
        atomic_t                        aux_refcount;
        void                            **aux_pages;
        void                            *aux_priv;

        struct perf_event_mmap_page     *user_page;
        void                            *data_pages[0];
};

extern void rb_free(struct ring_buffer *rb);

static inline void rb_free_rcu(struct rcu_head *rcu_head)
{
        struct ring_buffer *rb;

        rb = container_of(rcu_head, struct ring_buffer, rcu_head);
        rb_free(rb);
}

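/*
 * Pause or resume writing into the ring buffer; a buffer with no data
 * pages can never be un-paused.
 */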
static inline void rb_toggle_paused(struct ring_buffer *rb, bool pause)
{
        if (!pause && rb->nr_pages)
                rb->paused = 0;
        else
                rb->paused = 1;
}

extern struct ring_buffer *
rb_alloc(int nr_pages, long watermark, int cpu, int flags);
extern void perf_event_wakeup(struct perf_event *event);
extern int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
                        pgoff_t pgoff, int nr_pages, long watermark, int flags);
extern void rb_free_aux(struct ring_buffer *rb);
extern struct ring_buffer *ring_buffer_get(struct perf_event *event);
extern void ring_buffer_put(struct ring_buffer *rb);

static inline bool rb_has_aux(struct ring_buffer *rb)
{
        return !!rb->aux_nr_pages;
}

void perf_event_aux_event(struct perf_event *event, unsigned long head,
                          unsigned long size, u64 flags);

extern struct page *
perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);

#ifdef CONFIG_PERF_USE_VMALLOC
/*
 * Back perf_mmap() with vmalloc memory.
 *
 * Required for architectures that have d-cache aliasing issues.
 */

static inline int page_order(struct ring_buffer *rb)
{
        return rb->page_order;
}

#else

static inline int page_order(struct ring_buffer *rb)
{
        return 0;
}
#endif

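/* Sizes, in bytes, of the data and AUX areas. */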
static inline unsigned long perf_data_size(struct ring_buffer *rb)
{
        return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
}

static inline unsigned long perf_aux_size(struct ring_buffer *rb)
{
        return rb->aux_nr_pages << PAGE_SHIFT;
}

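/*
 * Output copy loop: copies @len bytes from @buf into the ring buffer
 * behind @handle.  @memcpy_func must return the number of bytes it
 * could NOT copy (0 on full success); the loop advances through the
 * power-of-two data page array and returns how many bytes were left
 * uncopied.
 */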
#define __DEFINE_OUTPUT_COPY_BODY(memcpy_func)                          \
{                                                                       \
        unsigned long size, written;                                    \
                                                                        \
        do {                                                            \
                size    = min(handle->size, len);                       \
                written = memcpy_func(handle->addr, buf, size);         \
                written = size - written;                               \
                                                                        \
                len -= written;                                         \
                handle->addr += written;                                \
                buf += written;                                         \
                handle->size -= written;                                \
                if (!handle->size) {                                    \
                        struct ring_buffer *rb = handle->rb;            \
                                                                        \
                        handle->page++;                                 \
                        handle->page &= rb->nr_pages - 1;               \
                        handle->addr = rb->data_pages[handle->page];    \
                        handle->size = PAGE_SIZE << page_order(rb);     \
                }                                                       \
        } while (len && written == size);                               \
                                                                        \
        return len;                                                     \
}

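/*
 * DEFINE_OUTPUT_COPY() instantiates the body above for a given copy
 * routine: __output_copy() uses a plain memcpy(), __output_skip()
 * copies nothing and merely advances the handle, and
 * __output_copy_user() copies from user space.
 */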
#define DEFINE_OUTPUT_COPY(func_name, memcpy_func)                      \
static inline unsigned long                                             \
func_name(struct perf_output_handle *handle,                            \
          const void *buf, unsigned long len)                           \
__DEFINE_OUTPUT_COPY_BODY(memcpy_func)

static inline unsigned long
__output_custom(struct perf_output_handle *handle, perf_copy_f copy_func,
                const void *buf, unsigned long len)
__DEFINE_OUTPUT_COPY_BODY(copy_func)

static inline unsigned long
memcpy_common(void *dst, const void *src, unsigned long n)
{
        memcpy(dst, src, n);
        return 0;
}

DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)

static inline unsigned long
memcpy_skip(void *dst, const void *src, unsigned long n)
{
        return 0;
}

DEFINE_OUTPUT_COPY(__output_skip, memcpy_skip)

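/*
 * Default user-space copy routine; like the other copy helpers it
 * returns the number of bytes that could not be copied.  An
 * architecture can supply its own arch_perf_out_copy_user to override
 * this default.
 */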
#ifndef arch_perf_out_copy_user
#define arch_perf_out_copy_user arch_perf_out_copy_user

static inline unsigned long
arch_perf_out_copy_user(void *dst, const void *src, unsigned long n)
{
        unsigned long ret;

        pagefault_disable();
        ret = __copy_from_user_inatomic(dst, src, n);
        pagefault_enable();

        return ret;
}
#endif

DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
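
/*
 * Note: the __output_*() helpers operate on a perf_output_handle that
 * has been set up by perf_output_begin() and is torn down again by
 * perf_output_end() (see <linux/perf_event.h>), which reserve and
 * commit the space being written.
 */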

/* Callchain handling */
extern struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs);

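/*
 * get_recursion_context() claims a recursion slot for the current
 * context (task, softirq, hardirq or NMI) and returns its index, or -1
 * if that context is already active; put_recursion_context() releases
 * it again.  Typical pattern (sketch, error handling elided):
 *
 *	rctx = get_recursion_context(recursion);
 *	if (rctx < 0)
 *		return;
 *	... do the per-context work ...
 *	put_recursion_context(recursion, rctx);
 */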
static inline int get_recursion_context(int *recursion)
{
        int rctx;

        if (in_nmi())
                rctx = 3;
        else if (in_irq())
                rctx = 2;
        else if (in_softirq())
                rctx = 1;
        else
                rctx = 0;

        if (recursion[rctx])
                return -1;

        recursion[rctx]++;
        barrier();

        return rctx;
}

static inline void put_recursion_context(int *recursion, int rctx)
{
        barrier();
        recursion[rctx]--;
}

#ifdef CONFIG_HAVE_PERF_USER_STACK_DUMP
static inline bool arch_perf_have_user_stack_dump(void)
{
        return true;
}

#define perf_user_stack_pointer(regs) user_stack_pointer(regs)
#else
static inline bool arch_perf_have_user_stack_dump(void)
{
        return false;
}

#define perf_user_stack_pointer(regs) 0
#endif /* CONFIG_HAVE_PERF_USER_STACK_DUMP */

#endif /* _KERNEL_EVENTS_INTERNAL_H */