/*
 * kernel/trace/trace_event_perf.c
 *
 * trace event based perf event profiling/tracing
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra
 * Copyright (C) 2009-2010 Frederic Weisbecker <fweisbec@gmail.com>
 */

#include <linux/module.h>
#include <linux/kprobes.h>
#include "trace.h"

static char __percpu *perf_trace_buf[PERF_NR_CONTEXTS];

/*
 * Force it to be aligned to unsigned long to avoid misaligned access
 * surprises.
 */
typedef typeof(unsigned long [PERF_MAX_TRACE_SIZE / sizeof(unsigned long)])
	perf_trace_t;

/* Count the events in use (per event id, not per instance) */
static int	total_ref_count;

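/*
 * Check whether the current task may attach perf event @p_event to the
 * trace event @tp_event.  Raw tracepoint data can expose sensitive
 * kernel state, so most of it is limited to privileged users.
 */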
static int perf_trace_event_perm(struct trace_event_call *tp_event,
				 struct perf_event *p_event)
{
	if (tp_event->perf_perm) {
		int ret = tp_event->perf_perm(tp_event, p_event);
		if (ret)
			return ret;
	}

	/*
	 * We checked and allowed the parent event to be created,
	 * so allow its children without checking.
	 */
	if (p_event->parent)
		return 0;

	/*
	 * It's ok to check current process (owner) permissions in here,
	 * because code below is called only via perf_event_open syscall.
	 */

	/* The ftrace function trace is allowed only for root. */
	if (ftrace_event_is_function(tp_event)) {
		if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
			return -EPERM;

		/*
		 * We don't allow user space callchains for the function
		 * trace event, due to issues with page faults while
		 * tracing the page fault handler, and its overall
		 * trickiness.
		 */
		if (!p_event->attr.exclude_callchain_user)
			return -EINVAL;

		/*
		 * Same reason to disable user stack dump as for user space
		 * callchains above.
		 */
		if (p_event->attr.sample_type & PERF_SAMPLE_STACK_USER)
			return -EINVAL;
	}

	/* No tracing, just counting, so no obvious leak */
	if (!(p_event->attr.sample_type & PERF_SAMPLE_RAW))
		return 0;

	/* Some events are ok to be traced by non-root users... */
	if (p_event->attach_state == PERF_ATTACH_TASK) {
		if (tp_event->flags & TRACE_EVENT_FL_CAP_ANY)
			return 0;
	}

	/*
	 * ...otherwise raw tracepoint data can be a severe data leak,
	 * only allow root to have these.
	 */
	if (perf_paranoid_tracepoint_raw() && !capable(CAP_SYS_ADMIN))
		return -EPERM;

	return 0;
}

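/*
 * Take a perf reference on @tp_event.  The first user of an event
 * allocates its per-cpu list of attached perf events; the first user
 * overall also allocates the per-context scratch buffers shared by all
 * trace events.  Everything is rolled back on failure.
 */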
static int perf_trace_event_reg(struct trace_event_call *tp_event,
				struct perf_event *p_event)
{
	struct hlist_head __percpu *list;
	int ret = -ENOMEM;
	int cpu;

	p_event->tp_event = tp_event;
	if (tp_event->perf_refcount++ > 0)
		return 0;

	list = alloc_percpu(struct hlist_head);
	if (!list)
		goto fail;

	for_each_possible_cpu(cpu)
		INIT_HLIST_HEAD(per_cpu_ptr(list, cpu));

	tp_event->perf_events = list;

	if (!total_ref_count) {
		char __percpu *buf;
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			buf = (char __percpu *)alloc_percpu(perf_trace_t);
			if (!buf)
				goto fail;

			perf_trace_buf[i] = buf;
		}
	}

	ret = tp_event->class->reg(tp_event, TRACE_REG_PERF_REGISTER, NULL);
	if (ret)
		goto fail;

	total_ref_count++;
	return 0;

fail:
	if (!total_ref_count) {
		int i;

		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}

	if (!--tp_event->perf_refcount) {
		free_percpu(tp_event->perf_events);
		tp_event->perf_events = NULL;
	}

	return ret;
}

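/*
 * Drop the perf reference taken by perf_trace_event_reg().  The last
 * user of an event tears down its per-cpu list, and the last user
 * overall frees the shared scratch buffers.
 */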
static void perf_trace_event_unreg(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	int i;

	if (--tp_event->perf_refcount > 0)
		goto out;

	tp_event->class->reg(tp_event, TRACE_REG_PERF_UNREGISTER, NULL);

	/*
	 * Ensure our callback won't be called anymore. The buffers
	 * will be freed after that.
	 */
	tracepoint_synchronize_unregister();

	free_percpu(tp_event->perf_events);
	tp_event->perf_events = NULL;

	if (!--total_ref_count) {
		for (i = 0; i < PERF_NR_CONTEXTS; i++) {
			free_percpu(perf_trace_buf[i]);
			perf_trace_buf[i] = NULL;
		}
	}
out:
	module_put(tp_event->mod);
}

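/* Forward perf open/close of an event to the trace event class ->reg(). */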
static int perf_trace_event_open(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	return tp_event->class->reg(tp_event, TRACE_REG_PERF_OPEN, p_event);
}

static void perf_trace_event_close(struct perf_event *p_event)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	tp_event->class->reg(tp_event, TRACE_REG_PERF_CLOSE, p_event);
}

static int perf_trace_event_init(struct trace_event_call *tp_event,
				 struct perf_event *p_event)
{
	int ret;

	ret = perf_trace_event_perm(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_reg(tp_event, p_event);
	if (ret)
		return ret;

	ret = perf_trace_event_open(p_event);
	if (ret) {
		perf_trace_event_unreg(p_event);
		return ret;
	}

	return 0;
}

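/*
 * Entry point for the perf_event_open() syscall: find the trace event
 * whose id matches attr.config, pin its owning module and bind the
 * perf event to it.
 */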
int perf_trace_init(struct perf_event *p_event)
{
	struct trace_event_call *tp_event;
	u64 event_id = p_event->attr.config;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(tp_event, &ftrace_events, list) {
		if (tp_event->event.type == event_id &&
		    tp_event->class && tp_event->class->reg &&
		    try_module_get(tp_event->mod)) {
			ret = perf_trace_event_init(tp_event, p_event);
			if (ret)
				module_put(tp_event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}

void perf_trace_destroy(struct perf_event *p_event)
{
	mutex_lock(&event_mutex);
	perf_trace_event_close(p_event);
	perf_trace_event_unreg(p_event);
	mutex_unlock(&event_mutex);
}

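/*
 * Attach @p_event to this CPU's list for its trace event and notify
 * the class ->reg() callback; the event starts stopped unless
 * PERF_EF_START is set.
 */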
int perf_trace_add(struct perf_event *p_event, int flags)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	struct hlist_head __percpu *pcpu_list;
	struct hlist_head *list;

	pcpu_list = tp_event->perf_events;
	if (WARN_ON_ONCE(!pcpu_list))
		return -EINVAL;

	if (!(flags & PERF_EF_START))
		p_event->hw.state = PERF_HES_STOPPED;

	list = this_cpu_ptr(pcpu_list);
	hlist_add_head_rcu(&p_event->hlist_entry, list);

	return tp_event->class->reg(tp_event, TRACE_REG_PERF_ADD, p_event);
}

void perf_trace_del(struct perf_event *p_event, int flags)
{
	struct trace_event_call *tp_event = p_event->tp_event;
	hlist_del_rcu(&p_event->hlist_entry);
	tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
}

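/*
 * Hand out a per-cpu scratch buffer for building a raw trace record.
 * The buffer is protected by the perf swevent recursion context, which
 * the caller must put back after submitting the record; returns NULL
 * if recursion is detected or @size exceeds PERF_MAX_TRACE_SIZE.
 */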
void *perf_trace_buf_alloc(int size, struct pt_regs **regs, int *rctxp)
{
	char *raw_data;
	int rctx;

	BUILD_BUG_ON(PERF_MAX_TRACE_SIZE % sizeof(unsigned long));

	if (WARN_ONCE(size > PERF_MAX_TRACE_SIZE,
		      "perf buffer not large enough"))
		return NULL;

	*rctxp = rctx = perf_swevent_get_recursion_context();
	if (rctx < 0)
		return NULL;

	if (regs)
		*regs = this_cpu_ptr(&__perf_regs[rctx]);
	raw_data = this_cpu_ptr(perf_trace_buf[rctx]);

	/* zero the dead bytes from alignment to avoid leaking stack to user */
	memset(&raw_data[size - sizeof(u64)], 0, sizeof(u64));
	return raw_data;
}
EXPORT_SYMBOL_GPL(perf_trace_buf_alloc);
NOKPROBE_SYMBOL(perf_trace_buf_alloc);

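/*
 * Fill in the generic trace_entry header (irq flags, preempt count and
 * record type) for a record built in the perf trace buffer.
 */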
void perf_trace_buf_update(void *record, u16 type)
{
	struct trace_entry *entry = record;
	int pc = preempt_count();
	unsigned long flags;

	local_save_flags(flags);
	tracing_generic_entry_update(entry, flags, pc);
	entry->type = type;
}
NOKPROBE_SYMBOL(perf_trace_buf_update);

#ifdef CONFIG_FUNCTION_TRACER
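/*
 * ftrace callback: build a struct ftrace_entry in the per-cpu perf
 * buffer and submit it to every perf event attached on this CPU.
 */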
static void
perf_ftrace_function_call(unsigned long ip, unsigned long parent_ip,
			  struct ftrace_ops *ops, struct pt_regs *pt_regs)
{
	struct ftrace_entry *entry;
	struct hlist_head *head;
	struct pt_regs regs;
	int rctx;

	head = this_cpu_ptr(event_function.perf_events);
	if (hlist_empty(head))
		return;

#define ENTRY_SIZE (ALIGN(sizeof(struct ftrace_entry) + sizeof(u32), \
		    sizeof(u64)) - sizeof(u32))

	BUILD_BUG_ON(ENTRY_SIZE > PERF_MAX_TRACE_SIZE);

	memset(&regs, 0, sizeof(regs));
	perf_fetch_caller_regs(&regs);

	entry = perf_trace_buf_alloc(ENTRY_SIZE, NULL, &rctx);
	if (!entry)
		return;

	entry->ip = ip;
	entry->parent_ip = parent_ip;
	perf_trace_buf_submit(entry, ENTRY_SIZE, rctx, TRACE_FN,
			      1, &regs, head, NULL);

#undef ENTRY_SIZE
}

static int perf_ftrace_function_register(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;

	ops->flags |= FTRACE_OPS_FL_PER_CPU | FTRACE_OPS_FL_RCU;
	ops->func = perf_ftrace_function_call;
	return register_ftrace_function(ops);
}

static int perf_ftrace_function_unregister(struct perf_event *event)
{
	struct ftrace_ops *ops = &event->ftrace_ops;
	int ret = unregister_ftrace_function(ops);
	ftrace_free_filter(ops);
	return ret;
}

static void perf_ftrace_function_enable(struct perf_event *event)
{
	ftrace_function_local_enable(&event->ftrace_ops);
}

static void perf_ftrace_function_disable(struct perf_event *event)
{
	ftrace_function_local_disable(&event->ftrace_ops);
}

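/*
 * ->reg() callback for the function trace event, dispatching the perf
 * register/open/add lifecycle to the ftrace helpers above.
 */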
int perf_ftrace_event_register(struct trace_event_call *call,
			       enum trace_reg type, void *data)
{
	switch (type) {
	case TRACE_REG_REGISTER:
	case TRACE_REG_UNREGISTER:
		break;
	case TRACE_REG_PERF_REGISTER:
	case TRACE_REG_PERF_UNREGISTER:
		return 0;
	case TRACE_REG_PERF_OPEN:
		return perf_ftrace_function_register(data);
	case TRACE_REG_PERF_CLOSE:
		return perf_ftrace_function_unregister(data);
	case TRACE_REG_PERF_ADD:
		perf_ftrace_function_enable(data);
		return 0;
	case TRACE_REG_PERF_DEL:
		perf_ftrace_function_disable(data);
		return 0;
	}

	return -EINVAL;
}
#endif /* CONFIG_FUNCTION_TRACER */