Merge branch 'perfcounters-fixes-for-linus' of git://git.kernel.org/pub/scm/linux...
[cascardo/linux.git] / kernel / trace / trace_event_profile.c
1 /*
2  * trace event based perf counter profiling
3  *
4  * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
5  *
6  */
7
8 #include <linux/module.h>
9 #include "trace.h"
10
/*
 * alloc_percpu() takes a type, not a size, so define a dummy
 * type whose size matches the desired buffer size.
 */
15 typedef struct {char buf[FTRACE_MAX_PROFILE_SIZE];} profile_buf_t;
16
17 char            *trace_profile_buf;
18 EXPORT_SYMBOL_GPL(trace_profile_buf);
19
20 char            *trace_profile_buf_nmi;
21 EXPORT_SYMBOL_GPL(trace_profile_buf_nmi);
22
23 /* Count the events in use (per event id, not per instance) */
24 static int      total_profile_count;
25
26 static int ftrace_profile_enable_event(struct ftrace_event_call *event)
27 {
28         char *buf;
29         int ret = -ENOMEM;
30
31         if (atomic_inc_return(&event->profile_count))
32                 return 0;
33
34         if (!total_profile_count++) {
35                 buf = (char *)alloc_percpu(profile_buf_t);
36                 if (!buf)
37                         goto fail_buf;
38
39                 rcu_assign_pointer(trace_profile_buf, buf);
40
41                 buf = (char *)alloc_percpu(profile_buf_t);
42                 if (!buf)
43                         goto fail_buf_nmi;
44
45                 rcu_assign_pointer(trace_profile_buf_nmi, buf);
46         }
47
48         ret = event->profile_enable();
49         if (!ret)
50                 return 0;
51
52         kfree(trace_profile_buf_nmi);
53 fail_buf_nmi:
54         kfree(trace_profile_buf);
55 fail_buf:
56         total_profile_count--;
57         atomic_dec(&event->profile_count);
58
59         return ret;
60 }
61
62 int ftrace_profile_enable(int event_id)
63 {
64         struct ftrace_event_call *event;
65         int ret = -EINVAL;
66
67         mutex_lock(&event_mutex);
68         list_for_each_entry(event, &ftrace_events, list) {
69                 if (event->id == event_id && event->profile_enable &&
70                     try_module_get(event->mod)) {
71                         ret = ftrace_profile_enable_event(event);
72                         break;
73                 }
74         }
75         mutex_unlock(&event_mutex);
76
77         return ret;
78 }
79
80 static void ftrace_profile_disable_event(struct ftrace_event_call *event)
81 {
82         char *buf, *nmi_buf;
83
84         if (!atomic_add_negative(-1, &event->profile_count))
85                 return;
86
87         event->profile_disable();
88
89         if (!--total_profile_count) {
90                 buf = trace_profile_buf;
91                 rcu_assign_pointer(trace_profile_buf, NULL);
92
93                 nmi_buf = trace_profile_buf_nmi;
94                 rcu_assign_pointer(trace_profile_buf_nmi, NULL);
95
96                 /*
97                  * Ensure every events in profiling have finished before
98                  * releasing the buffers
99                  */
100                 synchronize_sched();
101
102                 free_percpu(buf);
103                 free_percpu(nmi_buf);
104         }
105 }
106
107 void ftrace_profile_disable(int event_id)
108 {
109         struct ftrace_event_call *event;
110
111         mutex_lock(&event_mutex);
112         list_for_each_entry(event, &ftrace_events, list) {
113                 if (event->id == event_id) {
114                         ftrace_profile_disable_event(event);
115                         module_put(event->mod);
116                         break;
117                 }
118         }
119         mutex_unlock(&event_mutex);
120 }