kernel/trace/trace_event_profile.c (as of merge of 'v2.6.32-rc5' into perf/probes)
/*
 * trace event based perf counter profiling
 *
 * Copyright (C) 2009 Red Hat Inc, Peter Zijlstra <pzijlstr@redhat.com>
 *
 */

#include <linux/module.h>
#include "trace.h"

/*
 * alloc_percpu() takes a type rather than a size, so declare a dummy
 * type whose size matches the desired per-cpu buffer size.
 */
typedef struct {char buf[FTRACE_MAX_PROFILE_SIZE];} profile_buf_t;

char		*trace_profile_buf;
EXPORT_SYMBOL_GPL(trace_profile_buf);

char		*trace_profile_buf_nmi;
EXPORT_SYMBOL_GPL(trace_profile_buf_nmi);
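
/*
 * Illustrative sketch (not part of the original file): how a profiling
 * probe is expected to pick one of the per-cpu scratch buffers above.
 * The helper name is hypothetical; the real users are the profile
 * templates generated for each TRACE_EVENT().  Callers must keep
 * preemption disabled while the buffer is in use.
 */
#if 0	/* example only, never compiled */
static char *example_get_profile_buf(void)
{
	char *raw;

	/* NMIs get a separate buffer so they cannot clobber a buffer
	 * already in use on this CPU. */
	if (in_nmi())
		raw = rcu_dereference(trace_profile_buf_nmi);
	else
		raw = rcu_dereference(trace_profile_buf);

	if (!raw)
		return NULL;	/* profiling was concurrently disabled */

	/* Each CPU owns one FTRACE_MAX_PROFILE_SIZE sized slot. */
	return per_cpu_ptr(raw, smp_processor_id());
}
#endif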

/* Count the events in use (per event id, not per instance) */
static int	total_profile_count;

static int ftrace_profile_enable_event(struct ftrace_event_call *event)
{
	char *buf;
	int ret = -ENOMEM;

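	/*
	 * profile_count is biased to -1 while the event is unused, so
	 * atomic_inc_return() yields 0 only for the very first enabler;
	 * every later enable just takes another reference and returns.
	 */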
	if (atomic_inc_return(&event->profile_count))
		return 0;

	if (!total_profile_count) {
		buf = (char *)alloc_percpu(profile_buf_t);
		if (!buf)
			goto fail_buf;

		rcu_assign_pointer(trace_profile_buf, buf);

		buf = (char *)alloc_percpu(profile_buf_t);
		if (!buf)
			goto fail_buf_nmi;

		rcu_assign_pointer(trace_profile_buf_nmi, buf);
	}

	ret = event->profile_enable(event);
	if (!ret) {
		total_profile_count++;
		return 0;
	}

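/*
 * Error unwind for a first enabler: trace_profile_buf is already set by
 * the time we get here, while trace_profile_buf_nmi is either set
 * (profile_enable() failed) or still NULL (its allocation failed).
 */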
fail_buf_nmi:
	if (!total_profile_count) {
		free_percpu(trace_profile_buf_nmi);
		free_percpu(trace_profile_buf);
		trace_profile_buf_nmi = NULL;
		trace_profile_buf = NULL;
	}
fail_buf:
	atomic_dec(&event->profile_count);

	return ret;
}

int ftrace_profile_enable(int event_id)
{
	struct ftrace_event_call *event;
	int ret = -EINVAL;

	mutex_lock(&event_mutex);
	list_for_each_entry(event, &ftrace_events, list) {
		if (event->id == event_id && event->profile_enable &&
		    try_module_get(event->mod)) {
			ret = ftrace_profile_enable_event(event);
			break;
		}
	}
	mutex_unlock(&event_mutex);

	return ret;
}
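
/*
 * Illustrative sketch (not part of the original file): the expected
 * caller side.  perf's tracepoint support resolves attr.config to a
 * trace event id and enables profiling for it; the function below is a
 * simplified, hypothetical stand-in for that caller.
 */
#if 0	/* example only, never compiled */
static int example_attach_tracepoint_counter(int event_id)
{
	int err;

	/* Takes event_mutex, finds the event and pins its module. */
	err = ftrace_profile_enable(event_id);
	if (err)
		return err;	/* -EINVAL (unknown id) or -ENOMEM */

	/* ... counter runs, records land in the per-cpu buffers ... */

	/* Drops the reference; frees the buffers on the last disable. */
	ftrace_profile_disable(event_id);
	return 0;
}
#endif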

static void ftrace_profile_disable_event(struct ftrace_event_call *event)
{
	char *buf, *nmi_buf;

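	/*
	 * Drop our reference; only when the count drops back to -1 (the
	 * last profiling user went away) do we disable the event and
	 * release the buffers.
	 */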
	if (!atomic_add_negative(-1, &event->profile_count))
		return;

	event->profile_disable(event);

	if (!--total_profile_count) {
		buf = trace_profile_buf;
		rcu_assign_pointer(trace_profile_buf, NULL);

		nmi_buf = trace_profile_buf_nmi;
		rcu_assign_pointer(trace_profile_buf_nmi, NULL);

		/*
		 * Make sure all profile handlers that might still be using
		 * the buffers have finished before releasing them.  The
		 * handlers run with preemption disabled, so
		 * synchronize_sched() is sufficient to wait for them.
		 */
		synchronize_sched();

		free_percpu(buf);
		free_percpu(nmi_buf);
	}
}

void ftrace_profile_disable(int event_id)
{
	struct ftrace_event_call *event;

	mutex_lock(&event_mutex);
	list_for_each_entry(event, &ftrace_events, list) {
		if (event->id == event_id) {
			ftrace_profile_disable_event(event);
			module_put(event->mod);
			break;
		}
	}
	mutex_unlock(&event_mutex);
}