kernel/events/callchain.c
/*
 * Performance events callchain code, extracted from core.c:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2011 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2008-2011 Red Hat, Inc., Peter Zijlstra
 *  Copyright  ©  2009 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/slab.h>
#include "internal.h"

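/*
 * Scratch space for callchain unwinding, shared by all events that
 * request callchains: one buffer per possible CPU, each holding
 * PERF_NR_CONTEXTS entries.
 */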
struct callchain_cpus_entries {
	struct rcu_head			rcu_head;
	struct perf_callchain_entry	*cpu_entries[];
};

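/*
 * Runtime-tunable limits; updates go through
 * perf_event_max_stack_handler() and are refused while any event is
 * using the callchain buffers.
 */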
int sysctl_perf_event_max_stack __read_mostly = PERF_MAX_STACK_DEPTH;
int sysctl_perf_event_max_contexts_per_stack __read_mostly = PERF_MAX_CONTEXTS_PER_STACK;

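/*
 * Size of one scratch entry: the fixed header plus room for the
 * configured maximum of stack entries and context markers.
 */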
static inline size_t perf_callchain_entry__sizeof(void)
{
	return (sizeof(struct perf_callchain_entry) +
		sizeof(__u64) * (sysctl_perf_event_max_stack +
				 sysctl_perf_event_max_contexts_per_stack));
}

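/*
 * Per-CPU recursion guard (one slot per context type), the count of
 * events using callchains, and the mutex serializing buffer
 * (de)allocation and limit updates.
 */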
static DEFINE_PER_CPU(int, callchain_recursion[PERF_NR_CONTEXTS]);
static atomic_t nr_callchain_events;
static DEFINE_MUTEX(callchain_mutex);
static struct callchain_cpus_entries *callchain_cpus_entries;

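/*
 * Architectures with callchain support override these weak stubs; the
 * defaults record nothing.
 */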
__weak void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
				  struct pt_regs *regs)
{
}

__weak void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
				struct pt_regs *regs)
{
}

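/* RCU callback: free each CPU's buffer, then the container itself. */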
static void release_callchain_buffers_rcu(struct rcu_head *head)
{
	struct callchain_cpus_entries *entries;
	int cpu;

	entries = container_of(head, struct callchain_cpus_entries, rcu_head);

	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);

	kfree(entries);
}

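/*
 * Called with callchain_mutex held. The buffers may still be referenced
 * from NMI context, so the actual freeing is deferred past an RCU grace
 * period.
 */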
static void release_callchain_buffers(void)
{
	struct callchain_cpus_entries *entries;

	entries = callchain_cpus_entries;
	RCU_INIT_POINTER(callchain_cpus_entries, NULL);
	call_rcu(&entries->rcu_head, release_callchain_buffers_rcu);
}

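/*
 * Allocate the container plus one buffer per possible CPU, each sized
 * for PERF_NR_CONTEXTS entries. Called with callchain_mutex held.
 */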
static int alloc_callchain_buffers(void)
{
	int cpu;
	int size;
	struct callchain_cpus_entries *entries;

	/*
	 * We can't use the percpu allocation API for data that can be
	 * accessed from NMI. Use a temporary manual per cpu allocation
	 * until that gets sorted out.
	 */
	size = offsetof(struct callchain_cpus_entries, cpu_entries[nr_cpu_ids]);

	entries = kzalloc(size, GFP_KERNEL);
	if (!entries)
		return -ENOMEM;

	size = perf_callchain_entry__sizeof() * PERF_NR_CONTEXTS;

	for_each_possible_cpu(cpu) {
		entries->cpu_entries[cpu] = kmalloc_node(size, GFP_KERNEL,
							 cpu_to_node(cpu));
		if (!entries->cpu_entries[cpu])
			goto fail;
	}

	rcu_assign_pointer(callchain_cpus_entries, entries);

	return 0;

fail:
	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);
	kfree(entries);

	return -ENOMEM;
}

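/*
 * Take a reference on the shared callchain buffers, allocating them on
 * first use. Fails with -EOVERFLOW if @event_max_stack exceeds the
 * global sysctl limit, or -ENOMEM if the allocation failed.
 */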
int get_callchain_buffers(int event_max_stack)
{
	int err = 0;
	int count;

	mutex_lock(&callchain_mutex);

	count = atomic_inc_return(&nr_callchain_events);
	if (WARN_ON_ONCE(count < 1)) {
		err = -EINVAL;
		goto exit;
	}

	/*
	 * If requesting per event more than the global cap,
	 * return a different error to help userspace figure
	 * this out.
	 *
	 * This check applies to the first event as well, and has to
	 * be done here so that we have &callchain_mutex held.
	 */
	if (event_max_stack > sysctl_perf_event_max_stack) {
		err = -EOVERFLOW;
		goto exit;
	}

	if (count > 1) {
		/* If the allocation failed, give up */
		if (!callchain_cpus_entries)
			err = -ENOMEM;
		goto exit;
	}

	err = alloc_callchain_buffers();
exit:
	if (err)
		atomic_dec(&nr_callchain_events);

	mutex_unlock(&callchain_mutex);

	return err;
}

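/*
 * Drop a reference; the last user schedules the buffers for release
 * after an RCU grace period.
 */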
void put_callchain_buffers(void)
{
	if (atomic_dec_and_mutex_lock(&nr_callchain_events, &callchain_mutex)) {
		release_callchain_buffers();
		mutex_unlock(&callchain_mutex);
	}
}

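/*
 * Claim this CPU's scratch entry for the current recursion context.
 * Returns NULL with *rctx == -1 if we are already unwinding in this
 * context, or NULL with a valid *rctx if the buffers are gone; in the
 * latter case the caller must still drop the recursion context.
 */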
static struct perf_callchain_entry *get_callchain_entry(int *rctx)
{
	int cpu;
	struct callchain_cpus_entries *entries;

	*rctx = get_recursion_context(this_cpu_ptr(callchain_recursion));
	if (*rctx == -1)
		return NULL;

	entries = rcu_dereference(callchain_cpus_entries);
	if (!entries)
		return NULL;

	cpu = smp_processor_id();

	/* Each CPU's buffer holds one entry per recursion context. */
	return (((void *)entries->cpu_entries[cpu]) +
		(*rctx * perf_callchain_entry__sizeof()));
}

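/* Release the recursion slot claimed by get_callchain_entry(). */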
static void
put_callchain_entry(int rctx)
{
	put_recursion_context(this_cpu_ptr(callchain_recursion), rctx);
}

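/*
 * Sampling entry point: honour the event's exclude_callchain_* bits
 * and its per-event stack depth limit.
 */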
struct perf_callchain_entry *
perf_callchain(struct perf_event *event, struct pt_regs *regs)
{
	bool kernel = !event->attr.exclude_callchain_kernel;
	bool user   = !event->attr.exclude_callchain_user;
	/* Disallow cross-task user callchains. */
	bool crosstask = event->ctx->task && event->ctx->task != current;
	const u32 max_stack = event->attr.sample_max_stack;

	if (!kernel && !user)
		return NULL;

	return get_perf_callchain(regs, 0, kernel, user, max_stack, crosstask, true);
}

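/*
 * Unwind into this CPU's scratch entry, storing PERF_CONTEXT_* markers
 * at the kernel/user transitions when @add_mark is set.
 */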
struct perf_callchain_entry *
get_perf_callchain(struct pt_regs *regs, u32 init_nr, bool kernel, bool user,
		   u32 max_stack, bool crosstask, bool add_mark)
{
	struct perf_callchain_entry *entry;
	struct perf_callchain_entry_ctx ctx;
	int rctx;

	entry = get_callchain_entry(&rctx);
	if (rctx == -1)
		return NULL;

	if (!entry)
		goto exit_put;

	ctx.entry          = entry;
	ctx.max_stack      = max_stack;
	ctx.nr             = entry->nr = init_nr;
	ctx.contexts       = 0;
	ctx.contexts_maxed = false;

	if (kernel && !user_mode(regs)) {
		if (add_mark)
			perf_callchain_store_context(&ctx, PERF_CONTEXT_KERNEL);
		perf_callchain_kernel(&ctx, regs);
	}

	if (user) {
		if (!user_mode(regs)) {
			/* Fall back to the task's saved user regs, if any. */
			if (current->mm)
				regs = task_pt_regs(current);
			else
				regs = NULL;
		}

		if (regs) {
			if (crosstask)
				goto exit_put;

			if (add_mark)
				perf_callchain_store_context(&ctx, PERF_CONTEXT_USER);
			perf_callchain_user(&ctx, regs);
		}
	}

exit_put:
	put_callchain_entry(rctx);

	return entry;
}

/*
 * Used for sysctl_perf_event_max_stack and
 * sysctl_perf_event_max_contexts_per_stack.
 *
 * The limits cannot change while any event is using the callchain
 * buffers, since the buffers are sized according to them; such writes
 * are refused with -EBUSY.
 */
int perf_event_max_stack_handler(struct ctl_table *table, int write,
				 void __user *buffer, size_t *lenp, loff_t *ppos)
{
	int *value = table->data;
	int new_value = *value, ret;
	struct ctl_table new_table = *table;

	new_table.data = &new_value;
	ret = proc_dointvec_minmax(&new_table, write, buffer, lenp, ppos);
	if (ret || !write)
		return ret;

	mutex_lock(&callchain_mutex);
	if (atomic_read(&nr_callchain_events))
		ret = -EBUSY;
	else
		*value = new_value;

	mutex_unlock(&callchain_mutex);

	return ret;
}