kernel/trace/trace_events.c

/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#define pr_fmt(fmt) fmt

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/tracefs.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <trace/events/sched.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);
static LIST_HEAD(ftrace_generic_fields);
static LIST_HEAD(ftrace_common_fields);

#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)

static struct kmem_cache *field_cachep;
static struct kmem_cache *file_cachep;

static inline int system_refcount(struct event_subsystem *system)
{
        return system->ref_count;
}

static int system_refcount_inc(struct event_subsystem *system)
{
        return system->ref_count++;
}

static int system_refcount_dec(struct event_subsystem *system)
{
        return --system->ref_count;
}

/* Double loops, do not use break, only gotos work */
#define do_for_each_event_file(tr, file)                        \
        list_for_each_entry(tr, &ftrace_trace_arrays, list) {   \
                list_for_each_entry(file, &tr->events, list)

#define do_for_each_event_file_safe(tr, file)                   \
        list_for_each_entry(tr, &ftrace_trace_arrays, list) {   \
                struct trace_event_file *___n;                          \
                list_for_each_entry_safe(file, ___n, &tr->events, list)

#define while_for_each_event_file()             \
        }

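/*
 * Informal usage sketch (trace_event_enable_cmd_record() below is a
 * real caller): the opening macro leaves a dangling '{' that
 * while_for_each_event_file() closes, so a plain break would only
 * exit the inner loop:
 *
 *      do_for_each_event_file(tr, file) {
 *              ...
 *      } while_for_each_event_file();
 */
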
static struct list_head *
trace_get_fields(struct trace_event_call *event_call)
{
        if (!event_call->class->get_fields)
                return &event_call->class->fields;
        return event_call->class->get_fields(event_call);
}

static struct ftrace_event_field *
__find_event_field(struct list_head *head, char *name)
{
        struct ftrace_event_field *field;

        list_for_each_entry(field, head, link) {
                if (!strcmp(field->name, name))
                        return field;
        }

        return NULL;
}

struct ftrace_event_field *
trace_find_event_field(struct trace_event_call *call, char *name)
{
        struct ftrace_event_field *field;
        struct list_head *head;

        field = __find_event_field(&ftrace_generic_fields, name);
        if (field)
                return field;

        field = __find_event_field(&ftrace_common_fields, name);
        if (field)
                return field;

        head = trace_get_fields(call);
        return __find_event_field(head, name);
}

static int __trace_define_field(struct list_head *head, const char *type,
                                const char *name, int offset, int size,
                                int is_signed, int filter_type)
{
        struct ftrace_event_field *field;

        field = kmem_cache_alloc(field_cachep, GFP_TRACE);
        if (!field)
                return -ENOMEM;

        field->name = name;
        field->type = type;

        if (filter_type == FILTER_OTHER)
                field->filter_type = filter_assign_type(type);
        else
                field->filter_type = filter_type;

        field->offset = offset;
        field->size = size;
        field->is_signed = is_signed;

        list_add(&field->link, head);

        return 0;
}

int trace_define_field(struct trace_event_call *call, const char *type,
                       const char *name, int offset, int size, int is_signed,
                       int filter_type)
{
        struct list_head *head;

        if (WARN_ON(!call->class))
                return 0;

        head = trace_get_fields(call);
        return __trace_define_field(head, type, name, offset, size,
                                    is_signed, filter_type);
}
EXPORT_SYMBOL_GPL(trace_define_field);

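/*
 * Hedged example of the exported API: a custom event class's
 * define_fields callback might describe one entry field like this
 * (the entry struct and field names are illustrative, not from this
 * file):
 *
 *      ret = trace_define_field(call, "pid_t", "next_pid",
 *                               offsetof(struct foo_entry, next_pid),
 *                               sizeof(pid_t), is_signed_type(pid_t),
 *                               FILTER_OTHER);
 */
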
#define __generic_field(type, item, filter_type)                        \
        ret = __trace_define_field(&ftrace_generic_fields, #type,       \
                                   #item, 0, 0, is_signed_type(type),   \
                                   filter_type);                        \
        if (ret)                                                        \
                return ret;

#define __common_field(type, item)                                      \
        ret = __trace_define_field(&ftrace_common_fields, #type,        \
                                   "common_" #item,                     \
                                   offsetof(typeof(ent), item),         \
                                   sizeof(ent.item),                    \
                                   is_signed_type(type), FILTER_OTHER); \
        if (ret)                                                        \
                return ret;

static int trace_define_generic_fields(void)
{
        int ret;

        __generic_field(int, cpu, FILTER_OTHER);
        __generic_field(char *, comm, FILTER_PTR_STRING);

        return ret;
}

static int trace_define_common_fields(void)
{
        int ret;
        struct trace_entry ent;

        __common_field(unsigned short, type);
        __common_field(unsigned char, flags);
        __common_field(unsigned char, preempt_count);
        __common_field(int, pid);

        return ret;
}

static void trace_destroy_fields(struct trace_event_call *call)
{
        struct ftrace_event_field *field, *next;
        struct list_head *head;

        head = trace_get_fields(call);
        list_for_each_entry_safe(field, next, head, link) {
                list_del(&field->link);
                kmem_cache_free(field_cachep, field);
        }
}

int trace_event_raw_init(struct trace_event_call *call)
{
        int id;

        id = register_trace_event(&call->event);
        if (!id)
                return -ENODEV;

        return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);

bool trace_event_ignore_this_pid(struct trace_event_file *trace_file)
{
        struct trace_array *tr = trace_file->tr;
        struct trace_array_cpu *data;
        struct trace_pid_list *pid_list;

        pid_list = rcu_dereference_sched(tr->filtered_pids);
        if (!pid_list)
                return false;

        data = this_cpu_ptr(tr->trace_buffer.data);

        return data->ignore_pid;
}
EXPORT_SYMBOL_GPL(trace_event_ignore_this_pid);

void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
                                 struct trace_event_file *trace_file,
                                 unsigned long len)
{
        struct trace_event_call *event_call = trace_file->event_call;

        if ((trace_file->flags & EVENT_FILE_FL_PID_FILTER) &&
            trace_event_ignore_this_pid(trace_file))
                return NULL;

        local_save_flags(fbuffer->flags);
        fbuffer->pc = preempt_count();
        fbuffer->trace_file = trace_file;

        fbuffer->event =
                trace_event_buffer_lock_reserve(&fbuffer->buffer, trace_file,
                                                event_call->event.type, len,
                                                fbuffer->flags, fbuffer->pc);
        if (!fbuffer->event)
                return NULL;

        fbuffer->entry = ring_buffer_event_data(fbuffer->event);
        return fbuffer->entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_reserve);

static DEFINE_SPINLOCK(tracepoint_iter_lock);

static void output_printk(struct trace_event_buffer *fbuffer)
{
        struct trace_event_call *event_call;
        struct trace_event *event;
        unsigned long flags;
        struct trace_iterator *iter = tracepoint_print_iter;

        if (!iter)
                return;

        event_call = fbuffer->trace_file->event_call;
        if (!event_call || !event_call->event.funcs ||
            !event_call->event.funcs->trace)
                return;

        event = &fbuffer->trace_file->event_call->event;

        spin_lock_irqsave(&tracepoint_iter_lock, flags);
        trace_seq_init(&iter->seq);
        iter->ent = fbuffer->entry;
        event_call->event.funcs->trace(iter, 0, event);
        trace_seq_putc(&iter->seq, 0);
        printk("%s", iter->seq.buffer);

        spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
}

void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
{
        if (tracepoint_printk)
                output_printk(fbuffer);

        event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
                                    fbuffer->event, fbuffer->entry,
                                    fbuffer->flags, fbuffer->pc);
}
EXPORT_SYMBOL_GPL(trace_event_buffer_commit);

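/*
 * Hedged sketch of the reserve/fill/commit pattern that the generated
 * trace_event_raw_event_*() probes follow (the entry struct and its
 * field are illustrative, not the generated code itself):
 *
 *      struct trace_event_buffer fbuffer;
 *      struct trace_event_raw_foo *entry;
 *
 *      entry = trace_event_buffer_reserve(&fbuffer, trace_file,
 *                                         sizeof(*entry));
 *      if (!entry)
 *              return;
 *      entry->bar = bar;               (fill in event-specific fields)
 *      trace_event_buffer_commit(&fbuffer);
 */
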
int trace_event_reg(struct trace_event_call *call,
                    enum trace_reg type, void *data)
{
        struct trace_event_file *file = data;

        WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT));
        switch (type) {
        case TRACE_REG_REGISTER:
                return tracepoint_probe_register(call->tp,
                                                 call->class->probe,
                                                 file);
        case TRACE_REG_UNREGISTER:
                tracepoint_probe_unregister(call->tp,
                                            call->class->probe,
                                            file);
                return 0;

#ifdef CONFIG_PERF_EVENTS
        case TRACE_REG_PERF_REGISTER:
                return tracepoint_probe_register(call->tp,
                                                 call->class->perf_probe,
                                                 call);
        case TRACE_REG_PERF_UNREGISTER:
                tracepoint_probe_unregister(call->tp,
                                            call->class->perf_probe,
                                            call);
                return 0;
        case TRACE_REG_PERF_OPEN:
        case TRACE_REG_PERF_CLOSE:
        case TRACE_REG_PERF_ADD:
        case TRACE_REG_PERF_DEL:
                return 0;
#endif
        }
        return 0;
}
EXPORT_SYMBOL_GPL(trace_event_reg);

void trace_event_enable_cmd_record(bool enable)
{
        struct trace_event_file *file;
        struct trace_array *tr;

        mutex_lock(&event_mutex);
        do_for_each_event_file(tr, file) {

                if (!(file->flags & EVENT_FILE_FL_ENABLED))
                        continue;

                if (enable) {
                        tracing_start_cmdline_record();
                        set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
                } else {
                        tracing_stop_cmdline_record();
                        clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
                }
        } while_for_each_event_file();
        mutex_unlock(&event_mutex);
}

static int __ftrace_event_enable_disable(struct trace_event_file *file,
                                         int enable, int soft_disable)
{
        struct trace_event_call *call = file->event_call;
        struct trace_array *tr = file->tr;
        int ret = 0;
        int disable;

        switch (enable) {
        case 0:
                /*
                 * When soft_disable is set and enable is cleared, the sm_ref
                 * reference counter is decremented. If it reaches 0, we want
                 * to clear the SOFT_DISABLED flag but leave the event in the
                 * state that it was. That is, if the event was enabled and
                 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
                 * is set we do not want the event to be enabled before we
                 * clear the bit.
                 *
                 * When soft_disable is not set but the SOFT_MODE flag is,
                 * we do nothing. Do not disable the tracepoint, otherwise
                 * "soft enable"s (clearing the SOFT_DISABLED bit) won't work.
                 */
                if (soft_disable) {
                        if (atomic_dec_return(&file->sm_ref) > 0)
                                break;
                        disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED;
                        clear_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
                } else
                        disable = !(file->flags & EVENT_FILE_FL_SOFT_MODE);

                if (disable && (file->flags & EVENT_FILE_FL_ENABLED)) {
                        clear_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);
                        if (file->flags & EVENT_FILE_FL_RECORDED_CMD) {
                                tracing_stop_cmdline_record();
                                clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
                        }
                        call->class->reg(call, TRACE_REG_UNREGISTER, file);
                }
                /* If in SOFT_MODE, just set the SOFT_DISABLED_BIT, else clear it */
                if (file->flags & EVENT_FILE_FL_SOFT_MODE)
                        set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
                else
                        clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
                break;
        case 1:
                /*
                 * When soft_disable is set and enable is set, we want to
                 * register the tracepoint for the event, but leave the event
                 * as is. That means, if the event was already enabled, we do
                 * nothing (but set SOFT_MODE). If the event is disabled, we
                 * set SOFT_DISABLED before enabling the event tracepoint, so
                 * it still seems to be disabled.
                 */
                if (!soft_disable)
                        clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
                else {
                        if (atomic_inc_return(&file->sm_ref) > 1)
                                break;
                        set_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
                }

                if (!(file->flags & EVENT_FILE_FL_ENABLED)) {

                        /* Keep the event disabled, when going to SOFT_MODE. */
                        if (soft_disable)
                                set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);

                        if (tr->trace_flags & TRACE_ITER_RECORD_CMD) {
                                tracing_start_cmdline_record();
                                set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
                        }
                        ret = call->class->reg(call, TRACE_REG_REGISTER, file);
                        if (ret) {
                                tracing_stop_cmdline_record();
                                pr_info("event trace: Could not enable event %s\n",
                                        trace_event_name(call));
                                break;
                        }
                        set_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);

                        /* WAS_ENABLED gets set but never cleared. */
                        call->flags |= TRACE_EVENT_FL_WAS_ENABLED;
                }
                break;
        }

        return ret;
}

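/*
 * Informal summary of the four cases above (derived from the code):
 *
 *      enable=0, soft_disable=0: fully disable, unless SOFT_MODE is
 *              still set, in which case only SOFT_DISABLED is raised.
 *      enable=0, soft_disable=1: drop an sm_ref; on the last one,
 *              clear SOFT_MODE and unregister the event if it was
 *              only soft-enabled.
 *      enable=1, soft_disable=0: fully enable; SOFT_DISABLED cleared.
 *      enable=1, soft_disable=1: take an sm_ref; on the first one,
 *              set SOFT_MODE and register the tracepoint while the
 *              event still reads as disabled.
 */
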
int trace_event_enable_disable(struct trace_event_file *file,
                               int enable, int soft_disable)
{
        return __ftrace_event_enable_disable(file, enable, soft_disable);
}

static int ftrace_event_enable_disable(struct trace_event_file *file,
                                       int enable)
{
        return __ftrace_event_enable_disable(file, enable, 0);
}

static void ftrace_clear_events(struct trace_array *tr)
{
        struct trace_event_file *file;

        mutex_lock(&event_mutex);
        list_for_each_entry(file, &tr->events, list) {
                ftrace_event_enable_disable(file, 0);
        }
        mutex_unlock(&event_mutex);
}

static int cmp_pid(const void *key, const void *elt)
{
        const pid_t *search_pid = key;
        const pid_t *pid = elt;

        if (*search_pid == *pid)
                return 0;
        if (*search_pid < *pid)
                return -1;
        return 1;
}

static bool
check_ignore_pid(struct trace_pid_list *filtered_pids, struct task_struct *task)
{
        pid_t search_pid;
        pid_t *pid;

        /*
         * Return false, because if filtered_pids does not exist,
         * all pids are good to trace.
         */
        if (!filtered_pids)
                return false;

        search_pid = task->pid;

        pid = bsearch(&search_pid, filtered_pids->pids,
                      filtered_pids->nr_pids, sizeof(pid_t),
                      cmp_pid);
        if (!pid)
                return true;

        return false;
}

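/*
 * Illustrative example: with filtered_pids->pids = {10, 20, 30}, a
 * task with pid 20 is traced (bsearch() matches, so this returns
 * false), while pid 25 is ignored (no match, return true).
 */
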
static void
event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
                    struct task_struct *prev, struct task_struct *next)
{
        struct trace_array *tr = data;
        struct trace_pid_list *pid_list;

        pid_list = rcu_dereference_sched(tr->filtered_pids);

        this_cpu_write(tr->trace_buffer.data->ignore_pid,
                       check_ignore_pid(pid_list, prev) &&
                       check_ignore_pid(pid_list, next));
}

static void
event_filter_pid_sched_switch_probe_post(void *data, bool preempt,
                    struct task_struct *prev, struct task_struct *next)
{
        struct trace_array *tr = data;
        struct trace_pid_list *pid_list;

        pid_list = rcu_dereference_sched(tr->filtered_pids);

        this_cpu_write(tr->trace_buffer.data->ignore_pid,
                       check_ignore_pid(pid_list, next));
}

static void
event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task)
{
        struct trace_array *tr = data;
        struct trace_pid_list *pid_list;

        /* Nothing to do if we are already tracing */
        if (!this_cpu_read(tr->trace_buffer.data->ignore_pid))
                return;

        pid_list = rcu_dereference_sched(tr->filtered_pids);

        this_cpu_write(tr->trace_buffer.data->ignore_pid,
                       check_ignore_pid(pid_list, task));
}

static void
event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task)
{
        struct trace_array *tr = data;
        struct trace_pid_list *pid_list;

        /* Nothing to do if we are not tracing */
        if (this_cpu_read(tr->trace_buffer.data->ignore_pid))
                return;

        pid_list = rcu_dereference_sched(tr->filtered_pids);

        /* Set tracing if current is enabled */
        this_cpu_write(tr->trace_buffer.data->ignore_pid,
                       check_ignore_pid(pid_list, current));
}

static void __ftrace_clear_event_pids(struct trace_array *tr)
{
        struct trace_pid_list *pid_list;
        struct trace_event_file *file;
        int cpu;

        pid_list = rcu_dereference_protected(tr->filtered_pids,
                                             lockdep_is_held(&event_mutex));
        if (!pid_list)
                return;

        unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_pre, tr);
        unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_post, tr);

        unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, tr);
        unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr);

        unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre, tr);
        unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post, tr);

        unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_pre, tr);
        unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_post, tr);

        list_for_each_entry(file, &tr->events, list) {
                clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
        }

        for_each_possible_cpu(cpu)
                per_cpu_ptr(tr->trace_buffer.data, cpu)->ignore_pid = false;

        rcu_assign_pointer(tr->filtered_pids, NULL);

        /* Wait till all users are no longer using pid filtering */
        synchronize_sched();

        free_pages((unsigned long)pid_list->pids, pid_list->order);
        kfree(pid_list);
}

static void ftrace_clear_event_pids(struct trace_array *tr)
{
        mutex_lock(&event_mutex);
        __ftrace_clear_event_pids(tr);
        mutex_unlock(&event_mutex);
}

static void __put_system(struct event_subsystem *system)
{
        struct event_filter *filter = system->filter;

        WARN_ON_ONCE(system_refcount(system) == 0);
        if (system_refcount_dec(system))
                return;

        list_del(&system->list);

        if (filter) {
                kfree(filter->filter_string);
                kfree(filter);
        }
        kfree_const(system->name);
        kfree(system);
}

static void __get_system(struct event_subsystem *system)
{
        WARN_ON_ONCE(system_refcount(system) == 0);
        system_refcount_inc(system);
}

static void __get_system_dir(struct trace_subsystem_dir *dir)
{
        WARN_ON_ONCE(dir->ref_count == 0);
        dir->ref_count++;
        __get_system(dir->subsystem);
}

static void __put_system_dir(struct trace_subsystem_dir *dir)
{
        WARN_ON_ONCE(dir->ref_count == 0);
        /* If the subsystem is about to be freed, the dir must be too */
        WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);

        __put_system(dir->subsystem);
        if (!--dir->ref_count)
                kfree(dir);
}

static void put_system(struct trace_subsystem_dir *dir)
{
        mutex_lock(&event_mutex);
        __put_system_dir(dir);
        mutex_unlock(&event_mutex);
}

static void remove_subsystem(struct trace_subsystem_dir *dir)
{
        if (!dir)
                return;

        if (!--dir->nr_events) {
                tracefs_remove_recursive(dir->entry);
                list_del(&dir->list);
                __put_system_dir(dir);
        }
}

static void remove_event_file_dir(struct trace_event_file *file)
{
        struct dentry *dir = file->dir;
        struct dentry *child;

        if (dir) {
                spin_lock(&dir->d_lock);        /* probably unneeded */
                list_for_each_entry(child, &dir->d_subdirs, d_child) {
                        if (d_really_is_positive(child))        /* probably unneeded */
                                d_inode(child)->i_private = NULL;
                }
                spin_unlock(&dir->d_lock);

                tracefs_remove_recursive(dir);
        }

        list_del(&file->list);
        remove_subsystem(file->system);
        free_event_filter(file->filter);
        kmem_cache_free(file_cachep, file);
}

/*
 * __ftrace_set_clr_event(NULL, NULL, NULL, set) will set/unset all events.
 */
static int
__ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
                              const char *sub, const char *event, int set)
{
        struct trace_event_file *file;
        struct trace_event_call *call;
        const char *name;
        int ret = -EINVAL;

        list_for_each_entry(file, &tr->events, list) {

                call = file->event_call;
                name = trace_event_name(call);

                if (!name || !call->class || !call->class->reg)
                        continue;

                if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
                        continue;

                if (match &&
                    strcmp(match, name) != 0 &&
                    strcmp(match, call->class->system) != 0)
                        continue;

                if (sub && strcmp(sub, call->class->system) != 0)
                        continue;

                if (event && strcmp(event, name) != 0)
                        continue;

                ftrace_event_enable_disable(file, set);

                ret = 0;
        }

        return ret;
}

static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
                                  const char *sub, const char *event, int set)
{
        int ret;

        mutex_lock(&event_mutex);
        ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
        mutex_unlock(&event_mutex);

        return ret;
}

static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
{
        char *event = NULL, *sub = NULL, *match;
        int ret;

        /*
         * The buf format can be <subsystem>:<event-name>
         *  *:<event-name> means any event by that name.
         *  :<event-name> is the same.
         *
         *  <subsystem>:* means all events in that subsystem
         *  <subsystem>: means the same.
         *
         *  <name> (no ':') means all events in a subsystem with
         *  the name <name> or any event that matches <name>
         */

        match = strsep(&buf, ":");
        if (buf) {
                sub = match;
                event = buf;
                match = NULL;

                if (!strlen(sub) || strcmp(sub, "*") == 0)
                        sub = NULL;
                if (!strlen(event) || strcmp(event, "*") == 0)
                        event = NULL;
        }

        ret = __ftrace_set_clr_event(tr, match, sub, event, set);

        /* Put back the colon to allow this to be called again */
        if (buf)
                *(buf - 1) = ':';

        return ret;
}

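/*
 * Example interactions with the set_event tracefs file, which feeds
 * this parser (see Documentation/trace/events.txt):
 *
 *      echo 'irq:irq_handler_entry' > set_event       # one event
 *      echo 'sched:*' >> set_event                    # whole subsystem
 *      echo '!irq:irq_handler_entry' >> set_event     # disable again
 */
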
/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
        struct trace_array *tr = top_trace_array();

        if (!tr)
                return -ENODEV;

        return __ftrace_set_clr_event(tr, NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);

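/*
 * Hedged in-kernel usage sketch: enable every event in the "sched"
 * subsystem from another part of the kernel:
 *
 *      int ret = trace_set_clr_event("sched", NULL, 1);
 *      if (ret)
 *              pr_warn("no matching events\n");
 */
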
/* 128 should be much more than enough */
#define EVENT_BUF_SIZE          127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos)
{
        struct trace_parser parser;
        struct seq_file *m = file->private_data;
        struct trace_array *tr = m->private;
        ssize_t read, ret;

        if (!cnt)
                return 0;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
                return -ENOMEM;

        read = trace_get_user(&parser, ubuf, cnt, ppos);

        if (read >= 0 && trace_parser_loaded(&parser)) {
                int set = 1;

                if (*parser.buffer == '!')
                        set = 0;

                parser.buffer[parser.idx] = 0;

                ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
                if (ret)
                        goto out_put;
        }

        ret = read;

 out_put:
        trace_parser_put(&parser);

        return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct trace_event_file *file = v;
        struct trace_event_call *call;
        struct trace_array *tr = m->private;

        (*pos)++;

        list_for_each_entry_continue(file, &tr->events, list) {
                call = file->event_call;
                /*
                 * The ftrace subsystem is for showing formats only.
                 * They cannot be enabled or disabled via the event files.
                 */
                if (call->class && call->class->reg)
                        return file;
        }

        return NULL;
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
        struct trace_event_file *file;
        struct trace_array *tr = m->private;
        loff_t l;

        mutex_lock(&event_mutex);

        file = list_entry(&tr->events, struct trace_event_file, list);
        for (l = 0; l <= *pos; ) {
                file = t_next(m, file, &l);
                if (!file)
                        break;
        }
        return file;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct trace_event_file *file = v;
        struct trace_array *tr = m->private;

        (*pos)++;

        list_for_each_entry_continue(file, &tr->events, list) {
                if (file->flags & EVENT_FILE_FL_ENABLED)
                        return file;
        }

        return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
        struct trace_event_file *file;
        struct trace_array *tr = m->private;
        loff_t l;

        mutex_lock(&event_mutex);

        file = list_entry(&tr->events, struct trace_event_file, list);
        for (l = 0; l <= *pos; ) {
                file = s_next(m, file, &l);
                if (!file)
                        break;
        }
        return file;
}

static int t_show(struct seq_file *m, void *v)
{
        struct trace_event_file *file = v;
        struct trace_event_call *call = file->event_call;

        if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
                seq_printf(m, "%s:", call->class->system);
        seq_printf(m, "%s\n", trace_event_name(call));

        return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
        mutex_unlock(&event_mutex);
}

static void *p_start(struct seq_file *m, loff_t *pos)
        __acquires(RCU)
{
        struct trace_pid_list *pid_list;
        struct trace_array *tr = m->private;

        /*
         * Grab the mutex, to keep calls to p_next() having the same
         * tr->filtered_pids as p_start() has.
         * If we just passed the tr->filtered_pids around, then RCU would
         * have been enough, but doing that makes things more complex.
         */
        mutex_lock(&event_mutex);
        rcu_read_lock_sched();

        pid_list = rcu_dereference_sched(tr->filtered_pids);

        if (!pid_list || *pos >= pid_list->nr_pids)
                return NULL;

        return (void *)&pid_list->pids[*pos];
}

static void p_stop(struct seq_file *m, void *p)
        __releases(RCU)
{
        rcu_read_unlock_sched();
        mutex_unlock(&event_mutex);
}

static void *
p_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct trace_array *tr = m->private;
        struct trace_pid_list *pid_list = rcu_dereference_sched(tr->filtered_pids);

        (*pos)++;

        if (*pos >= pid_list->nr_pids)
                return NULL;

        return (void *)&pid_list->pids[*pos];
}

static int p_show(struct seq_file *m, void *v)
{
        pid_t *pid = v;

        seq_printf(m, "%d\n", *pid);
        return 0;
}

static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        struct trace_event_file *file;
        unsigned long flags;
        char buf[4] = "0";

        mutex_lock(&event_mutex);
        file = event_file_data(filp);
        if (likely(file))
                flags = file->flags;
        mutex_unlock(&event_mutex);

        if (!file)
                return -ENODEV;

        if (flags & EVENT_FILE_FL_ENABLED &&
            !(flags & EVENT_FILE_FL_SOFT_DISABLED))
                strcpy(buf, "1");

        if (flags & EVENT_FILE_FL_SOFT_DISABLED ||
            flags & EVENT_FILE_FL_SOFT_MODE)
                strcat(buf, "*");

        strcat(buf, "\n");

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
}

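/*
 * Informal decoding of the read value: "0" disabled, "1" enabled; a
 * trailing '*' (e.g. "0*") means the event is in soft mode or is
 * soft-disabled, typically because a trigger still references it.
 */
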
static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
        struct trace_event_file *file;
        unsigned long val;
        int ret;

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        switch (val) {
        case 0:
        case 1:
                ret = -ENODEV;
                mutex_lock(&event_mutex);
                file = event_file_data(filp);
                if (likely(file))
                        ret = ftrace_event_enable_disable(file, val);
                mutex_unlock(&event_mutex);
                break;

        default:
                return -EINVAL;
        }

        *ppos += cnt;

        return ret ? ret : cnt;
}

static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
        const char set_to_char[4] = { '?', '0', '1', 'X' };
        struct trace_subsystem_dir *dir = filp->private_data;
        struct event_subsystem *system = dir->subsystem;
        struct trace_event_call *call;
        struct trace_event_file *file;
        struct trace_array *tr = dir->tr;
        char buf[2];
        int set = 0;
        int ret;

        mutex_lock(&event_mutex);
        list_for_each_entry(file, &tr->events, list) {
                call = file->event_call;
                if (!trace_event_name(call) || !call->class || !call->class->reg)
                        continue;

                if (system && strcmp(call->class->system, system->name) != 0)
                        continue;

                /*
                 * We need to find out if all the events are set,
                 * if all events are cleared, or if we have a mixture.
                 */
                set |= (1 << !!(file->flags & EVENT_FILE_FL_ENABLED));

                /*
                 * If we have a mixture, no need to look further.
                 */
                if (set == 3)
                        break;
        }
        mutex_unlock(&event_mutex);

        buf[0] = set_to_char[set];
        buf[1] = '\n';

        ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

        return ret;
}

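/*
 * The two-bit 'set' accumulator indexes set_to_char: 0 -> '?' (no
 * matching events), 1 -> '0' (all disabled), 2 -> '1' (all enabled),
 * 3 -> 'X' (a mixture of both).
 */
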
static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
                    loff_t *ppos)
{
        struct trace_subsystem_dir *dir = filp->private_data;
        struct event_subsystem *system = dir->subsystem;
        const char *name = NULL;
        unsigned long val;
        ssize_t ret;

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        if (val != 0 && val != 1)
                return -EINVAL;

        /*
         * Opening of "enable" adds a ref count to system,
         * so the name is safe to use.
         */
        if (system)
                name = system->name;

        ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
        if (ret)
                goto out;

        ret = cnt;

out:
        *ppos += cnt;

        return ret;
}

enum {
        FORMAT_HEADER           = 1,
        FORMAT_FIELD_SEPERATOR  = 2,
        FORMAT_PRINTFMT         = 3,
};

static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct trace_event_call *call = event_file_data(m->private);
        struct list_head *common_head = &ftrace_common_fields;
        struct list_head *head = trace_get_fields(call);
        struct list_head *node = v;

        (*pos)++;

        switch ((unsigned long)v) {
        case FORMAT_HEADER:
                node = common_head;
                break;

        case FORMAT_FIELD_SEPERATOR:
                node = head;
                break;

        case FORMAT_PRINTFMT:
                /* all done */
                return NULL;
        }

        node = node->prev;
        if (node == common_head)
                return (void *)FORMAT_FIELD_SEPERATOR;
        else if (node == head)
                return (void *)FORMAT_PRINTFMT;
        else
                return node;
}

static int f_show(struct seq_file *m, void *v)
{
        struct trace_event_call *call = event_file_data(m->private);
        struct ftrace_event_field *field;
        const char *array_descriptor;

        switch ((unsigned long)v) {
        case FORMAT_HEADER:
                seq_printf(m, "name: %s\n", trace_event_name(call));
                seq_printf(m, "ID: %d\n", call->event.type);
                seq_puts(m, "format:\n");
                return 0;

        case FORMAT_FIELD_SEPERATOR:
                seq_putc(m, '\n');
                return 0;

        case FORMAT_PRINTFMT:
                seq_printf(m, "\nprint fmt: %s\n",
                           call->print_fmt);
                return 0;
        }

        field = list_entry(v, struct ftrace_event_field, link);
        /*
         * Smartly shows the array type (except for dynamic arrays).
         * Normal:
         *      field:TYPE VAR
         * If TYPE := TYPE[LEN], it is shown:
         *      field:TYPE VAR[LEN]
         */
        array_descriptor = strchr(field->type, '[');

        if (!strncmp(field->type, "__data_loc", 10))
                array_descriptor = NULL;

        if (!array_descriptor)
                seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
                           field->type, field->name, field->offset,
                           field->size, !!field->is_signed);
        else
                seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
                           (int)(array_descriptor - field->type),
                           field->type, field->name,
                           array_descriptor, field->offset,
                           field->size, !!field->is_signed);

        return 0;
}

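/*
 * Example of one resulting field line in a "format" file (common_type
 * is the first common field on every event):
 *
 *      field:unsigned short common_type;  offset:0;  size:2;  signed:0;
 */
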
static void *f_start(struct seq_file *m, loff_t *pos)
{
        void *p = (void *)FORMAT_HEADER;
        loff_t l = 0;

        /* ->stop() is called even if ->start() fails */
        mutex_lock(&event_mutex);
        if (!event_file_data(m->private))
                return ERR_PTR(-ENODEV);

        while (l < *pos && p)
                p = f_next(m, p, &l);

        return p;
}

static void f_stop(struct seq_file *m, void *p)
{
        mutex_unlock(&event_mutex);
}

static const struct seq_operations trace_format_seq_ops = {
        .start          = f_start,
        .next           = f_next,
        .stop           = f_stop,
        .show           = f_show,
};

static int trace_format_open(struct inode *inode, struct file *file)
{
        struct seq_file *m;
        int ret;

        ret = seq_open(file, &trace_format_seq_ops);
        if (ret < 0)
                return ret;

        m = file->private_data;
        m->private = file;

        return 0;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
        int id = (long)event_file_data(filp);
        char buf[32];
        int len;

        if (*ppos)
                return 0;

        if (unlikely(!id))
                return -ENODEV;

        len = sprintf(buf, "%d\n", id);

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        struct trace_event_file *file;
        struct trace_seq *s;
        int r = -ENODEV;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);

        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        mutex_lock(&event_mutex);
        file = event_file_data(filp);
        if (file)
                print_event_filter(file, s);
        mutex_unlock(&event_mutex);

        if (file)
                r = simple_read_from_buffer(ubuf, cnt, ppos,
                                            s->buffer, trace_seq_used(s));

        kfree(s);

        return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
        struct trace_event_file *file;
        char *buf;
        int err = -ENODEV;

        if (cnt >= PAGE_SIZE)
                return -EINVAL;

        buf = memdup_user_nul(ubuf, cnt);
        if (IS_ERR(buf))
                return PTR_ERR(buf);

        mutex_lock(&event_mutex);
        file = event_file_data(filp);
        if (file)
                err = apply_event_filter(file, buf);
        mutex_unlock(&event_mutex);

        kfree(buf);
        if (err < 0)
                return err;

        *ppos += cnt;

        return cnt;
}

static LIST_HEAD(event_subsystems);

static int subsystem_open(struct inode *inode, struct file *filp)
{
        struct event_subsystem *system = NULL;
        struct trace_subsystem_dir *dir = NULL; /* Initialize for gcc */
        struct trace_array *tr;
        int ret;

        if (tracing_is_disabled())
                return -ENODEV;

        /* Make sure the system still exists */
        mutex_lock(&trace_types_lock);
        mutex_lock(&event_mutex);
        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
                list_for_each_entry(dir, &tr->systems, list) {
                        if (dir == inode->i_private) {
                                /* Don't open systems with no events */
                                if (dir->nr_events) {
                                        __get_system_dir(dir);
                                        system = dir->subsystem;
                                }
                                goto exit_loop;
                        }
                }
        }
 exit_loop:
        mutex_unlock(&event_mutex);
        mutex_unlock(&trace_types_lock);

        if (!system)
                return -ENODEV;

        /* Some versions of gcc think dir can be uninitialized here */
        WARN_ON(!dir);

        /* Still need to increment the ref count of the system */
        if (trace_array_get(tr) < 0) {
                put_system(dir);
                return -ENODEV;
        }

        ret = tracing_open_generic(inode, filp);
        if (ret < 0) {
                trace_array_put(tr);
                put_system(dir);
        }

        return ret;
}

static int system_tr_open(struct inode *inode, struct file *filp)
{
        struct trace_subsystem_dir *dir;
        struct trace_array *tr = inode->i_private;
        int ret;

        if (tracing_is_disabled())
                return -ENODEV;

        if (trace_array_get(tr) < 0)
                return -ENODEV;

        /* Make a temporary dir that has no system but points to tr */
        dir = kzalloc(sizeof(*dir), GFP_KERNEL);
        if (!dir) {
                trace_array_put(tr);
                return -ENOMEM;
        }

        dir->tr = tr;

        ret = tracing_open_generic(inode, filp);
        if (ret < 0) {
                trace_array_put(tr);
                kfree(dir);
                return ret;
        }

        filp->private_data = dir;

        return 0;
}

static int subsystem_release(struct inode *inode, struct file *file)
{
        struct trace_subsystem_dir *dir = file->private_data;

        trace_array_put(dir->tr);

        /*
         * If dir->subsystem is NULL, then this is a temporary
         * descriptor that was made for a trace_array to enable
         * all subsystems.
         */
        if (dir->subsystem)
                put_system(dir);
        else
                kfree(dir);

        return 0;
}

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
                      loff_t *ppos)
{
        struct trace_subsystem_dir *dir = filp->private_data;
        struct event_subsystem *system = dir->subsystem;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        print_subsystem_event_filter(system, s);
        r = simple_read_from_buffer(ubuf, cnt, ppos,
                                    s->buffer, trace_seq_used(s));

        kfree(s);

        return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
                       loff_t *ppos)
{
        struct trace_subsystem_dir *dir = filp->private_data;
        char *buf;
        int err;

        if (cnt >= PAGE_SIZE)
                return -EINVAL;

        buf = memdup_user_nul(ubuf, cnt);
        if (IS_ERR(buf))
                return PTR_ERR(buf);

        err = apply_subsystem_event_filter(dir, buf);
        kfree(buf);
        if (err < 0)
                return err;

        *ppos += cnt;

        return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
        int (*func)(struct trace_seq *s) = filp->private_data;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        func(s);
        r = simple_read_from_buffer(ubuf, cnt, ppos,
                                    s->buffer, trace_seq_used(s));

        kfree(s);

        return r;
}

static int max_pids(struct trace_pid_list *pid_list)
{
        return (PAGE_SIZE << pid_list->order) / sizeof(pid_t);
}

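/*
 * Worked example: with 4K pages, order 0 and sizeof(pid_t) == 4,
 * max_pids() is (4096 << 0) / 4 = 1024 pids before the array must be
 * reallocated at the next higher page order.
 */
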
1548 static void ignore_task_cpu(void *data)
1549 {
1550         struct trace_array *tr = data;
1551         struct trace_pid_list *pid_list;
1552
1553         /*
1554          * This function is called by on_each_cpu() while the
1555          * event_mutex is held.
1556          */
1557         pid_list = rcu_dereference_protected(tr->filtered_pids,
1558                                              mutex_is_locked(&event_mutex));
1559
1560         this_cpu_write(tr->trace_buffer.data->ignore_pid,
1561                        check_ignore_pid(pid_list, current));
1562 }
1563
1564 static ssize_t
1565 ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
1566                        size_t cnt, loff_t *ppos)
1567 {
1568         struct seq_file *m = filp->private_data;
1569         struct trace_array *tr = m->private;
1570         struct trace_pid_list *filtered_pids = NULL;
1571         struct trace_pid_list *pid_list = NULL;
1572         struct trace_event_file *file;
1573         struct trace_parser parser;
1574         unsigned long val;
1575         loff_t this_pos;
1576         ssize_t read = 0;
1577         ssize_t ret = 0;
1578         pid_t pid;
1579         int i;
1580
1581         if (!cnt)
1582                 return 0;
1583
1584         ret = tracing_update_buffers();
1585         if (ret < 0)
1586                 return ret;
1587
1588         if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
1589                 return -ENOMEM;
1590
1591         mutex_lock(&event_mutex);
1592         /*
1593          * Load as many pids into the array before doing a
1594          * swap from the tr->filtered_pids to the new list.
1595          */
1596         while (cnt > 0) {
1597
1598                 this_pos = 0;
1599
1600                 ret = trace_get_user(&parser, ubuf, cnt, &this_pos);
1601                 if (ret < 0 || !trace_parser_loaded(&parser))
1602                         break;
1603
1604                 read += ret;
1605                 ubuf += ret;
1606                 cnt -= ret;
1607
1608                 parser.buffer[parser.idx] = 0;
1609
1610                 ret = -EINVAL;
1611                 if (kstrtoul(parser.buffer, 0, &val))
1612                         break;
1613                 if (val > INT_MAX)
1614                         break;
1615
1616                 pid = (pid_t)val;
1617
1618                 ret = -ENOMEM;
1619                 if (!pid_list) {
1620                         pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
1621                         if (!pid_list)
1622                                 break;
1623
1624                         filtered_pids = rcu_dereference_protected(tr->filtered_pids,
1625                                                         lockdep_is_held(&event_mutex));
1626                         if (filtered_pids)
1627                                 pid_list->order = filtered_pids->order;
1628                         else
1629                                 pid_list->order = 0;
1630
1631                         pid_list->pids = (void *)__get_free_pages(GFP_KERNEL,
1632                                                                   pid_list->order);
1633                         if (!pid_list->pids)
1634                                 break;
1635
1636                         if (filtered_pids) {
1637                                 pid_list->nr_pids = filtered_pids->nr_pids;
1638                                 memcpy(pid_list->pids, filtered_pids->pids,
1639                                        pid_list->nr_pids * sizeof(pid_t));
1640                         } else
1641                                 pid_list->nr_pids = 0;
1642                 }
1643
1644                 if (pid_list->nr_pids >= max_pids(pid_list)) {
1645                         pid_t *pid_page;
1646
1647                         pid_page = (void *)__get_free_pages(GFP_KERNEL,
1648                                                             pid_list->order + 1);
1649                         if (!pid_page)
1650                                 break;
1651                         memcpy(pid_page, pid_list->pids,
1652                                pid_list->nr_pids * sizeof(pid_t));
1653                         free_pages((unsigned long)pid_list->pids, pid_list->order);
1654
1655                         pid_list->order++;
1656                         pid_list->pids = pid_page;
1657                 }
1658
1659                 pid_list->pids[pid_list->nr_pids++] = pid;
1660                 trace_parser_clear(&parser);
1661                 ret = 0;
1662         }
1663         trace_parser_put(&parser);
1664
1665         if (ret < 0) {
1666                 if (pid_list)
1667                         free_pages((unsigned long)pid_list->pids, pid_list->order);
1668                 kfree(pid_list);
1669                 mutex_unlock(&event_mutex);
1670                 return ret;
1671         }
1672
1673         if (!pid_list) {
1674                 mutex_unlock(&event_mutex);
1675                 return ret;
1676         }
1677
1678         sort(pid_list->pids, pid_list->nr_pids, sizeof(pid_t), cmp_pid, NULL);
1679
1680         /* Remove duplicates */
1681         for (i = 1; i < pid_list->nr_pids; i++) {
1682                 int start = i;
1683
1684                 while (i < pid_list->nr_pids &&
1685                        pid_list->pids[i - 1] == pid_list->pids[i])
1686                         i++;
1687
1688                 if (start != i) {
1689                         if (i < pid_list->nr_pids) {
1690                                 memmove(&pid_list->pids[start], &pid_list->pids[i],
1691                                         (pid_list->nr_pids - i) * sizeof(pid_t));
1692                                 pid_list->nr_pids -= i - start;
1693                                 i = start;
1694                         } else
1695                                 pid_list->nr_pids = start;
1696                 }
1697         }
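
        /*
         * Example of the pass above: a sorted array { 3, 3, 5, 7, 7, 7 }
         * first hits the run of 3s, so the tail { 5, 7, 7, 7 } is
         * memmove()d down over the duplicate and nr_pids drops by one.
         * The run of 7s reaches the end of the array, so nr_pids is
         * simply truncated, leaving { 3, 5, 7 }.
         */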
1698
1699         rcu_assign_pointer(tr->filtered_pids, pid_list);
1700
1701         list_for_each_entry(file, &tr->events, list) {
1702                 set_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
1703         }
1704
1705         if (filtered_pids) {
1706                 synchronize_sched();
1707
1708                 free_pages((unsigned long)filtered_pids->pids, filtered_pids->order);
1709                 kfree(filtered_pids);
1710         } else {
1711                 /*
1712                  * Register a probe that is called before all other probes
1713                  * to set ignore_pid if next or prev do not match.
1714          * Register a probe that is called after all other probes
1715                  * to only keep ignore_pid set if next pid matches.
1716                  */
1717                 register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_pre,
1718                                                  tr, INT_MAX);
1719                 register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_post,
1720                                                  tr, 0);
1721
1722                 register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre,
1723                                                  tr, INT_MAX);
1724                 register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post,
1725                                                  tr, 0);
1726
1727                 register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre,
1728                                                      tr, INT_MAX);
1729                 register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post,
1730                                                      tr, 0);
1731
1732                 register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_pre,
1733                                                  tr, INT_MAX);
1734                 register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_post,
1735                                                  tr, 0);
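
                /*
                 * Tracepoint probes run in descending priority order
                 * (ordinary probes register at a default priority in
                 * between), so the _pre probes at INT_MAX fire before
                 * any other callback on these tracepoints and the
                 * _post probes at 0 fire after them.
                 */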
1736         }
1737
1738         /*
1739          * Ignoring of pids is done at task switch. But we have to
1740          * check for those tasks that are currently running.
1741          * Always do this in case a pid was appended or removed.
1742          */
1743         on_each_cpu(ignore_task_cpu, tr, 1);
1744
1745         mutex_unlock(&event_mutex);
1746
1747         ret = read;
1748         *ppos += read;
1749
1750         return ret;
1751 }
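
/*
 * Illustrative usage from userspace, assuming tracefs is mounted at
 * /sys/kernel/tracing:
 *
 *	# echo 123 > /sys/kernel/tracing/set_event_pid
 *	# echo 456 789 >> /sys/kernel/tracing/set_event_pid
 *
 * Both writes land here. Opening with truncation (">") first clears
 * the old list via ftrace_event_set_pid_open(), while appending
 * (">>") copies the existing filtered_pids into the new array above
 * before adding the new pids.
 */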
1752
1753 static int ftrace_event_avail_open(struct inode *inode, struct file *file);
1754 static int ftrace_event_set_open(struct inode *inode, struct file *file);
1755 static int ftrace_event_set_pid_open(struct inode *inode, struct file *file);
1756 static int ftrace_event_release(struct inode *inode, struct file *file);
1757
1758 static const struct seq_operations show_event_seq_ops = {
1759         .start = t_start,
1760         .next = t_next,
1761         .show = t_show,
1762         .stop = t_stop,
1763 };
1764
1765 static const struct seq_operations show_set_event_seq_ops = {
1766         .start = s_start,
1767         .next = s_next,
1768         .show = t_show,
1769         .stop = t_stop,
1770 };
1771
1772 static const struct seq_operations show_set_pid_seq_ops = {
1773         .start = p_start,
1774         .next = p_next,
1775         .show = p_show,
1776         .stop = p_stop,
1777 };
1778
1779 static const struct file_operations ftrace_avail_fops = {
1780         .open = ftrace_event_avail_open,
1781         .read = seq_read,
1782         .llseek = seq_lseek,
1783         .release = seq_release,
1784 };
1785
1786 static const struct file_operations ftrace_set_event_fops = {
1787         .open = ftrace_event_set_open,
1788         .read = seq_read,
1789         .write = ftrace_event_write,
1790         .llseek = seq_lseek,
1791         .release = ftrace_event_release,
1792 };
1793
1794 static const struct file_operations ftrace_set_event_pid_fops = {
1795         .open = ftrace_event_set_pid_open,
1796         .read = seq_read,
1797         .write = ftrace_event_pid_write,
1798         .llseek = seq_lseek,
1799         .release = ftrace_event_release,
1800 };
1801
1802 static const struct file_operations ftrace_enable_fops = {
1803         .open = tracing_open_generic,
1804         .read = event_enable_read,
1805         .write = event_enable_write,
1806         .llseek = default_llseek,
1807 };
1808
1809 static const struct file_operations ftrace_event_format_fops = {
1810         .open = trace_format_open,
1811         .read = seq_read,
1812         .llseek = seq_lseek,
1813         .release = seq_release,
1814 };
1815
1816 static const struct file_operations ftrace_event_id_fops = {
1817         .read = event_id_read,
1818         .llseek = default_llseek,
1819 };
1820
1821 static const struct file_operations ftrace_event_filter_fops = {
1822         .open = tracing_open_generic,
1823         .read = event_filter_read,
1824         .write = event_filter_write,
1825         .llseek = default_llseek,
1826 };
1827
1828 static const struct file_operations ftrace_subsystem_filter_fops = {
1829         .open = subsystem_open,
1830         .read = subsystem_filter_read,
1831         .write = subsystem_filter_write,
1832         .llseek = default_llseek,
1833         .release = subsystem_release,
1834 };
1835
1836 static const struct file_operations ftrace_system_enable_fops = {
1837         .open = subsystem_open,
1838         .read = system_enable_read,
1839         .write = system_enable_write,
1840         .llseek = default_llseek,
1841         .release = subsystem_release,
1842 };
1843
1844 static const struct file_operations ftrace_tr_enable_fops = {
1845         .open = system_tr_open,
1846         .read = system_enable_read,
1847         .write = system_enable_write,
1848         .llseek = default_llseek,
1849         .release = subsystem_release,
1850 };
1851
1852 static const struct file_operations ftrace_show_header_fops = {
1853         .open = tracing_open_generic,
1854         .read = show_header,
1855         .llseek = default_llseek,
1856 };
1857
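/*
 * Common open helper: the trace_array was stashed in inode->i_private
 * when the tracefs file was created, and it is copied into
 * seq_file->private here so the seq_ops callbacks can recover it.
 */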
1858 static int
1859 ftrace_event_open(struct inode *inode, struct file *file,
1860                   const struct seq_operations *seq_ops)
1861 {
1862         struct seq_file *m;
1863         int ret;
1864
1865         ret = seq_open(file, seq_ops);
1866         if (ret < 0)
1867                 return ret;
1868         m = file->private_data;
1869         /* copy tr over to seq ops */
1870         m->private = inode->i_private;
1871
1872         return ret;
1873 }
1874
1875 static int ftrace_event_release(struct inode *inode, struct file *file)
1876 {
1877         struct trace_array *tr = inode->i_private;
1878
1879         trace_array_put(tr);
1880
1881         return seq_release(inode, file);
1882 }
1883
1884 static int
1885 ftrace_event_avail_open(struct inode *inode, struct file *file)
1886 {
1887         const struct seq_operations *seq_ops = &show_event_seq_ops;
1888
1889         return ftrace_event_open(inode, file, seq_ops);
1890 }
1891
1892 static int
1893 ftrace_event_set_open(struct inode *inode, struct file *file)
1894 {
1895         const struct seq_operations *seq_ops = &show_set_event_seq_ops;
1896         struct trace_array *tr = inode->i_private;
1897         int ret;
1898
1899         if (trace_array_get(tr) < 0)
1900                 return -ENODEV;
1901
1902         if ((file->f_mode & FMODE_WRITE) &&
1903             (file->f_flags & O_TRUNC))
1904                 ftrace_clear_events(tr);
1905
1906         ret = ftrace_event_open(inode, file, seq_ops);
1907         if (ret < 0)
1908                 trace_array_put(tr);
1909         return ret;
1910 }
1911
1912 static int
1913 ftrace_event_set_pid_open(struct inode *inode, struct file *file)
1914 {
1915         const struct seq_operations *seq_ops = &show_set_pid_seq_ops;
1916         struct trace_array *tr = inode->i_private;
1917         int ret;
1918
1919         if (trace_array_get(tr) < 0)
1920                 return -ENODEV;
1921
1922         if ((file->f_mode & FMODE_WRITE) &&
1923             (file->f_flags & O_TRUNC))
1924                 ftrace_clear_event_pids(tr);
1925
1926         ret = ftrace_event_open(inode, file, seq_ops);
1927         if (ret < 0)
1928                 trace_array_put(tr);
1929         return ret;
1930 }
1931
1932 static struct event_subsystem *
1933 create_new_subsystem(const char *name)
1934 {
1935         struct event_subsystem *system;
1936
1937         /* need to create new entry */
1938         system = kmalloc(sizeof(*system), GFP_KERNEL);
1939         if (!system)
1940                 return NULL;
1941
1942         system->ref_count = 1;
1943
1944         /* Only allocate if dynamic (kprobes and modules) */
1945         system->name = kstrdup_const(name, GFP_KERNEL);
1946         if (!system->name)
1947                 goto out_free;
1948
1949         system->filter = NULL;
1950
1951         system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
1952         if (!system->filter)
1953                 goto out_free;
1954
1955         list_add(&system->list, &event_subsystems);
1956
1957         return system;
1958
1959  out_free:
1960         kfree_const(system->name);
1961         kfree(system);
1962         return NULL;
1963 }
1964
1965 static struct dentry *
1966 event_subsystem_dir(struct trace_array *tr, const char *name,
1967                     struct trace_event_file *file, struct dentry *parent)
1968 {
1969         struct trace_subsystem_dir *dir;
1970         struct event_subsystem *system;
1971         struct dentry *entry;
1972
1973         /* First see if we did not already create this dir */
1974         list_for_each_entry(dir, &tr->systems, list) {
1975                 system = dir->subsystem;
1976                 if (strcmp(system->name, name) == 0) {
1977                         dir->nr_events++;
1978                         file->system = dir;
1979                         return dir->entry;
1980                 }
1981         }
1982
1983         /* Now see if the system itself exists. */
1984         list_for_each_entry(system, &event_subsystems, list) {
1985                 if (strcmp(system->name, name) == 0)
1986                         break;
1987         }
1988         /* Reset system variable when not found */
1989         if (&system->list == &event_subsystems)
1990                 system = NULL;
1991
1992         dir = kmalloc(sizeof(*dir), GFP_KERNEL);
1993         if (!dir)
1994                 goto out_fail;
1995
1996         if (!system) {
1997                 system = create_new_subsystem(name);
1998                 if (!system)
1999                         goto out_free;
2000         } else
2001                 __get_system(system);
2002
2003         dir->entry = tracefs_create_dir(name, parent);
2004         if (!dir->entry) {
2005                 pr_warn("Failed to create system directory %s\n", name);
2006                 __put_system(system);
2007                 goto out_free;
2008         }
2009
2010         dir->tr = tr;
2011         dir->ref_count = 1;
2012         dir->nr_events = 1;
2013         dir->subsystem = system;
2014         file->system = dir;
2015
2016         entry = tracefs_create_file("filter", 0644, dir->entry, dir,
2017                                     &ftrace_subsystem_filter_fops);
2018         if (!entry) {
2019                 kfree(system->filter);
2020                 system->filter = NULL;
2021                 pr_warn("Could not create tracefs '%s/filter' entry\n", name);
2022         }
2023
2024         trace_create_file("enable", 0644, dir->entry, dir,
2025                           &ftrace_system_enable_fops);
2026
2027         list_add(&dir->list, &tr->systems);
2028
2029         return dir->entry;
2030
2031  out_free:
2032         kfree(dir);
2033  out_fail:
2034         /* Only print this message if the failure was in memory allocation */
2035         if (!dir || !system)
2036                 pr_warn("No memory to create event subsystem %s\n", name);
2037         return NULL;
2038 }
2039
2040 static int
2041 event_create_dir(struct dentry *parent, struct trace_event_file *file)
2042 {
2043         struct trace_event_call *call = file->event_call;
2044         struct trace_array *tr = file->tr;
2045         struct list_head *head;
2046         struct dentry *d_events;
2047         const char *name;
2048         int ret;
2049
2050         /*
2051          * If the trace point header did not define TRACE_SYSTEM
2052          * then the system would be called "TRACE_SYSTEM".
2053          */
2054         if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
2055                 d_events = event_subsystem_dir(tr, call->class->system, file, parent);
2056                 if (!d_events)
2057                         return -ENOMEM;
2058         } else
2059                 d_events = parent;
2060
2061         name = trace_event_name(call);
2062         file->dir = tracefs_create_dir(name, d_events);
2063         if (!file->dir) {
2064                 pr_warn("Could not create tracefs '%s' directory\n", name);
2065                 return -1;
2066         }
2067
2068         if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
2069                 trace_create_file("enable", 0644, file->dir, file,
2070                                   &ftrace_enable_fops);
2071
2072 #ifdef CONFIG_PERF_EVENTS
2073         if (call->event.type && call->class->reg)
2074                 trace_create_file("id", 0444, file->dir,
2075                                   (void *)(long)call->event.type,
2076                                   &ftrace_event_id_fops);
2077 #endif
2078
2079         /*
2080          * Other events may have the same class. Only update
2081          * the fields if they are not already defined.
2082          */
2083         head = trace_get_fields(call);
2084         if (list_empty(head)) {
2085                 ret = call->class->define_fields(call);
2086                 if (ret < 0) {
2087                         pr_warn("Could not initialize trace point events/%s\n",
2088                                 name);
2089                         return -1;
2090                 }
2091         }
2092         trace_create_file("filter", 0644, file->dir, file,
2093                           &ftrace_event_filter_fops);
2094
2095         trace_create_file("trigger", 0644, file->dir, file,
2096                           &event_trigger_fops);
2097
2098         trace_create_file("format", 0444, file->dir, call,
2099                           &ftrace_event_format_fops);
2100
2101         return 0;
2102 }
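
/*
 * For a typical event such as sched:sched_switch, the layout created
 * above ends up as (illustrative):
 *
 *	events/sched/enable
 *	events/sched/filter
 *	events/sched/sched_switch/{enable,id,filter,trigger,format}
 *
 * with "id" present only when perf support is built in.
 */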
2103
2104 static void remove_event_from_tracers(struct trace_event_call *call)
2105 {
2106         struct trace_event_file *file;
2107         struct trace_array *tr;
2108
2109         do_for_each_event_file_safe(tr, file) {
2110                 if (file->event_call != call)
2111                         continue;
2112
2113                 remove_event_file_dir(file);
2114                 /*
2115                  * The do_for_each_event_file_safe() is
2116                  * a double loop. After finding the call for this
2117                  * trace_array, we use break to jump to the next
2118                  * trace_array.
2119                  */
2120                 break;
2121         } while_for_each_event_file();
2122 }
2123
2124 static void event_remove(struct trace_event_call *call)
2125 {
2126         struct trace_array *tr;
2127         struct trace_event_file *file;
2128
2129         do_for_each_event_file(tr, file) {
2130                 if (file->event_call != call)
2131                         continue;
2132                 ftrace_event_enable_disable(file, 0);
2133                 /*
2134                  * The do_for_each_event_file() is
2135                  * a double loop. After finding the call for this
2136                  * trace_array, we use break to jump to the next
2137                  * trace_array.
2138                  */
2139                 break;
2140         } while_for_each_event_file();
2141
2142         if (call->event.funcs)
2143                 __unregister_trace_event(&call->event);
2144         remove_event_from_tracers(call);
2145         list_del(&call->list);
2146 }
2147
2148 static int event_init(struct trace_event_call *call)
2149 {
2150         int ret = 0;
2151         const char *name;
2152
2153         name = trace_event_name(call);
2154         if (WARN_ON(!name))
2155                 return -EINVAL;
2156
2157         if (call->class->raw_init) {
2158                 ret = call->class->raw_init(call);
2159                 if (ret < 0 && ret != -ENOSYS)
2160                         pr_warn("Could not initialize trace events/%s\n", name);
2161         }
2162
2163         return ret;
2164 }
2165
2166 static int
2167 __register_event(struct trace_event_call *call, struct module *mod)
2168 {
2169         int ret;
2170
2171         ret = event_init(call);
2172         if (ret < 0)
2173                 return ret;
2174
2175         list_add(&call->list, &ftrace_events);
2176         call->mod = mod;
2177
2178         return 0;
2179 }
2180
2181 static char *enum_replace(char *ptr, struct trace_enum_map *map, int len)
2182 {
2183         int rlen;
2184         int elen;
2185
2186         /* Find the length of the enum value as a string */
2187         elen = snprintf(ptr, 0, "%ld", map->enum_value);
2188         /* Make sure there's enough room to replace the string with the value */
2189         if (len < elen)
2190                 return NULL;
2191
2192         snprintf(ptr, elen + 1, "%ld", map->enum_value);
2193
2194         /* Get the length of the rest of the string after the enum name */
2195         rlen = strlen(ptr + len);
2196         memmove(ptr + elen, ptr + len, rlen);
2197         /* Make sure we end the new string */
2198         ptr[elen + rlen] = 0;
2199
2200         return ptr + elen;
2201 }
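
/*
 * Example: with map->enum_string "HI_SOFTIRQ" (len == 10) and
 * map->enum_value 0, a print_fmt fragment such as
 *
 *	"... REC->vec == HI_SOFTIRQ ? ..."
 *
 * is rewritten in place to
 *
 *	"... REC->vec == 0 ? ..."
 *
 * If the rendered value were longer than the name itself there would
 * be no room, and enum_replace() bails out with NULL, which the
 * caller turns into a one-time warning.
 */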
2202
2203 static void update_event_printk(struct trace_event_call *call,
2204                                 struct trace_enum_map *map)
2205 {
2206         char *ptr;
2207         int quote = 0;
2208         int len = strlen(map->enum_string);
2209
2210         for (ptr = call->print_fmt; *ptr; ptr++) {
2211                 if (*ptr == '\\') {
2212                         ptr++;
2213                         /* paranoid */
2214                         if (!*ptr)
2215                                 break;
2216                         continue;
2217                 }
2218                 if (*ptr == '"') {
2219                         quote ^= 1;
2220                         continue;
2221                 }
2222                 if (quote)
2223                         continue;
2224                 if (isdigit(*ptr)) {
2225                         /* skip numbers */
2226                         do {
2227                                 ptr++;
2228                                 /* Check for alpha chars like ULL */
2229                         } while (isalnum(*ptr));
2230                         if (!*ptr)
2231                                 break;
2232                         /*
2233                          * A number must have some kind of delimiter after
2234                          * it, and we can ignore that too.
2235                          */
2236                         continue;
2237                 }
2238                 if (isalpha(*ptr) || *ptr == '_') {
2239                         if (strncmp(map->enum_string, ptr, len) == 0 &&
2240                             !isalnum(ptr[len]) && ptr[len] != '_') {
2241                                 ptr = enum_replace(ptr, map, len);
2242                                 /* Hmm, enum string smaller than value */
2243                                 if (WARN_ON_ONCE(!ptr))
2244                                         return;
2245                                 /*
2246                                  * No need to decrement here, as enum_replace()
2247                          * returns the pointer to the character past
2248                          * the enum, and two enums cannot be placed
2249                                  * back to back without something in between.
2250                                  * We can skip that something in between.
2251                                  */
2252                                 continue;
2253                         }
2254                 skip_more:
2255                         do {
2256                                 ptr++;
2257                         } while (isalnum(*ptr) || *ptr == '_');
2258                         if (!*ptr)
2259                                 break;
2260                         /*
2261                          * If what comes after this variable is a '.' or
2262                          * '->' then we can continue to ignore that string.
2263                          */
2264                         if (*ptr == '.' || (ptr[0] == '-' && ptr[1] == '>')) {
2265                                 ptr += *ptr == '.' ? 1 : 2;
2266                                 if (!*ptr)
2267                                         break;
2268                                 goto skip_more;
2269                         }
2270                         /*
2271                          * Once again, we can skip the delimiter that came
2272                          * after the string.
2273                          */
2274                         continue;
2275                 }
2276         }
2277 }
2278
2279 void trace_event_enum_update(struct trace_enum_map **map, int len)
2280 {
2281         struct trace_event_call *call, *p;
2282         const char *last_system = NULL;
2283         int last_i;
2284         int i;
2285
2286         down_write(&trace_event_sem);
2287         list_for_each_entry_safe(call, p, &ftrace_events, list) {
2288                 /* events are usually grouped together by system */
2289                 if (!last_system || call->class->system != last_system) {
2290                         last_i = 0;
2291                         last_system = call->class->system;
2292                 }
2293
2294                 for (i = last_i; i < len; i++) {
2295                         if (call->class->system == map[i]->system) {
2296                                 /* Remember the first map index for this system */
2297                                 if (!last_i)
2298                                         last_i = i;
2299                                 update_event_printk(call, map[i]);
2300                         }
2301                 }
2302         }
2303         up_write(&trace_event_sem);
2304 }
2305
2306 static struct trace_event_file *
2307 trace_create_new_event(struct trace_event_call *call,
2308                        struct trace_array *tr)
2309 {
2310         struct trace_event_file *file;
2311
2312         file = kmem_cache_alloc(file_cachep, GFP_TRACE);
2313         if (!file)
2314                 return NULL;
2315
2316         file->event_call = call;
2317         file->tr = tr;
2318         atomic_set(&file->sm_ref, 0);
2319         atomic_set(&file->tm_ref, 0);
2320         INIT_LIST_HEAD(&file->triggers);
2321         list_add(&file->list, &tr->events);
2322
2323         return file;
2324 }
2325
2326 /* Add an event to a trace directory */
2327 static int
2328 __trace_add_new_event(struct trace_event_call *call, struct trace_array *tr)
2329 {
2330         struct trace_event_file *file;
2331
2332         file = trace_create_new_event(call, tr);
2333         if (!file)
2334                 return -ENOMEM;
2335
2336         return event_create_dir(tr->event_dir, file);
2337 }
2338
2339 /*
2340  * Just create a descriptor for early init. A descriptor is required
2341  * for enabling events at boot. We want to enable events before
2342  * the filesystem is initialized.
2343  */
2344 static __init int
2345 __trace_early_add_new_event(struct trace_event_call *call,
2346                             struct trace_array *tr)
2347 {
2348         struct trace_event_file *file;
2349
2350         file = trace_create_new_event(call, tr);
2351         if (!file)
2352                 return -ENOMEM;
2353
2354         return 0;
2355 }
2356
2357 struct ftrace_module_file_ops;
2358 static void __add_event_to_tracers(struct trace_event_call *call);
2359
2360 /* Add an additional event_call dynamically */
2361 int trace_add_event_call(struct trace_event_call *call)
2362 {
2363         int ret;
2364         mutex_lock(&trace_types_lock);
2365         mutex_lock(&event_mutex);
2366
2367         ret = __register_event(call, NULL);
2368         if (ret >= 0)
2369                 __add_event_to_tracers(call);
2370
2371         mutex_unlock(&event_mutex);
2372         mutex_unlock(&trace_types_lock);
2373         return ret;
2374 }
2375
2376 /*
2377  * Must be called under locking of trace_types_lock, event_mutex and
2378  * trace_event_sem.
2379  */
2380 static void __trace_remove_event_call(struct trace_event_call *call)
2381 {
2382         event_remove(call);
2383         trace_destroy_fields(call);
2384         free_event_filter(call->filter);
2385         call->filter = NULL;
2386 }
2387
2388 static int probe_remove_event_call(struct trace_event_call *call)
2389 {
2390         struct trace_array *tr;
2391         struct trace_event_file *file;
2392
2393 #ifdef CONFIG_PERF_EVENTS
2394         if (call->perf_refcount)
2395                 return -EBUSY;
2396 #endif
2397         do_for_each_event_file(tr, file) {
2398                 if (file->event_call != call)
2399                         continue;
2400                 /*
2401                  * We can't rely on the ftrace_event_enable_disable(enable => 0)
2402                  * call we are about to make, as EVENT_FILE_FL_SOFT_MODE can suppress
2403                  * TRACE_REG_UNREGISTER.
2404                  */
2405                 if (file->flags & EVENT_FILE_FL_ENABLED)
2406                         return -EBUSY;
2407                 /*
2408                  * The do_for_each_event_file() is
2409                  * a double loop. After finding the call for this
2410                  * trace_array, we use break to jump to the next
2411                  * trace_array.
2412                  */
2413                 break;
2414         } while_for_each_event_file();
2415
2416         __trace_remove_event_call(call);
2417
2418         return 0;
2419 }
2420
2421 /* Remove an event_call */
2422 int trace_remove_event_call(struct trace_event_call *call)
2423 {
2424         int ret;
2425
2426         mutex_lock(&trace_types_lock);
2427         mutex_lock(&event_mutex);
2428         down_write(&trace_event_sem);
2429         ret = probe_remove_event_call(call);
2430         up_write(&trace_event_sem);
2431         mutex_unlock(&event_mutex);
2432         mutex_unlock(&trace_types_lock);
2433
2434         return ret;
2435 }
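
/*
 * Sketch of a dynamic user of the pair above (illustrative: my_call is
 * made up, but kprobe events pair the calls the same way):
 *
 *	err = trace_add_event_call(&my_call);
 *	...
 *	err = trace_remove_event_call(&my_call);
 *
 * Removal fails with -EBUSY while the event is still enabled in any
 * trace array or has perf users, so callers must handle failure.
 */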
2436
2437 #define for_each_event(event, start, end)                       \
2438         for (event = start;                                     \
2439              (unsigned long)event < (unsigned long)end;         \
2440              event++)
2441
2442 #ifdef CONFIG_MODULES
2443
2444 static void trace_module_add_events(struct module *mod)
2445 {
2446         struct trace_event_call **call, **start, **end;
2447
2448         if (!mod->num_trace_events)
2449                 return;
2450
2451         /* Don't add infrastructure for mods without tracepoints */
2452         if (trace_module_has_bad_taint(mod)) {
2453                 pr_err("%s: module has bad taint, not creating trace events\n",
2454                        mod->name);
2455                 return;
2456         }
2457
2458         start = mod->trace_events;
2459         end = mod->trace_events + mod->num_trace_events;
2460
2461         for_each_event(call, start, end) {
2462                 __register_event(*call, mod);
2463                 __add_event_to_tracers(*call);
2464         }
2465 }
2466
2467 static void trace_module_remove_events(struct module *mod)
2468 {
2469         struct trace_event_call *call, *p;
2470         bool clear_trace = false;
2471
2472         down_write(&trace_event_sem);
2473         list_for_each_entry_safe(call, p, &ftrace_events, list) {
2474                 if (call->mod == mod) {
2475                         if (call->flags & TRACE_EVENT_FL_WAS_ENABLED)
2476                                 clear_trace = true;
2477                         __trace_remove_event_call(call);
2478                 }
2479         }
2480         up_write(&trace_event_sem);
2481
2482         /*
2483          * It is safest to reset the ring buffer if the module being unloaded
2484          * registered any events that were used. The only worry is if
2485          * a new module gets loaded, and takes on the same id as the events
2486          * of this module. When printing out the buffer, traced events left
2487          * over from this module may be passed to the new module events and
2488          * unexpected results may occur.
2489          */
2490         if (clear_trace)
2491                 tracing_reset_all_online_cpus();
2492 }
2493
2494 static int trace_module_notify(struct notifier_block *self,
2495                                unsigned long val, void *data)
2496 {
2497         struct module *mod = data;
2498
2499         mutex_lock(&trace_types_lock);
2500         mutex_lock(&event_mutex);
2501         switch (val) {
2502         case MODULE_STATE_COMING:
2503                 trace_module_add_events(mod);
2504                 break;
2505         case MODULE_STATE_GOING:
2506                 trace_module_remove_events(mod);
2507                 break;
2508         }
2509         mutex_unlock(&event_mutex);
2510         mutex_unlock(&trace_types_lock);
2511
2512         return 0;
2513 }
2514
2515 static struct notifier_block trace_module_nb = {
2516         .notifier_call = trace_module_notify,
2517         .priority = 1, /* higher than trace.c module notify */
2518 };
2519 #endif /* CONFIG_MODULES */
2520
2521 /* Create a new event directory structure for a trace directory. */
2522 static void
2523 __trace_add_event_dirs(struct trace_array *tr)
2524 {
2525         struct trace_event_call *call;
2526         int ret;
2527
2528         list_for_each_entry(call, &ftrace_events, list) {
2529                 ret = __trace_add_new_event(call, tr);
2530                 if (ret < 0)
2531                         pr_warn("Could not create directory for event %s\n",
2532                                 trace_event_name(call));
2533         }
2534 }
2535
2536 struct trace_event_file *
2537 find_event_file(struct trace_array *tr, const char *system,  const char *event)
2538 {
2539         struct trace_event_file *file;
2540         struct trace_event_call *call;
2541         const char *name;
2542
2543         list_for_each_entry(file, &tr->events, list) {
2544
2545                 call = file->event_call;
2546                 name = trace_event_name(call);
2547
2548                 if (!name || !call->class || !call->class->reg)
2549                         continue;
2550
2551                 if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
2552                         continue;
2553
2554                 if (strcmp(event, name) == 0 &&
2555                     strcmp(system, call->class->system) == 0)
2556                         return file;
2557         }
2558         return NULL;
2559 }
2560
2561 #ifdef CONFIG_DYNAMIC_FTRACE
2562
2563 /* Avoid typos */
2564 #define ENABLE_EVENT_STR        "enable_event"
2565 #define DISABLE_EVENT_STR       "disable_event"
2566
2567 struct event_probe_data {
2568         struct trace_event_file *file;
2569         unsigned long                   count;
2570         int                             ref;
2571         bool                            enable;
2572 };
2573
2574 static void
2575 event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data)
2576 {
2577         struct event_probe_data **pdata = (struct event_probe_data **)_data;
2578         struct event_probe_data *data = *pdata;
2579
2580         if (!data)
2581                 return;
2582
2583         if (data->enable)
2584                 clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
2585         else
2586                 set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
2587 }
2588
2589 static void
2590 event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data)
2591 {
2592         struct event_probe_data **pdata = (struct event_probe_data **)_data;
2593         struct event_probe_data *data = *pdata;
2594
2595         if (!data)
2596                 return;
2597
2598         if (!data->count)
2599                 return;
2600
2601         /* Skip if the event is already in the state we want to switch to */
2602         if (data->enable == !(data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
2603                 return;
2604
2605         if (data->count != -1)
2606                 (data->count)--;
2607
2608         event_enable_probe(ip, parent_ip, _data);
2609 }
2610
2611 static int
2612 event_enable_print(struct seq_file *m, unsigned long ip,
2613                       struct ftrace_probe_ops *ops, void *_data)
2614 {
2615         struct event_probe_data *data = _data;
2616
2617         seq_printf(m, "%ps:", (void *)ip);
2618
2619         seq_printf(m, "%s:%s:%s",
2620                    data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
2621                    data->file->event_call->class->system,
2622                    trace_event_name(data->file->event_call));
2623
2624         if (data->count == -1)
2625                 seq_puts(m, ":unlimited\n");
2626         else
2627                 seq_printf(m, ":count=%ld\n", data->count);
2628
2629         return 0;
2630 }
2631
2632 static int
2633 event_enable_init(struct ftrace_probe_ops *ops, unsigned long ip,
2634                   void **_data)
2635 {
2636         struct event_probe_data **pdata = (struct event_probe_data **)_data;
2637         struct event_probe_data *data = *pdata;
2638
2639         data->ref++;
2640         return 0;
2641 }
2642
2643 static void
2644 event_enable_free(struct ftrace_probe_ops *ops, unsigned long ip,
2645                   void **_data)
2646 {
2647         struct event_probe_data **pdata = (struct event_probe_data **)_data;
2648         struct event_probe_data *data = *pdata;
2649
2650         if (WARN_ON_ONCE(data->ref <= 0))
2651                 return;
2652
2653         data->ref--;
2654         if (!data->ref) {
2655                 /* Remove the SOFT_MODE flag */
2656                 __ftrace_event_enable_disable(data->file, 0, 1);
2657                 module_put(data->file->event_call->mod);
2658                 kfree(data);
2659         }
2660         *pdata = NULL;
2661 }
2662
2663 static struct ftrace_probe_ops event_enable_probe_ops = {
2664         .func                   = event_enable_probe,
2665         .print                  = event_enable_print,
2666         .init                   = event_enable_init,
2667         .free                   = event_enable_free,
2668 };
2669
2670 static struct ftrace_probe_ops event_enable_count_probe_ops = {
2671         .func                   = event_enable_count_probe,
2672         .print                  = event_enable_print,
2673         .init                   = event_enable_init,
2674         .free                   = event_enable_free,
2675 };
2676
2677 static struct ftrace_probe_ops event_disable_probe_ops = {
2678         .func                   = event_enable_probe,
2679         .print                  = event_enable_print,
2680         .init                   = event_enable_init,
2681         .free                   = event_enable_free,
2682 };
2683
2684 static struct ftrace_probe_ops event_disable_count_probe_ops = {
2685         .func                   = event_enable_count_probe,
2686         .print                  = event_enable_print,
2687         .init                   = event_enable_init,
2688         .free                   = event_enable_free,
2689 };
2690
2691 static int
2692 event_enable_func(struct ftrace_hash *hash,
2693                   char *glob, char *cmd, char *param, int enabled)
2694 {
2695         struct trace_array *tr = top_trace_array();
2696         struct trace_event_file *file;
2697         struct ftrace_probe_ops *ops;
2698         struct event_probe_data *data;
2699         const char *system;
2700         const char *event;
2701         char *number;
2702         bool enable;
2703         int ret;
2704
2705         if (!tr)
2706                 return -ENODEV;
2707
2708         /* hash funcs only work with set_ftrace_filter */
2709         if (!enabled || !param)
2710                 return -EINVAL;
2711
2712         system = strsep(&param, ":");
2713         if (!param)
2714                 return -EINVAL;
2715
2716         event = strsep(&param, ":");
2717
2718         mutex_lock(&event_mutex);
2719
2720         ret = -EINVAL;
2721         file = find_event_file(tr, system, event);
2722         if (!file)
2723                 goto out;
2724
2725         enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
2726
2727         if (enable)
2728                 ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
2729         else
2730                 ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;
2731
2732         if (glob[0] == '!') {
2733                 unregister_ftrace_function_probe_func(glob+1, ops);
2734                 ret = 0;
2735                 goto out;
2736         }
2737
2738         ret = -ENOMEM;
2739         data = kzalloc(sizeof(*data), GFP_KERNEL);
2740         if (!data)
2741                 goto out;
2742
2743         data->enable = enable;
2744         data->count = -1;
2745         data->file = file;
2746
2747         if (!param)
2748                 goto out_reg;
2749
2750         number = strsep(&param, ":");
2751
2752         ret = -EINVAL;
2753         if (!strlen(number))
2754                 goto out_free;
2755
2756         /*
2757          * The rest of the parameter is the number of times this
2758          * probe may toggle the event before going quiet.
2759          */
2760         ret = kstrtoul(number, 0, &data->count);
2761         if (ret)
2762                 goto out_free;
2763
2764  out_reg:
2765         /* Don't let event modules unload while probe registered */
2766         ret = try_module_get(file->event_call->mod);
2767         if (!ret) {
2768                 ret = -EBUSY;
2769                 goto out_free;
2770         }
2771
2772         ret = __ftrace_event_enable_disable(file, 1, 1);
2773         if (ret < 0)
2774                 goto out_put;
2775         ret = register_ftrace_function_probe(glob, ops, data);
2776         /*
2777          * The above returns on success the # of functions enabled,
2778          * but if it didn't find any functions it returns zero.
2779          * Consider no functions a failure too.
2780          */
2781         if (!ret) {
2782                 ret = -ENOENT;
2783                 goto out_disable;
2784         } else if (ret < 0)
2785                 goto out_disable;
2786         /* Just return zero, not the number of enabled functions */
2787         ret = 0;
2788  out:
2789         mutex_unlock(&event_mutex);
2790         return ret;
2791
2792  out_disable:
2793         __ftrace_event_enable_disable(file, 0, 1);
2794  out_put:
2795         module_put(file->event_call->mod);
2796  out_free:
2797         kfree(data);
2798         goto out;
2799 }
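
/*
 * Illustrative use of the command parsed above, assuming tracefs is
 * mounted at /sys/kernel/tracing:
 *
 *	# echo 'try_to_wake_up:enable_event:sched:sched_switch:2' > \
 *		/sys/kernel/tracing/set_ftrace_filter
 *
 * arms a probe on try_to_wake_up() that soft-enables
 * sched:sched_switch for the first two hits; leaving off ":2" makes
 * it unlimited, and prefixing the function glob with '!' removes the
 * probe again.
 */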
2800
2801 static struct ftrace_func_command event_enable_cmd = {
2802         .name                   = ENABLE_EVENT_STR,
2803         .func                   = event_enable_func,
2804 };
2805
2806 static struct ftrace_func_command event_disable_cmd = {
2807         .name                   = DISABLE_EVENT_STR,
2808         .func                   = event_enable_func,
2809 };
2810
2811 static __init int register_event_cmds(void)
2812 {
2813         int ret;
2814
2815         ret = register_ftrace_command(&event_enable_cmd);
2816         if (WARN_ON(ret < 0))
2817                 return ret;
2818         ret = register_ftrace_command(&event_disable_cmd);
2819         if (WARN_ON(ret < 0))
2820                 unregister_ftrace_command(&event_enable_cmd);
2821         return ret;
2822 }
2823 #else
2824 static inline int register_event_cmds(void) { return 0; }
2825 #endif /* CONFIG_DYNAMIC_FTRACE */
2826
2827 /*
2828  * The top level array has already had its trace_event_file
2829  * descriptors created in order to allow for early events to
2830  * be recorded. This function is called after the tracefs has been
2831  * initialized, and we now have to create the files associated
2832  * to the events.
2833  */
2834 static __init void
2835 __trace_early_add_event_dirs(struct trace_array *tr)
2836 {
2837         struct trace_event_file *file;
2838         int ret;
2839
2840
2841         list_for_each_entry(file, &tr->events, list) {
2842                 ret = event_create_dir(tr->event_dir, file);
2843                 if (ret < 0)
2844                         pr_warn("Could not create directory for event %s\n",
2845                                 trace_event_name(file->event_call));
2846         }
2847 }
2848
2849 /*
2850  * For early boot up, the top trace array needs to have
2851  * a list of events that can be enabled. This must be done before
2852  * the filesystem is set up in order to allow events to be traced
2853  * early.
2854  */
2855 static __init void
2856 __trace_early_add_events(struct trace_array *tr)
2857 {
2858         struct trace_event_call *call;
2859         int ret;
2860
2861         list_for_each_entry(call, &ftrace_events, list) {
2862                 /* Early boot up should not have any modules loaded */
2863                 if (WARN_ON_ONCE(call->mod))
2864                         continue;
2865
2866                 ret = __trace_early_add_new_event(call, tr);
2867                 if (ret < 0)
2868                         pr_warn("Could not create early event %s\n",
2869                                 trace_event_name(call));
2870         }
2871 }
2872
2873 /* Remove the event directory structure for a trace directory. */
2874 static void
2875 __trace_remove_event_dirs(struct trace_array *tr)
2876 {
2877         struct trace_event_file *file, *next;
2878
2879         list_for_each_entry_safe(file, next, &tr->events, list)
2880                 remove_event_file_dir(file);
2881 }
2882
2883 static void __add_event_to_tracers(struct trace_event_call *call)
2884 {
2885         struct trace_array *tr;
2886
2887         list_for_each_entry(tr, &ftrace_trace_arrays, list)
2888                 __trace_add_new_event(call, tr);
2889 }
2890
2891 extern struct trace_event_call *__start_ftrace_events[];
2892 extern struct trace_event_call *__stop_ftrace_events[];
2893
2894 static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
2895
2896 static __init int setup_trace_event(char *str)
2897 {
2898         strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
2899         ring_buffer_expanded = true;
2900         tracing_selftest_disabled = true;
2901
2902         return 1;
2903 }
2904 __setup("trace_event=", setup_trace_event);
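
/*
 * Illustrative command line use:
 *
 *	trace_event=sched:sched_switch,irq:irq_handler_entry
 *
 * The list is stashed in bootup_event_buf and replayed by
 * early_enable_events(), once from event_trace_enable() and once more
 * from event_trace_enable_again() for events that cannot start that
 * early (see the comment above event_trace_enable_again()).
 */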
2905
2906 /* Expects to have event_mutex held when called */
2907 static int
2908 create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
2909 {
2910         struct dentry *d_events;
2911         struct dentry *entry;
2912
2913         entry = tracefs_create_file("set_event", 0644, parent,
2914                                     tr, &ftrace_set_event_fops);
2915         if (!entry) {
2916                 pr_warn("Could not create tracefs 'set_event' entry\n");
2917                 return -ENOMEM;
2918         }
2919
2920         d_events = tracefs_create_dir("events", parent);
2921         if (!d_events) {
2922                 pr_warn("Could not create tracefs 'events' directory\n");
2923                 return -ENOMEM;
2924         }
2925
2926         entry = tracefs_create_file("set_event_pid", 0644, parent,
2927                                     tr, &ftrace_set_event_pid_fops);
2928
2929         /* ring buffer internal formats */
2930         trace_create_file("header_page", 0444, d_events,
2931                           ring_buffer_print_page_header,
2932                           &ftrace_show_header_fops);
2933
2934         trace_create_file("header_event", 0444, d_events,
2935                           ring_buffer_print_entry_header,
2936                           &ftrace_show_header_fops);
2937
2938         trace_create_file("enable", 0644, d_events,
2939                           tr, &ftrace_tr_enable_fops);
2940
2941         tr->event_dir = d_events;
2942
2943         return 0;
2944 }
2945
2946 /**
2947  * event_trace_add_tracer - add an instance of a trace_array to events
2948  * @parent: The parent dentry to place the files/directories for events in
2949  * @tr: The trace array associated with these events
2950  *
2951  * When a new instance is created, it needs to set up its events
2952  * directory, as well as other files associated with events. It also
2953  * creates the event hierarchy in the @parent/events directory.
2954  *
2955  * Returns 0 on success.
2956  */
2957 int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
2958 {
2959         int ret;
2960
2961         mutex_lock(&event_mutex);
2962
2963         ret = create_event_toplevel_files(parent, tr);
2964         if (ret)
2965                 goto out_unlock;
2966
2967         down_write(&trace_event_sem);
2968         __trace_add_event_dirs(tr);
2969         up_write(&trace_event_sem);
2970
2971  out_unlock:
2972         mutex_unlock(&event_mutex);
2973
2974         return ret;
2975 }
2976
2977 /*
2978  * The top trace array already had its file descriptors created.
2979  * Now the files themselves need to be created.
2980  */
2981 static __init int
2982 early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
2983 {
2984         int ret;
2985
2986         mutex_lock(&event_mutex);
2987
2988         ret = create_event_toplevel_files(parent, tr);
2989         if (ret)
2990                 goto out_unlock;
2991
2992         down_write(&trace_event_sem);
2993         __trace_early_add_event_dirs(tr);
2994         up_write(&trace_event_sem);
2995
2996  out_unlock:
2997         mutex_unlock(&event_mutex);
2998
2999         return ret;
3000 }
3001
3002 int event_trace_del_tracer(struct trace_array *tr)
3003 {
3004         mutex_lock(&event_mutex);
3005
3006         /* Disable any event triggers and associated soft-disabled events */
3007         clear_event_triggers(tr);
3008
3009         /* Clear the pid list */
3010         __ftrace_clear_event_pids(tr);
3011
3012         /* Disable any running events */
3013         __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
3014
3015         /* Access to events is within rcu_read_lock_sched() */
3016         synchronize_sched();
3017
3018         down_write(&trace_event_sem);
3019         __trace_remove_event_dirs(tr);
3020         tracefs_remove_recursive(tr->event_dir);
3021         up_write(&trace_event_sem);
3022
3023         tr->event_dir = NULL;
3024
3025         mutex_unlock(&event_mutex);
3026
3027         return 0;
3028 }
3029
3030 static __init int event_trace_memsetup(void)
3031 {
3032         field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
3033         file_cachep = KMEM_CACHE(trace_event_file, SLAB_PANIC);
3034         return 0;
3035 }
3036
3037 static __init void
3038 early_enable_events(struct trace_array *tr, bool disable_first)
3039 {
3040         char *buf = bootup_event_buf;
3041         char *token;
3042         int ret;
3043
3044         while (true) {
3045                 token = strsep(&buf, ",");
3046
3047                 if (!token)
3048                         break;
3049
3050                 if (*token) {
3051                         /* Restarting syscalls requires that we stop them first */
3052                         if (disable_first)
3053                                 ftrace_set_clr_event(tr, token, 0);
3054
3055                         ret = ftrace_set_clr_event(tr, token, 1);
3056                         if (ret)
3057                                 pr_warn("Failed to enable trace event: %s\n", token);
3058                 }
3059
3060                 /* Put back the comma to allow this to be called again */
3061                 if (buf)
3062                         *(buf - 1) = ',';
3063         }
3064 }
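
/*
 * Note that strsep() NUL-terminates each token in place; restoring
 * the comma above keeps bootup_event_buf intact, which matters
 * because this function walks the same buffer twice (once from
 * event_trace_enable(), once from event_trace_enable_again()).
 */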
3065
3066 static __init int event_trace_enable(void)
3067 {
3068         struct trace_array *tr = top_trace_array();
3069         struct trace_event_call **iter, *call;
3070         int ret;
3071
3072         if (!tr)
3073                 return -ENODEV;
3074
3075         for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
3076
3077                 call = *iter;
3078                 ret = event_init(call);
3079                 if (!ret)
3080                         list_add(&call->list, &ftrace_events);
3081         }
3082
3083         /*
3084          * We need the top trace array to have a working set of trace
3085          * points at early init, before the debug files and directories
3086          * are created. Create the file entries now, and attach them
3087          * to the actual file dentries later.
3088          */
3089         __trace_early_add_events(tr);
3090
3091         early_enable_events(tr, false);
3092
3093         trace_printk_start_comm();
3094
3095         register_event_cmds();
3096
3097         register_trigger_cmds();
3098
3099         return 0;
3100 }
3101
3102 /*
3103  * event_trace_enable() is called from trace_event_init() first to
3104  * initialize events and perhaps start any events that are on the
3105  * command line. Unfortunately, there are some events that will not
3106  * start this early, like the system call tracepoints that need
3107  * to set the TIF_SYSCALL_TRACEPOINT flag of pid 1. But event_trace_enable()
3108  * is called before pid 1 starts, so the flag is never set, the
3109  * syscall tracepoints are never reached, and the events are enabled
3110  * but record nothing.
3111  */
3112 static __init int event_trace_enable_again(void)
3113 {
3114         struct trace_array *tr;
3115
3116         tr = top_trace_array();
3117         if (!tr)
3118                 return -ENODEV;
3119
3120         early_enable_events(tr, true);
3121
3122         return 0;
3123 }
3124
3125 early_initcall(event_trace_enable_again);
3126
3127 static __init int event_trace_init(void)
3128 {
3129         struct trace_array *tr;
3130         struct dentry *d_tracer;
3131         struct dentry *entry;
3132         int ret;
3133
3134         tr = top_trace_array();
3135         if (!tr)
3136                 return -ENODEV;
3137
3138         d_tracer = tracing_init_dentry();
3139         if (IS_ERR(d_tracer))
3140                 return 0;
3141
3142         entry = tracefs_create_file("available_events", 0444, d_tracer,
3143                                     tr, &ftrace_avail_fops);
3144         if (!entry)
3145                 pr_warn("Could not create tracefs 'available_events' entry\n");
3146
3147         if (trace_define_generic_fields())
3148                 pr_warn("tracing: Failed to allocated generic fields");
3149
3150         if (trace_define_common_fields())
3151                 pr_warn("tracing: Failed to allocate common fields");
3152
3153         ret = early_event_add_tracer(d_tracer, tr);
3154         if (ret)
3155                 return ret;
3156
3157 #ifdef CONFIG_MODULES
3158         ret = register_module_notifier(&trace_module_nb);
3159         if (ret)
3160                 pr_warn("Failed to register trace events module notifier\n");
3161 #endif
3162         return 0;
3163 }
3164
3165 void __init trace_event_init(void)
3166 {
3167         event_trace_memsetup();
3168         init_ftrace_syscalls();
3169         event_trace_enable();
3170 }
3171
3172 fs_initcall(event_trace_init);
3173
3174 #ifdef CONFIG_FTRACE_STARTUP_TEST
3175
3176 static DEFINE_SPINLOCK(test_spinlock);
3177 static DEFINE_SPINLOCK(test_spinlock_irq);
3178 static DEFINE_MUTEX(test_mutex);
3179
3180 static __init void test_work(struct work_struct *dummy)
3181 {
3182         spin_lock(&test_spinlock);
3183         spin_lock_irq(&test_spinlock_irq);
3184         udelay(1);
3185         spin_unlock_irq(&test_spinlock_irq);
3186         spin_unlock(&test_spinlock);
3187
3188         mutex_lock(&test_mutex);
3189         msleep(1);
3190         mutex_unlock(&test_mutex);
3191 }
3192
3193 static __init int event_test_thread(void *unused)
3194 {
3195         void *test_malloc;
3196
3197         test_malloc = kmalloc(1234, GFP_KERNEL);
3198         if (!test_malloc)
3199                 pr_info("failed to kmalloc\n");
3200
3201         schedule_on_each_cpu(test_work);
3202
3203         kfree(test_malloc);
3204
3205         set_current_state(TASK_INTERRUPTIBLE);
3206         while (!kthread_should_stop()) {
3207                 schedule();
3208                 set_current_state(TASK_INTERRUPTIBLE);
3209         }
3210         __set_current_state(TASK_RUNNING);
3211
3212         return 0;
3213 }
3214
3215 /*
3216  * Do various things that may trigger events.
3217  */
3218 static __init void event_test_stuff(void)
3219 {
3220         struct task_struct *test_thread;
3221
3222         test_thread = kthread_run(event_test_thread, NULL, "test-events");
3223         msleep(1);
3224         kthread_stop(test_thread);
3225 }
3226
3227 /*
3228  * For every trace event defined, we will test each trace point separately,
3229  * and then by groups, and finally all trace points.
3230  */
3231 static __init void event_trace_self_tests(void)
3232 {
3233         struct trace_subsystem_dir *dir;
3234         struct trace_event_file *file;
3235         struct trace_event_call *call;
3236         struct event_subsystem *system;
3237         struct trace_array *tr;
3238         int ret;
3239
3240         tr = top_trace_array();
3241         if (!tr)
3242                 return;
3243
3244         pr_info("Running tests on trace events:\n");
3245
3246         list_for_each_entry(file, &tr->events, list) {
3247
3248                 call = file->event_call;
3249
3250                 /* Only test those that have a probe */
3251                 if (!call->class || !call->class->probe)
3252                         continue;
3253
3254 /*
3255  * Testing syscall events here is pretty useless, but
3256  * we still do it if configured. But this is time consuming.
3257  * What we really need is a user thread to perform the
3258  * syscalls as we test.
3259  */
3260 #ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
3261                 if (call->class->system &&
3262                     strcmp(call->class->system, "syscalls") == 0)
3263                         continue;
3264 #endif
3265
3266                 pr_info("Testing event %s: ", trace_event_name(call));
3267
3268                 /*
3269                  * If an event is already enabled, someone is using
3270                  * it and the self test should not be on.
3271                  */
3272                 if (file->flags & EVENT_FILE_FL_ENABLED) {
3273                         pr_warn("Enabled event during self test!\n");
3274                         WARN_ON_ONCE(1);
3275                         continue;
3276                 }
3277
3278                 ftrace_event_enable_disable(file, 1);
3279                 event_test_stuff();
3280                 ftrace_event_enable_disable(file, 0);
3281
3282                 pr_cont("OK\n");
3283         }
3284
3285         /* Now test at the sub system level */
3286
3287         pr_info("Running tests on trace event systems:\n");
3288
3289         list_for_each_entry(dir, &tr->systems, list) {
3290
3291                 system = dir->subsystem;
3292
3293                 /* The ftrace system is special, skip it */
3294                 if (strcmp(system->name, "ftrace") == 0)
3295                         continue;
3296
3297                 pr_info("Testing event system %s: ", system->name);
3298
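             /* NULL event name: enable every event in this subsystem */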
3299                 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
3300                 if (WARN_ON_ONCE(ret)) {
3301                         pr_warn("error enabling system %s\n",
3302                                 system->name);
3303                         continue;
3304                 }
3305
3306                 event_test_stuff();
3307
3308                 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
3309                 if (WARN_ON_ONCE(ret)) {
3310                         pr_warn("error disabling system %s\n",
3311                                 system->name);
3312                         continue;
3313                 }
3314
3315                 pr_cont("OK\n");
3316         }
3317
3318         /* Test with all events enabled */
3319
3320         pr_info("Running tests on all trace events:\n");
3321         pr_info("Testing all events: ");
3322
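             /* NULL match, system and event: apply to every registered event */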
3323         ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
3324         if (WARN_ON_ONCE(ret)) {
3325                 pr_warn("error enabling all events\n");
3326                 return;
3327         }
3328
3329         event_test_stuff();
3330
3331         /* Now disable all events again */
3332         ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
3333         if (WARN_ON_ONCE(ret)) {
3334                 pr_warn("error disabling all events\n");
3335                 return;
3336         }
3337
3338         pr_cont("OK\n");
3339 }
3340
3341 #ifdef CONFIG_FUNCTION_TRACER
3342
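     /*
      * Per-cpu recursion guard: the callback below runs for every
      * traced function and may itself trigger function calls, so only
      * the outermost invocation on each CPU is allowed to record.
      */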
3343 static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
3344
3345 static struct trace_array *event_tr;
3346
3347 static void __init
3348 function_test_events_call(unsigned long ip, unsigned long parent_ip,
3349                           struct ftrace_ops *op, struct pt_regs *pt_regs)
3350 {
3351         struct ring_buffer_event *event;
3352         struct ring_buffer *buffer;
3353         struct ftrace_entry *entry;
3354         unsigned long flags;
3355         long disabled;
3356         int cpu;
3357         int pc;
3358
3359         pc = preempt_count();
3360         preempt_disable_notrace();
3361         cpu = raw_smp_processor_id();
3362         disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
3363
3364         if (disabled != 1)
3365                 goto out;
3366
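             /* Snapshot irq flags and preempt count for the event header */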
3367         local_save_flags(flags);
3368
3369         event = trace_current_buffer_lock_reserve(&buffer,
3370                                                   TRACE_FN, sizeof(*entry),
3371                                                   flags, pc);
3372         if (!event)
3373                 goto out;
3374         entry   = ring_buffer_event_data(event);
3375         entry->ip                       = ip;
3376         entry->parent_ip                = parent_ip;
3377
3378         trace_buffer_unlock_commit(event_tr, buffer, event, flags, pc);
3379
3380  out:
3381         atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
3382         preempt_enable_notrace();
3383 }
3384
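     /*
      * RECURSION_SAFE tells ftrace that this callback performs its own
      * recursion protection (the per-cpu counter above), so the core
      * does not need to wrap it.
      */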
3385 static struct ftrace_ops trace_ops __initdata = {
3387         .func = function_test_events_call,
3388         .flags = FTRACE_OPS_FL_RECURSION_SAFE,
3389 };
3390
3391 static __init void event_trace_self_test_with_function(void)
3392 {
3393         int ret;

3394         event_tr = top_trace_array();
3395         if (WARN_ON(!event_tr))
3396                 return;
3397         ret = register_ftrace_function(&trace_ops);
3398         if (WARN_ON(ret < 0)) {
3399                 pr_info("Failed to enable function tracer for event tests\n");
3400                 return;
3401         }
3402         pr_info("Running tests again, along with the function tracer\n");
3403         event_trace_self_tests();
3404         unregister_ftrace_function(&trace_ops);
3405 }
3406 #else
3407 static __init void event_trace_self_test_with_function(void)
3408 {
3409 }
3410 #endif
3411
3412 static __init int event_trace_self_tests_init(void)
3413 {
3414         if (!tracing_selftest_disabled) {
3415                 event_trace_self_tests();
3416                 event_trace_self_test_with_function();
3417         }
3418
3419         return 0;
3420 }
3421
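     /* Run late in boot, after the events themselves have been registered */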
3422 late_initcall(event_trace_self_tests_init);
3423
3424 #endif