tracing: Fix showing function event in available_events
kernel/trace/trace_events.c
/*
 * event tracer
 *
 * Copyright (C) 2008 Red Hat Inc, Steven Rostedt <srostedt@redhat.com>
 *
 *  - Added format output of fields of the trace point.
 *    This was based off of work by Tom Zanussi <tzanussi@gmail.com>.
 *
 */

#define pr_fmt(fmt) fmt

#include <linux/workqueue.h>
#include <linux/spinlock.h>
#include <linux/kthread.h>
#include <linux/tracefs.h>
#include <linux/uaccess.h>
#include <linux/bsearch.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <trace/events/sched.h>

#include <asm/setup.h>

#include "trace_output.h"

#undef TRACE_SYSTEM
#define TRACE_SYSTEM "TRACE_SYSTEM"

DEFINE_MUTEX(event_mutex);

LIST_HEAD(ftrace_events);
static LIST_HEAD(ftrace_generic_fields);
static LIST_HEAD(ftrace_common_fields);

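/* The slab allocations below want zeroed objects, so fold __GFP_ZERO in */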
#define GFP_TRACE (GFP_KERNEL | __GFP_ZERO)

static struct kmem_cache *field_cachep;
static struct kmem_cache *file_cachep;

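/*
 * Subsystem refcounts are plain integers; every update is serialized
 * by event_mutex, so no atomics are needed.
 */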
static inline int system_refcount(struct event_subsystem *system)
{
        return system->ref_count;
}

static int system_refcount_inc(struct event_subsystem *system)
{
        return system->ref_count++;
}

static int system_refcount_dec(struct event_subsystem *system)
{
        return --system->ref_count;
}

/* Double loops, do not use break, only goto's work */
#define do_for_each_event_file(tr, file)                        \
        list_for_each_entry(tr, &ftrace_trace_arrays, list) {   \
                list_for_each_entry(file, &tr->events, list)

#define do_for_each_event_file_safe(tr, file)                   \
        list_for_each_entry(tr, &ftrace_trace_arrays, list) {   \
                struct trace_event_file *___n;                          \
                list_for_each_entry_safe(file, ___n, &tr->events, list)

#define while_for_each_event_file()             \
        }

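/*
 * Some event classes generate their field list dynamically and supply
 * a get_fields() callback for it; everything else keeps a static list.
 */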
static struct list_head *
trace_get_fields(struct trace_event_call *event_call)
{
        if (!event_call->class->get_fields)
                return &event_call->class->fields;
        return event_call->class->get_fields(event_call);
}

static struct ftrace_event_field *
__find_event_field(struct list_head *head, char *name)
{
        struct ftrace_event_field *field;

        list_for_each_entry(field, head, link) {
                if (!strcmp(field->name, name))
                        return field;
        }

        return NULL;
}

struct ftrace_event_field *
trace_find_event_field(struct trace_event_call *call, char *name)
{
        struct ftrace_event_field *field;
        struct list_head *head;

        field = __find_event_field(&ftrace_generic_fields, name);
        if (field)
                return field;

        field = __find_event_field(&ftrace_common_fields, name);
        if (field)
                return field;

        head = trace_get_fields(call);
        return __find_event_field(head, name);
}

static int __trace_define_field(struct list_head *head, const char *type,
                                const char *name, int offset, int size,
                                int is_signed, int filter_type)
{
        struct ftrace_event_field *field;

        field = kmem_cache_alloc(field_cachep, GFP_TRACE);
        if (!field)
                return -ENOMEM;

        field->name = name;
        field->type = type;

        if (filter_type == FILTER_OTHER)
                field->filter_type = filter_assign_type(type);
        else
                field->filter_type = filter_type;

        field->offset = offset;
        field->size = size;
        field->is_signed = is_signed;

        list_add(&field->link, head);

        return 0;
}

int trace_define_field(struct trace_event_call *call, const char *type,
                       const char *name, int offset, int size, int is_signed,
                       int filter_type)
{
        struct list_head *head;

        if (WARN_ON(!call->class))
                return 0;

        head = trace_get_fields(call);
        return __trace_define_field(head, type, name, offset, size,
                                    is_signed, filter_type);
}
EXPORT_SYMBOL_GPL(trace_define_field);

#define __generic_field(type, item, filter_type)                        \
        ret = __trace_define_field(&ftrace_generic_fields, #type,       \
                                   #item, 0, 0, is_signed_type(type),   \
                                   filter_type);                        \
        if (ret)                                                        \
                return ret;

#define __common_field(type, item)                                      \
        ret = __trace_define_field(&ftrace_common_fields, #type,        \
                                   "common_" #item,                     \
                                   offsetof(typeof(ent), item),         \
                                   sizeof(ent.item),                    \
                                   is_signed_type(type), FILTER_OTHER); \
        if (ret)                                                        \
                return ret;

static int trace_define_generic_fields(void)
{
        int ret;

        __generic_field(int, cpu, FILTER_OTHER);
        __generic_field(char *, comm, FILTER_PTR_STRING);

        return ret;
}

static int trace_define_common_fields(void)
{
        int ret;
        struct trace_entry ent;

        __common_field(unsigned short, type);
        __common_field(unsigned char, flags);
        __common_field(unsigned char, preempt_count);
        __common_field(int, pid);

        return ret;
}

static void trace_destroy_fields(struct trace_event_call *call)
{
        struct ftrace_event_field *field, *next;
        struct list_head *head;

        head = trace_get_fields(call);
        list_for_each_entry_safe(field, next, head, link) {
                list_del(&field->link);
                kmem_cache_free(field_cachep, field);
        }
}

int trace_event_raw_init(struct trace_event_call *call)
{
        int id;

        id = register_trace_event(&call->event);
        if (!id)
                return -ENODEV;

        return 0;
}
EXPORT_SYMBOL_GPL(trace_event_raw_init);

bool trace_event_ignore_this_pid(struct trace_event_file *trace_file)
{
        struct trace_array *tr = trace_file->tr;
        struct trace_array_cpu *data;
        struct trace_pid_list *pid_list;

        pid_list = rcu_dereference_sched(tr->filtered_pids);
        if (!pid_list)
                return false;

        data = this_cpu_ptr(tr->trace_buffer.data);

        return data->ignore_pid;
}
EXPORT_SYMBOL_GPL(trace_event_ignore_this_pid);

void *trace_event_buffer_reserve(struct trace_event_buffer *fbuffer,
                                 struct trace_event_file *trace_file,
                                 unsigned long len)
{
        struct trace_event_call *event_call = trace_file->event_call;

        if ((trace_file->flags & EVENT_FILE_FL_PID_FILTER) &&
            trace_event_ignore_this_pid(trace_file))
                return NULL;

        local_save_flags(fbuffer->flags);
        fbuffer->pc = preempt_count();
        fbuffer->trace_file = trace_file;

        fbuffer->event =
                trace_event_buffer_lock_reserve(&fbuffer->buffer, trace_file,
                                                event_call->event.type, len,
                                                fbuffer->flags, fbuffer->pc);
        if (!fbuffer->event)
                return NULL;

        fbuffer->entry = ring_buffer_event_data(fbuffer->event);
        return fbuffer->entry;
}
EXPORT_SYMBOL_GPL(trace_event_buffer_reserve);

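/* Serializes use of the single, global tracepoint_print_iter below */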
static DEFINE_SPINLOCK(tracepoint_iter_lock);

static void output_printk(struct trace_event_buffer *fbuffer)
{
        struct trace_event_call *event_call;
        struct trace_event *event;
        unsigned long flags;
        struct trace_iterator *iter = tracepoint_print_iter;

        if (!iter)
                return;

        event_call = fbuffer->trace_file->event_call;
        if (!event_call || !event_call->event.funcs ||
            !event_call->event.funcs->trace)
                return;

        event = &fbuffer->trace_file->event_call->event;

        spin_lock_irqsave(&tracepoint_iter_lock, flags);
        trace_seq_init(&iter->seq);
        iter->ent = fbuffer->entry;
        event_call->event.funcs->trace(iter, 0, event);
        trace_seq_putc(&iter->seq, 0);
        printk("%s", iter->seq.buffer);

        spin_unlock_irqrestore(&tracepoint_iter_lock, flags);
}

void trace_event_buffer_commit(struct trace_event_buffer *fbuffer)
{
        if (tracepoint_printk)
                output_printk(fbuffer);

        event_trigger_unlock_commit(fbuffer->trace_file, fbuffer->buffer,
                                    fbuffer->event, fbuffer->entry,
                                    fbuffer->flags, fbuffer->pc);
}
EXPORT_SYMBOL_GPL(trace_event_buffer_commit);

int trace_event_reg(struct trace_event_call *call,
                    enum trace_reg type, void *data)
{
        struct trace_event_file *file = data;

        WARN_ON(!(call->flags & TRACE_EVENT_FL_TRACEPOINT));
        switch (type) {
        case TRACE_REG_REGISTER:
                return tracepoint_probe_register(call->tp,
                                                 call->class->probe,
                                                 file);
        case TRACE_REG_UNREGISTER:
                tracepoint_probe_unregister(call->tp,
                                            call->class->probe,
                                            file);
                return 0;

#ifdef CONFIG_PERF_EVENTS
        case TRACE_REG_PERF_REGISTER:
                return tracepoint_probe_register(call->tp,
                                                 call->class->perf_probe,
                                                 call);
        case TRACE_REG_PERF_UNREGISTER:
                tracepoint_probe_unregister(call->tp,
                                            call->class->perf_probe,
                                            call);
                return 0;
        case TRACE_REG_PERF_OPEN:
        case TRACE_REG_PERF_CLOSE:
        case TRACE_REG_PERF_ADD:
        case TRACE_REG_PERF_DEL:
                return 0;
#endif
        }
        return 0;
}
EXPORT_SYMBOL_GPL(trace_event_reg);

void trace_event_enable_cmd_record(bool enable)
{
        struct trace_event_file *file;
        struct trace_array *tr;

        mutex_lock(&event_mutex);
        do_for_each_event_file(tr, file) {

                if (!(file->flags & EVENT_FILE_FL_ENABLED))
                        continue;

                if (enable) {
                        tracing_start_cmdline_record();
                        set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
                } else {
                        tracing_stop_cmdline_record();
                        clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
                }
        } while_for_each_event_file();
        mutex_unlock(&event_mutex);
}

static int __ftrace_event_enable_disable(struct trace_event_file *file,
                                         int enable, int soft_disable)
{
        struct trace_event_call *call = file->event_call;
        struct trace_array *tr = file->tr;
        int ret = 0;
        int disable;

        switch (enable) {
        case 0:
                /*
                 * When soft_disable is set and enable is cleared, the sm_ref
                 * reference counter is decremented. If it reaches 0, we want
                 * to clear the SOFT_DISABLED flag but leave the event in the
                 * state that it was. That is, if the event was enabled and
                 * SOFT_DISABLED isn't set, then do nothing. But if SOFT_DISABLED
                 * is set we do not want the event to be enabled before we
                 * clear the bit.
                 *
                 * When soft_disable is not set but the SOFT_MODE flag is,
                 * we do nothing. Do not disable the tracepoint, otherwise
380                  * "soft enable"s (clearing the SOFT_DISABLED bit) wont work.
381                  */
382                 if (soft_disable) {
383                         if (atomic_dec_return(&file->sm_ref) > 0)
384                                 break;
385                         disable = file->flags & EVENT_FILE_FL_SOFT_DISABLED;
386                         clear_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
387                 } else
388                         disable = !(file->flags & EVENT_FILE_FL_SOFT_MODE);
389
390                 if (disable && (file->flags & EVENT_FILE_FL_ENABLED)) {
391                         clear_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);
392                         if (file->flags & EVENT_FILE_FL_RECORDED_CMD) {
393                                 tracing_stop_cmdline_record();
394                                 clear_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
395                         }
396                         call->class->reg(call, TRACE_REG_UNREGISTER, file);
397                 }
398                 /* If in SOFT_MODE, just set the SOFT_DISABLE_BIT, else clear it */
399                 if (file->flags & EVENT_FILE_FL_SOFT_MODE)
400                         set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
401                 else
402                         clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
403                 break;
404         case 1:
405                 /*
406                  * When soft_disable is set and enable is set, we want to
407                  * register the tracepoint for the event, but leave the event
408                  * as is. That means, if the event was already enabled, we do
409                  * nothing (but set SOFT_MODE). If the event is disabled, we
410                  * set SOFT_DISABLED before enabling the event tracepoint, so
411                  * it still seems to be disabled.
412                  */
413                 if (!soft_disable)
414                         clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
415                 else {
416                         if (atomic_inc_return(&file->sm_ref) > 1)
417                                 break;
418                         set_bit(EVENT_FILE_FL_SOFT_MODE_BIT, &file->flags);
419                 }
420
421                 if (!(file->flags & EVENT_FILE_FL_ENABLED)) {
422
423                         /* Keep the event disabled, when going to SOFT_MODE. */
424                         if (soft_disable)
425                                 set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &file->flags);
426
427                         if (tr->trace_flags & TRACE_ITER_RECORD_CMD) {
428                                 tracing_start_cmdline_record();
429                                 set_bit(EVENT_FILE_FL_RECORDED_CMD_BIT, &file->flags);
430                         }
431                         ret = call->class->reg(call, TRACE_REG_REGISTER, file);
432                         if (ret) {
433                                 tracing_stop_cmdline_record();
                                pr_info("event trace: Could not enable event %s\n",
                                        trace_event_name(call));
                                break;
                        }
                        set_bit(EVENT_FILE_FL_ENABLED_BIT, &file->flags);

                        /* WAS_ENABLED gets set but never cleared. */
                        call->flags |= TRACE_EVENT_FL_WAS_ENABLED;
                }
                break;
        }

        return ret;
}

int trace_event_enable_disable(struct trace_event_file *file,
                               int enable, int soft_disable)
{
        return __ftrace_event_enable_disable(file, enable, soft_disable);
}

static int ftrace_event_enable_disable(struct trace_event_file *file,
                                       int enable)
{
        return __ftrace_event_enable_disable(file, enable, 0);
}

static void ftrace_clear_events(struct trace_array *tr)
{
        struct trace_event_file *file;

        mutex_lock(&event_mutex);
        list_for_each_entry(file, &tr->events, list) {
                ftrace_event_enable_disable(file, 0);
        }
        mutex_unlock(&event_mutex);
}

static int cmp_pid(const void *key, const void *elt)
{
        const pid_t *search_pid = key;
        const pid_t *pid = elt;

        if (*search_pid == *pid)
                return 0;
        if (*search_pid < *pid)
                return -1;
        return 1;
}

static bool
check_ignore_pid(struct trace_pid_list *filtered_pids, struct task_struct *task)
{
        pid_t search_pid;
        pid_t *pid;

        /*
         * Return false, because if filtered_pids does not exist,
         * all pids are good to trace.
         */
        if (!filtered_pids)
                return false;

        search_pid = task->pid;

        pid = bsearch(&search_pid, filtered_pids->pids,
                      filtered_pids->nr_pids, sizeof(pid_t),
                      cmp_pid);
        if (!pid)
                return true;

        return false;
}

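/*
 * Scheduler probes for pid filtering. The "pre" variants are registered
 * with the highest priority so they run before all other probes, and the
 * "post" variants with the lowest so they run after them; this way events
 * emitted in between see the correct per-cpu ignore_pid value.
 */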
static void
event_filter_pid_sched_switch_probe_pre(void *data, bool preempt,
                    struct task_struct *prev, struct task_struct *next)
{
        struct trace_array *tr = data;
        struct trace_pid_list *pid_list;

        pid_list = rcu_dereference_sched(tr->filtered_pids);

        this_cpu_write(tr->trace_buffer.data->ignore_pid,
                       check_ignore_pid(pid_list, prev) &&
                       check_ignore_pid(pid_list, next));
}

static void
event_filter_pid_sched_switch_probe_post(void *data, bool preempt,
                    struct task_struct *prev, struct task_struct *next)
{
        struct trace_array *tr = data;
        struct trace_pid_list *pid_list;

        pid_list = rcu_dereference_sched(tr->filtered_pids);

        this_cpu_write(tr->trace_buffer.data->ignore_pid,
                       check_ignore_pid(pid_list, next));
}

static void
event_filter_pid_sched_wakeup_probe_pre(void *data, struct task_struct *task)
{
        struct trace_array *tr = data;
        struct trace_pid_list *pid_list;

        /* Nothing to do if we are already tracing */
        if (!this_cpu_read(tr->trace_buffer.data->ignore_pid))
                return;

        pid_list = rcu_dereference_sched(tr->filtered_pids);

        this_cpu_write(tr->trace_buffer.data->ignore_pid,
                       check_ignore_pid(pid_list, task));
}

static void
event_filter_pid_sched_wakeup_probe_post(void *data, struct task_struct *task)
{
        struct trace_array *tr = data;
        struct trace_pid_list *pid_list;

        /* Nothing to do if we are not tracing */
        if (this_cpu_read(tr->trace_buffer.data->ignore_pid))
                return;

        pid_list = rcu_dereference_sched(tr->filtered_pids);

        /* Set tracing if current is enabled */
        this_cpu_write(tr->trace_buffer.data->ignore_pid,
                       check_ignore_pid(pid_list, current));
}

static void __ftrace_clear_event_pids(struct trace_array *tr)
{
        struct trace_pid_list *pid_list;
        struct trace_event_file *file;
        int cpu;

        pid_list = rcu_dereference_protected(tr->filtered_pids,
                                             lockdep_is_held(&event_mutex));
        if (!pid_list)
                return;

        unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_pre, tr);
        unregister_trace_sched_switch(event_filter_pid_sched_switch_probe_post, tr);

        unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre, tr);
        unregister_trace_sched_wakeup(event_filter_pid_sched_wakeup_probe_post, tr);

        unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre, tr);
        unregister_trace_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post, tr);

        unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_pre, tr);
        unregister_trace_sched_waking(event_filter_pid_sched_wakeup_probe_post, tr);

        list_for_each_entry(file, &tr->events, list) {
                clear_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
        }

        for_each_possible_cpu(cpu)
                per_cpu_ptr(tr->trace_buffer.data, cpu)->ignore_pid = false;

        rcu_assign_pointer(tr->filtered_pids, NULL);

        /* Wait till all users are no longer using pid filtering */
        synchronize_sched();

        free_pages((unsigned long)pid_list->pids, pid_list->order);
        kfree(pid_list);
}

static void ftrace_clear_event_pids(struct trace_array *tr)
{
        mutex_lock(&event_mutex);
        __ftrace_clear_event_pids(tr);
        mutex_unlock(&event_mutex);
}

static void __put_system(struct event_subsystem *system)
{
        struct event_filter *filter = system->filter;

        WARN_ON_ONCE(system_refcount(system) == 0);
        if (system_refcount_dec(system))
                return;

        list_del(&system->list);

        if (filter) {
                kfree(filter->filter_string);
                kfree(filter);
        }
        kfree_const(system->name);
        kfree(system);
}

static void __get_system(struct event_subsystem *system)
{
        WARN_ON_ONCE(system_refcount(system) == 0);
        system_refcount_inc(system);
}

static void __get_system_dir(struct trace_subsystem_dir *dir)
{
        WARN_ON_ONCE(dir->ref_count == 0);
        dir->ref_count++;
        __get_system(dir->subsystem);
}

static void __put_system_dir(struct trace_subsystem_dir *dir)
{
        WARN_ON_ONCE(dir->ref_count == 0);
        /* If the subsystem is about to be freed, the dir must be too */
        WARN_ON_ONCE(system_refcount(dir->subsystem) == 1 && dir->ref_count != 1);

        __put_system(dir->subsystem);
        if (!--dir->ref_count)
                kfree(dir);
}

static void put_system(struct trace_subsystem_dir *dir)
{
        mutex_lock(&event_mutex);
        __put_system_dir(dir);
        mutex_unlock(&event_mutex);
}

static void remove_subsystem(struct trace_subsystem_dir *dir)
{
        if (!dir)
                return;

        if (!--dir->nr_events) {
                tracefs_remove_recursive(dir->entry);
                list_del(&dir->list);
                __put_system_dir(dir);
        }
}

static void remove_event_file_dir(struct trace_event_file *file)
{
        struct dentry *dir = file->dir;
        struct dentry *child;

        if (dir) {
                spin_lock(&dir->d_lock);        /* probably unneeded */
                list_for_each_entry(child, &dir->d_subdirs, d_child) {
                        if (d_really_is_positive(child))        /* probably unneeded */
                                d_inode(child)->i_private = NULL;
                }
                spin_unlock(&dir->d_lock);

                tracefs_remove_recursive(dir);
        }

        list_del(&file->list);
        remove_subsystem(file->system);
        free_event_filter(file->filter);
        kmem_cache_free(file_cachep, file);
}

/*
 * __ftrace_set_clr_event(tr, NULL, NULL, NULL, set) will set/unset all events.
 */
static int
__ftrace_set_clr_event_nolock(struct trace_array *tr, const char *match,
                              const char *sub, const char *event, int set)
{
        struct trace_event_file *file;
        struct trace_event_call *call;
        const char *name;
        int ret = -EINVAL;

        list_for_each_entry(file, &tr->events, list) {

                call = file->event_call;
                name = trace_event_name(call);

                if (!name || !call->class || !call->class->reg)
                        continue;

                if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
                        continue;

                if (match &&
                    strcmp(match, name) != 0 &&
                    strcmp(match, call->class->system) != 0)
                        continue;

                if (sub && strcmp(sub, call->class->system) != 0)
                        continue;

                if (event && strcmp(event, name) != 0)
                        continue;

                ftrace_event_enable_disable(file, set);

                ret = 0;
        }

        return ret;
}

static int __ftrace_set_clr_event(struct trace_array *tr, const char *match,
                                  const char *sub, const char *event, int set)
{
        int ret;

        mutex_lock(&event_mutex);
        ret = __ftrace_set_clr_event_nolock(tr, match, sub, event, set);
        mutex_unlock(&event_mutex);

        return ret;
}

static int ftrace_set_clr_event(struct trace_array *tr, char *buf, int set)
{
        char *event = NULL, *sub = NULL, *match;
        int ret;

        /*
         * The buf format can be <subsystem>:<event-name>
         *  *:<event-name> means any event by that name.
         *  :<event-name> is the same.
         *
         *  <subsystem>:* means all events in that subsystem
         *  <subsystem>: means the same.
         *
         *  <name> (no ':') means all events in a subsystem with
         *  the name <name> or any event that matches <name>
         */

        match = strsep(&buf, ":");
        if (buf) {
                sub = match;
                event = buf;
                match = NULL;

                if (!strlen(sub) || strcmp(sub, "*") == 0)
                        sub = NULL;
                if (!strlen(event) || strcmp(event, "*") == 0)
                        event = NULL;
        }

        ret = __ftrace_set_clr_event(tr, match, sub, event, set);

        /* Put back the colon to allow this to be called again */
        if (buf)
                *(buf - 1) = ':';

        return ret;
}

/**
 * trace_set_clr_event - enable or disable an event
 * @system: system name to match (NULL for any system)
 * @event: event name to match (NULL for all events, within system)
 * @set: 1 to enable, 0 to disable
 *
 * This is a way for other parts of the kernel to enable or disable
 * event recording.
 *
 * Returns 0 on success, -EINVAL if the parameters do not match any
 * registered events.
 */
int trace_set_clr_event(const char *system, const char *event, int set)
{
        struct trace_array *tr = top_trace_array();

        if (!tr)
                return -ENODEV;

        return __ftrace_set_clr_event(tr, NULL, system, event, set);
}
EXPORT_SYMBOL_GPL(trace_set_clr_event);

/* 127 chars plus a terminating nul should be much more than enough */
#define EVENT_BUF_SIZE          127

static ssize_t
ftrace_event_write(struct file *file, const char __user *ubuf,
                   size_t cnt, loff_t *ppos)
{
        struct trace_parser parser;
        struct seq_file *m = file->private_data;
        struct trace_array *tr = m->private;
        ssize_t read, ret;

        if (!cnt)
                return 0;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
                return -ENOMEM;

        read = trace_get_user(&parser, ubuf, cnt, ppos);

        if (read >= 0 && trace_parser_loaded(&parser)) {
                int set = 1;

                if (*parser.buffer == '!')
                        set = 0;

                parser.buffer[parser.idx] = 0;

                ret = ftrace_set_clr_event(tr, parser.buffer + !set, set);
                if (ret)
                        goto out_put;
        }

        ret = read;

 out_put:
        trace_parser_put(&parser);

        return ret;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct trace_event_file *file = v;
        struct trace_event_call *call;
        struct trace_array *tr = m->private;

        (*pos)++;

        list_for_each_entry_continue(file, &tr->events, list) {
                call = file->event_call;
                /*
                 * The ftrace subsystem is for showing formats only.
                 * They can not be enabled or disabled via the event files.
                 */
                if (call->class && call->class->reg &&
                    !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
                        return file;
        }

        return NULL;
}

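/*
 * Seed the iterator with a pseudo-entry made from the list head itself;
 * list_for_each_entry_continue() in t_next() then starts at the first
 * real entry. The same trick is used by s_start() below.
 */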
static void *t_start(struct seq_file *m, loff_t *pos)
{
        struct trace_event_file *file;
        struct trace_array *tr = m->private;
        loff_t l;

        mutex_lock(&event_mutex);

        file = list_entry(&tr->events, struct trace_event_file, list);
        for (l = 0; l <= *pos; ) {
                file = t_next(m, file, &l);
                if (!file)
                        break;
        }
        return file;
}

static void *
s_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct trace_event_file *file = v;
        struct trace_array *tr = m->private;

        (*pos)++;

        list_for_each_entry_continue(file, &tr->events, list) {
                if (file->flags & EVENT_FILE_FL_ENABLED)
                        return file;
        }

        return NULL;
}

static void *s_start(struct seq_file *m, loff_t *pos)
{
        struct trace_event_file *file;
        struct trace_array *tr = m->private;
        loff_t l;

        mutex_lock(&event_mutex);

        file = list_entry(&tr->events, struct trace_event_file, list);
        for (l = 0; l <= *pos; ) {
                file = s_next(m, file, &l);
                if (!file)
                        break;
        }
        return file;
}

static int t_show(struct seq_file *m, void *v)
{
        struct trace_event_file *file = v;
        struct trace_event_call *call = file->event_call;

        if (strcmp(call->class->system, TRACE_SYSTEM) != 0)
                seq_printf(m, "%s:", call->class->system);
        seq_printf(m, "%s\n", trace_event_name(call));

        return 0;
}

static void t_stop(struct seq_file *m, void *p)
{
        mutex_unlock(&event_mutex);
}

static void *p_start(struct seq_file *m, loff_t *pos)
        __acquires(RCU)
{
        struct trace_pid_list *pid_list;
        struct trace_array *tr = m->private;

        /*
         * Grab the mutex, to keep calls to p_next() having the same
         * tr->filtered_pids as p_start() has.
         * If we just passed the tr->filtered_pids around, then RCU would
         * have been enough, but doing that makes things more complex.
         */
        mutex_lock(&event_mutex);
        rcu_read_lock_sched();

        pid_list = rcu_dereference_sched(tr->filtered_pids);

        if (!pid_list || *pos >= pid_list->nr_pids)
                return NULL;

        return (void *)&pid_list->pids[*pos];
}

static void p_stop(struct seq_file *m, void *p)
        __releases(RCU)
{
        rcu_read_unlock_sched();
        mutex_unlock(&event_mutex);
}

static void *
p_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct trace_array *tr = m->private;
        struct trace_pid_list *pid_list = rcu_dereference_sched(tr->filtered_pids);

        (*pos)++;

        if (*pos >= pid_list->nr_pids)
                return NULL;

        return (void *)&pid_list->pids[*pos];
}

static int p_show(struct seq_file *m, void *v)
{
        pid_t *pid = v;

        seq_printf(m, "%d\n", *pid);
        return 0;
}

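/*
 * Reads as "0" or "1" for disabled/enabled, with a '*' appended while
 * the event is in soft mode (e.g. referenced by a trigger).
 */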
static ssize_t
event_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        struct trace_event_file *file;
        unsigned long flags;
        char buf[4] = "0";

        mutex_lock(&event_mutex);
        file = event_file_data(filp);
        if (likely(file))
                flags = file->flags;
        mutex_unlock(&event_mutex);

        if (!file)
                return -ENODEV;

        if (flags & EVENT_FILE_FL_ENABLED &&
            !(flags & EVENT_FILE_FL_SOFT_DISABLED))
                strcpy(buf, "1");

        if (flags & EVENT_FILE_FL_SOFT_DISABLED ||
            flags & EVENT_FILE_FL_SOFT_MODE)
                strcat(buf, "*");

        strcat(buf, "\n");

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, strlen(buf));
}

static ssize_t
event_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
        struct trace_event_file *file;
        unsigned long val;
        int ret;

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        switch (val) {
        case 0:
        case 1:
                ret = -ENODEV;
                mutex_lock(&event_mutex);
                file = event_file_data(filp);
                if (likely(file))
                        ret = ftrace_event_enable_disable(file, val);
                mutex_unlock(&event_mutex);
                break;

        default:
                return -EINVAL;
        }

        *ppos += cnt;

        return ret ? ret : cnt;
}

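/*
 * Reads as '1' if every event in the subsystem is enabled, '0' if every
 * event is disabled, and 'X' for a mixture.
 */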
static ssize_t
system_enable_read(struct file *filp, char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
        const char set_to_char[4] = { '?', '0', '1', 'X' };
        struct trace_subsystem_dir *dir = filp->private_data;
        struct event_subsystem *system = dir->subsystem;
        struct trace_event_call *call;
        struct trace_event_file *file;
        struct trace_array *tr = dir->tr;
        char buf[2];
        int set = 0;
        int ret;

        mutex_lock(&event_mutex);
        list_for_each_entry(file, &tr->events, list) {
                call = file->event_call;
                if (!trace_event_name(call) || !call->class || !call->class->reg)
                        continue;

                if (system && strcmp(call->class->system, system->name) != 0)
                        continue;

                /*
                 * We need to find out if all the events are set
                 * or if all events are cleared, or if we have
                 * a mixture.
                 */
                set |= (1 << !!(file->flags & EVENT_FILE_FL_ENABLED));

                /*
                 * If we have a mixture, no need to look further.
                 */
                if (set == 3)
                        break;
        }
        mutex_unlock(&event_mutex);

        buf[0] = set_to_char[set];
        buf[1] = '\n';

        ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);

        return ret;
}

static ssize_t
system_enable_write(struct file *filp, const char __user *ubuf, size_t cnt,
                    loff_t *ppos)
{
        struct trace_subsystem_dir *dir = filp->private_data;
        struct event_subsystem *system = dir->subsystem;
        const char *name = NULL;
        unsigned long val;
        ssize_t ret;

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        if (val != 0 && val != 1)
                return -EINVAL;

        /*
         * Opening of "enable" adds a ref count to system,
         * so the name is safe to use.
         */
        if (system)
                name = system->name;

        ret = __ftrace_set_clr_event(dir->tr, NULL, name, NULL, val);
        if (ret)
                goto out;

        ret = cnt;

out:
        *ppos += cnt;

        return ret;
}

enum {
        FORMAT_HEADER           = 1,
        FORMAT_FIELD_SEPERATOR  = 2,
        FORMAT_PRINTFMT         = 3,
};

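/*
 * The "format" file shows the header, the common fields, the event's own
 * fields and finally the print fmt. Fields were prepended with list_add(),
 * so the lists are walked backwards (->prev) to print the fields in the
 * order they were defined.
 */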
static void *f_next(struct seq_file *m, void *v, loff_t *pos)
{
        struct trace_event_call *call = event_file_data(m->private);
        struct list_head *common_head = &ftrace_common_fields;
        struct list_head *head = trace_get_fields(call);
        struct list_head *node = v;

        (*pos)++;

        switch ((unsigned long)v) {
        case FORMAT_HEADER:
                node = common_head;
                break;

        case FORMAT_FIELD_SEPERATOR:
                node = head;
                break;

        case FORMAT_PRINTFMT:
                /* all done */
                return NULL;
        }

        node = node->prev;
        if (node == common_head)
                return (void *)FORMAT_FIELD_SEPERATOR;
        else if (node == head)
                return (void *)FORMAT_PRINTFMT;
        else
                return node;
}

static int f_show(struct seq_file *m, void *v)
{
        struct trace_event_call *call = event_file_data(m->private);
        struct ftrace_event_field *field;
        const char *array_descriptor;

        switch ((unsigned long)v) {
        case FORMAT_HEADER:
                seq_printf(m, "name: %s\n", trace_event_name(call));
                seq_printf(m, "ID: %d\n", call->event.type);
                seq_puts(m, "format:\n");
                return 0;

        case FORMAT_FIELD_SEPERATOR:
                seq_putc(m, '\n');
                return 0;

        case FORMAT_PRINTFMT:
                seq_printf(m, "\nprint fmt: %s\n",
                           call->print_fmt);
                return 0;
        }

        field = list_entry(v, struct ftrace_event_field, link);
        /*
         * Smartly show the array type (except for dynamic arrays).
         * Normal:
         *      field:TYPE VAR
         * If TYPE := TYPE[LEN], it is shown as:
         *      field:TYPE VAR[LEN]
         */
        array_descriptor = strchr(field->type, '[');

        if (!strncmp(field->type, "__data_loc", 10))
                array_descriptor = NULL;

        if (!array_descriptor)
                seq_printf(m, "\tfield:%s %s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
                           field->type, field->name, field->offset,
                           field->size, !!field->is_signed);
        else
                seq_printf(m, "\tfield:%.*s %s%s;\toffset:%u;\tsize:%u;\tsigned:%d;\n",
                           (int)(array_descriptor - field->type),
                           field->type, field->name,
                           array_descriptor, field->offset,
                           field->size, !!field->is_signed);

        return 0;
}

static void *f_start(struct seq_file *m, loff_t *pos)
{
        void *p = (void *)FORMAT_HEADER;
        loff_t l = 0;

        /* ->stop() is called even if ->start() fails */
        mutex_lock(&event_mutex);
        if (!event_file_data(m->private))
                return ERR_PTR(-ENODEV);

        while (l < *pos && p)
                p = f_next(m, p, &l);

        return p;
}

static void f_stop(struct seq_file *m, void *p)
{
        mutex_unlock(&event_mutex);
}

static const struct seq_operations trace_format_seq_ops = {
        .start          = f_start,
        .next           = f_next,
        .stop           = f_stop,
        .show           = f_show,
};

static int trace_format_open(struct inode *inode, struct file *file)
{
        struct seq_file *m;
        int ret;

        ret = seq_open(file, &trace_format_seq_ops);
        if (ret < 0)
                return ret;

        m = file->private_data;
        m->private = file;

        return 0;
}

static ssize_t
event_id_read(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
        int id = (long)event_file_data(filp);
        char buf[32];
        int len;

        if (*ppos)
                return 0;

        if (unlikely(!id))
                return -ENODEV;

        len = sprintf(buf, "%d\n", id);

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, len);
}

static ssize_t
event_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        struct trace_event_file *file;
        struct trace_seq *s;
        int r = -ENODEV;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);

        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        mutex_lock(&event_mutex);
        file = event_file_data(filp);
        if (file)
                print_event_filter(file, s);
        mutex_unlock(&event_mutex);

        if (file)
                r = simple_read_from_buffer(ubuf, cnt, ppos,
                                            s->buffer, trace_seq_used(s));

        kfree(s);

        return r;
}

static ssize_t
event_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
                   loff_t *ppos)
{
        struct trace_event_file *file;
        char *buf;
        int err = -ENODEV;

        if (cnt >= PAGE_SIZE)
                return -EINVAL;

        buf = memdup_user_nul(ubuf, cnt);
        if (IS_ERR(buf))
                return PTR_ERR(buf);

        mutex_lock(&event_mutex);
        file = event_file_data(filp);
        if (file)
                err = apply_event_filter(file, buf);
        mutex_unlock(&event_mutex);

        kfree(buf);
        if (err < 0)
                return err;

        *ppos += cnt;

        return cnt;
}

static LIST_HEAD(event_subsystems);

static int subsystem_open(struct inode *inode, struct file *filp)
{
        struct event_subsystem *system = NULL;
        struct trace_subsystem_dir *dir = NULL; /* Initialize for gcc */
        struct trace_array *tr;
        int ret;

        if (tracing_is_disabled())
                return -ENODEV;

        /* Make sure the system still exists */
        mutex_lock(&trace_types_lock);
        mutex_lock(&event_mutex);
        list_for_each_entry(tr, &ftrace_trace_arrays, list) {
                list_for_each_entry(dir, &tr->systems, list) {
                        if (dir == inode->i_private) {
                                /* Don't open systems with no events */
                                if (dir->nr_events) {
                                        __get_system_dir(dir);
                                        system = dir->subsystem;
                                }
                                goto exit_loop;
                        }
                }
        }
 exit_loop:
        mutex_unlock(&event_mutex);
        mutex_unlock(&trace_types_lock);

        if (!system)
                return -ENODEV;

        /* Some versions of gcc think dir can be uninitialized here */
        WARN_ON(!dir);

        /* Still need to increment the ref count of the system */
        if (trace_array_get(tr) < 0) {
                put_system(dir);
                return -ENODEV;
        }

        ret = tracing_open_generic(inode, filp);
        if (ret < 0) {
                trace_array_put(tr);
                put_system(dir);
        }

        return ret;
}

static int system_tr_open(struct inode *inode, struct file *filp)
{
        struct trace_subsystem_dir *dir;
        struct trace_array *tr = inode->i_private;
        int ret;

        if (tracing_is_disabled())
                return -ENODEV;

        if (trace_array_get(tr) < 0)
                return -ENODEV;

        /* Make a temporary dir that has no system but points to tr */
        dir = kzalloc(sizeof(*dir), GFP_KERNEL);
        if (!dir) {
                trace_array_put(tr);
                return -ENOMEM;
        }

        dir->tr = tr;

        ret = tracing_open_generic(inode, filp);
        if (ret < 0) {
                trace_array_put(tr);
                kfree(dir);
                return ret;
        }

        filp->private_data = dir;

        return 0;
}

static int subsystem_release(struct inode *inode, struct file *file)
{
        struct trace_subsystem_dir *dir = file->private_data;

        trace_array_put(dir->tr);

        /*
         * If dir->subsystem is NULL, then this is a temporary
         * descriptor that was made for a trace_array to enable
         * all subsystems.
         */
        if (dir->subsystem)
                put_system(dir);
        else
                kfree(dir);

        return 0;
}

static ssize_t
subsystem_filter_read(struct file *filp, char __user *ubuf, size_t cnt,
                      loff_t *ppos)
{
        struct trace_subsystem_dir *dir = filp->private_data;
        struct event_subsystem *system = dir->subsystem;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        print_subsystem_event_filter(system, s);
        r = simple_read_from_buffer(ubuf, cnt, ppos,
                                    s->buffer, trace_seq_used(s));

        kfree(s);

        return r;
}

static ssize_t
subsystem_filter_write(struct file *filp, const char __user *ubuf, size_t cnt,
                       loff_t *ppos)
{
        struct trace_subsystem_dir *dir = filp->private_data;
        char *buf;
        int err;

        if (cnt >= PAGE_SIZE)
                return -EINVAL;

        buf = memdup_user_nul(ubuf, cnt);
        if (IS_ERR(buf))
                return PTR_ERR(buf);

        err = apply_subsystem_event_filter(dir, buf);
        kfree(buf);
        if (err < 0)
                return err;

        *ppos += cnt;

        return cnt;
}

static ssize_t
show_header(struct file *filp, char __user *ubuf, size_t cnt, loff_t *ppos)
{
        int (*func)(struct trace_seq *s) = filp->private_data;
        struct trace_seq *s;
        int r;

        if (*ppos)
                return 0;

        s = kmalloc(sizeof(*s), GFP_KERNEL);
        if (!s)
                return -ENOMEM;

        trace_seq_init(s);

        func(s);
        r = simple_read_from_buffer(ubuf, cnt, ppos,
                                    s->buffer, trace_seq_used(s));

        kfree(s);

        return r;
}

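/* The pid array occupies 2^order pages; this is its capacity in pids */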
static int max_pids(struct trace_pid_list *pid_list)
{
        return (PAGE_SIZE << pid_list->order) / sizeof(pid_t);
}

static void ignore_task_cpu(void *data)
{
        struct trace_array *tr = data;
        struct trace_pid_list *pid_list;

        /*
         * This function is called by on_each_cpu() while the
         * event_mutex is held.
         */
        pid_list = rcu_dereference_protected(tr->filtered_pids,
                                             mutex_is_locked(&event_mutex));

        this_cpu_write(tr->trace_buffer.data->ignore_pid,
                       check_ignore_pid(pid_list, current));
}

static ssize_t
ftrace_event_pid_write(struct file *filp, const char __user *ubuf,
                       size_t cnt, loff_t *ppos)
{
        struct seq_file *m = filp->private_data;
        struct trace_array *tr = m->private;
        struct trace_pid_list *filtered_pids = NULL;
        struct trace_pid_list *pid_list = NULL;
        struct trace_event_file *file;
        struct trace_parser parser;
        unsigned long val;
        loff_t this_pos;
        ssize_t read = 0;
        ssize_t ret = 0;
        pid_t pid;
        int i;

        if (!cnt)
                return 0;

        ret = tracing_update_buffers();
        if (ret < 0)
                return ret;

        if (trace_parser_get_init(&parser, EVENT_BUF_SIZE + 1))
                return -ENOMEM;

        mutex_lock(&event_mutex);
        /*
         * Load as many pids as possible into the array before swapping
         * tr->filtered_pids over to the new list.
         */
        while (cnt > 0) {

                this_pos = 0;

                ret = trace_get_user(&parser, ubuf, cnt, &this_pos);
                if (ret < 0 || !trace_parser_loaded(&parser))
                        break;

                read += ret;
                ubuf += ret;
                cnt -= ret;

                parser.buffer[parser.idx] = 0;

                ret = -EINVAL;
                if (kstrtoul(parser.buffer, 0, &val))
                        break;
                if (val > INT_MAX)
                        break;

                pid = (pid_t)val;

                ret = -ENOMEM;
                if (!pid_list) {
                        pid_list = kmalloc(sizeof(*pid_list), GFP_KERNEL);
                        if (!pid_list)
                                break;

                        filtered_pids = rcu_dereference_protected(tr->filtered_pids,
                                                        lockdep_is_held(&event_mutex));
                        if (filtered_pids)
                                pid_list->order = filtered_pids->order;
                        else
                                pid_list->order = 0;

                        pid_list->pids = (void *)__get_free_pages(GFP_KERNEL,
                                                                  pid_list->order);
                        if (!pid_list->pids)
                                break;

                        if (filtered_pids) {
                                pid_list->nr_pids = filtered_pids->nr_pids;
                                memcpy(pid_list->pids, filtered_pids->pids,
                                       pid_list->nr_pids * sizeof(pid_t));
                        } else
                                pid_list->nr_pids = 0;
                }

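                /* Array is full: double it by bumping the page order, then copy over */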
                if (pid_list->nr_pids >= max_pids(pid_list)) {
                        pid_t *pid_page;

                        pid_page = (void *)__get_free_pages(GFP_KERNEL,
                                                            pid_list->order + 1);
                        if (!pid_page)
                                break;
                        memcpy(pid_page, pid_list->pids,
                               pid_list->nr_pids * sizeof(pid_t));
                        free_pages((unsigned long)pid_list->pids, pid_list->order);

                        pid_list->order++;
                        pid_list->pids = pid_page;
                }

                pid_list->pids[pid_list->nr_pids++] = pid;
                trace_parser_clear(&parser);
                ret = 0;
        }
        trace_parser_put(&parser);

        if (ret < 0) {
                if (pid_list)
                        free_pages((unsigned long)pid_list->pids, pid_list->order);
                kfree(pid_list);
                mutex_unlock(&event_mutex);
                return ret;
        }

        if (!pid_list) {
                mutex_unlock(&event_mutex);
                return ret;
        }

        sort(pid_list->pids, pid_list->nr_pids, sizeof(pid_t), cmp_pid, NULL);

        /* Remove duplicates */
        for (i = 1; i < pid_list->nr_pids; i++) {
                int start = i;

                while (i < pid_list->nr_pids &&
                       pid_list->pids[i - 1] == pid_list->pids[i])
                        i++;

                if (start != i) {
                        if (i < pid_list->nr_pids) {
                                memmove(&pid_list->pids[start], &pid_list->pids[i],
                                        (pid_list->nr_pids - i) * sizeof(pid_t));
                                pid_list->nr_pids -= i - start;
                                i = start;
                        } else
                                pid_list->nr_pids = start;
                }
        }

        rcu_assign_pointer(tr->filtered_pids, pid_list);

        list_for_each_entry(file, &tr->events, list) {
                set_bit(EVENT_FILE_FL_PID_FILTER_BIT, &file->flags);
        }

        if (filtered_pids) {
                synchronize_sched();

                free_pages((unsigned long)filtered_pids->pids, filtered_pids->order);
                kfree(filtered_pids);
        } else {
                /*
                 * Register a probe that is called before all other probes
                 * to set ignore_pid if next or prev do not match.
1715          * Register a probe that is called after all other probes
1716                  * to only keep ignore_pid set if next pid matches.
1717                  */
1718                 register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_pre,
1719                                                  tr, INT_MAX);
1720                 register_trace_prio_sched_switch(event_filter_pid_sched_switch_probe_post,
1721                                                  tr, 0);
1722
1723                 register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_pre,
1724                                                  tr, INT_MAX);
1725                 register_trace_prio_sched_wakeup(event_filter_pid_sched_wakeup_probe_post,
1726                                                  tr, 0);
1727
1728                 register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_pre,
1729                                                      tr, INT_MAX);
1730                 register_trace_prio_sched_wakeup_new(event_filter_pid_sched_wakeup_probe_post,
1731                                                      tr, 0);
1732
1733                 register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_pre,
1734                                                  tr, INT_MAX);
1735                 register_trace_prio_sched_waking(event_filter_pid_sched_wakeup_probe_post,
1736                                                  tr, 0);
1737         }
1738
1739         /*
1740          * Ignoring of pids is done at task switch. But we have to
1741          * check for those tasks that are currently running.
1742          * Always do this in case a pid was appended or removed.
1743          */
1744         on_each_cpu(ignore_task_cpu, tr, 1);
1745
1746         mutex_unlock(&event_mutex);
1747
1748         ret = read;
1749         *ppos += read;
1750
1751         return ret;
1752 }
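
/*
 * Usage sketch for the write handler above (not compiled; paths assume
 * tracefs mounted at /sys/kernel/tracing, which may differ):
 *
 *   # echo 123 244 > /sys/kernel/tracing/set_event_pid    (replace list)
 *   # echo 571 >> /sys/kernel/tracing/set_event_pid       (append a pid)
 *
 * restricts event tracing to the listed pids. The O_TRUNC case that
 * clears the previous list is handled in ftrace_event_set_pid_open()
 * below.
 */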
1753
1754 static int ftrace_event_avail_open(struct inode *inode, struct file *file);
1755 static int ftrace_event_set_open(struct inode *inode, struct file *file);
1756 static int ftrace_event_set_pid_open(struct inode *inode, struct file *file);
1757 static int ftrace_event_release(struct inode *inode, struct file *file);
1758
1759 static const struct seq_operations show_event_seq_ops = {
1760         .start = t_start,
1761         .next = t_next,
1762         .show = t_show,
1763         .stop = t_stop,
1764 };
1765
1766 static const struct seq_operations show_set_event_seq_ops = {
1767         .start = s_start,
1768         .next = s_next,
1769         .show = t_show,
1770         .stop = t_stop,
1771 };
1772
1773 static const struct seq_operations show_set_pid_seq_ops = {
1774         .start = p_start,
1775         .next = p_next,
1776         .show = p_show,
1777         .stop = p_stop,
1778 };
1779
1780 static const struct file_operations ftrace_avail_fops = {
1781         .open = ftrace_event_avail_open,
1782         .read = seq_read,
1783         .llseek = seq_lseek,
1784         .release = seq_release,
1785 };
1786
1787 static const struct file_operations ftrace_set_event_fops = {
1788         .open = ftrace_event_set_open,
1789         .read = seq_read,
1790         .write = ftrace_event_write,
1791         .llseek = seq_lseek,
1792         .release = ftrace_event_release,
1793 };
1794
1795 static const struct file_operations ftrace_set_event_pid_fops = {
1796         .open = ftrace_event_set_pid_open,
1797         .read = seq_read,
1798         .write = ftrace_event_pid_write,
1799         .llseek = seq_lseek,
1800         .release = ftrace_event_release,
1801 };
1802
1803 static const struct file_operations ftrace_enable_fops = {
1804         .open = tracing_open_generic,
1805         .read = event_enable_read,
1806         .write = event_enable_write,
1807         .llseek = default_llseek,
1808 };
1809
1810 static const struct file_operations ftrace_event_format_fops = {
1811         .open = trace_format_open,
1812         .read = seq_read,
1813         .llseek = seq_lseek,
1814         .release = seq_release,
1815 };
1816
1817 static const struct file_operations ftrace_event_id_fops = {
1818         .read = event_id_read,
1819         .llseek = default_llseek,
1820 };
1821
1822 static const struct file_operations ftrace_event_filter_fops = {
1823         .open = tracing_open_generic,
1824         .read = event_filter_read,
1825         .write = event_filter_write,
1826         .llseek = default_llseek,
1827 };
1828
1829 static const struct file_operations ftrace_subsystem_filter_fops = {
1830         .open = subsystem_open,
1831         .read = subsystem_filter_read,
1832         .write = subsystem_filter_write,
1833         .llseek = default_llseek,
1834         .release = subsystem_release,
1835 };
1836
1837 static const struct file_operations ftrace_system_enable_fops = {
1838         .open = subsystem_open,
1839         .read = system_enable_read,
1840         .write = system_enable_write,
1841         .llseek = default_llseek,
1842         .release = subsystem_release,
1843 };
1844
1845 static const struct file_operations ftrace_tr_enable_fops = {
1846         .open = system_tr_open,
1847         .read = system_enable_read,
1848         .write = system_enable_write,
1849         .llseek = default_llseek,
1850         .release = subsystem_release,
1851 };
1852
1853 static const struct file_operations ftrace_show_header_fops = {
1854         .open = tracing_open_generic,
1855         .read = show_header,
1856         .llseek = default_llseek,
1857 };
1858
1859 static int
1860 ftrace_event_open(struct inode *inode, struct file *file,
1861                   const struct seq_operations *seq_ops)
1862 {
1863         struct seq_file *m;
1864         int ret;
1865
1866         ret = seq_open(file, seq_ops);
1867         if (ret < 0)
1868                 return ret;
1869         m = file->private_data;
1870         /* copy tr over to seq ops */
1871         m->private = inode->i_private;
1872
1873         return ret;
1874 }
1875
1876 static int ftrace_event_release(struct inode *inode, struct file *file)
1877 {
1878         struct trace_array *tr = inode->i_private;
1879
1880         trace_array_put(tr);
1881
1882         return seq_release(inode, file);
1883 }
1884
1885 static int
1886 ftrace_event_avail_open(struct inode *inode, struct file *file)
1887 {
1888         const struct seq_operations *seq_ops = &show_event_seq_ops;
1889
1890         return ftrace_event_open(inode, file, seq_ops);
1891 }
1892
1893 static int
1894 ftrace_event_set_open(struct inode *inode, struct file *file)
1895 {
1896         const struct seq_operations *seq_ops = &show_set_event_seq_ops;
1897         struct trace_array *tr = inode->i_private;
1898         int ret;
1899
1900         if (trace_array_get(tr) < 0)
1901                 return -ENODEV;
1902
1903         if ((file->f_mode & FMODE_WRITE) &&
1904             (file->f_flags & O_TRUNC))
1905                 ftrace_clear_events(tr);
1906
1907         ret = ftrace_event_open(inode, file, seq_ops);
1908         if (ret < 0)
1909                 trace_array_put(tr);
1910         return ret;
1911 }
1912
1913 static int
1914 ftrace_event_set_pid_open(struct inode *inode, struct file *file)
1915 {
1916         const struct seq_operations *seq_ops = &show_set_pid_seq_ops;
1917         struct trace_array *tr = inode->i_private;
1918         int ret;
1919
1920         if (trace_array_get(tr) < 0)
1921                 return -ENODEV;
1922
1923         if ((file->f_mode & FMODE_WRITE) &&
1924             (file->f_flags & O_TRUNC))
1925                 ftrace_clear_event_pids(tr);
1926
1927         ret = ftrace_event_open(inode, file, seq_ops);
1928         if (ret < 0)
1929                 trace_array_put(tr);
1930         return ret;
1931 }
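
/*
 * A brief example of the O_TRUNC semantics the two open routines above
 * implement (event names are only illustrative):
 *
 *   # echo sched:sched_switch > set_event     (clear, then set one event)
 *   # echo sched:sched_wakeup >> set_event    (append, keep prior events)
 *   # echo > set_event                        (clear everything)
 */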
1932
1933 static struct event_subsystem *
1934 create_new_subsystem(const char *name)
1935 {
1936         struct event_subsystem *system;
1937
1938         /* need to create new entry */
1939         system = kmalloc(sizeof(*system), GFP_KERNEL);
1940         if (!system)
1941                 return NULL;
1942
1943         system->ref_count = 1;
1944
1945         /* Only allocate if dynamic (kprobes and modules) */
1946         system->name = kstrdup_const(name, GFP_KERNEL);
1947         if (!system->name)
1948                 goto out_free;
1949
1950         system->filter = NULL;
1951
1952         system->filter = kzalloc(sizeof(struct event_filter), GFP_KERNEL);
1953         if (!system->filter)
1954                 goto out_free;
1955
1956         list_add(&system->list, &event_subsystems);
1957
1958         return system;
1959
1960  out_free:
1961         kfree_const(system->name);
1962         kfree(system);
1963         return NULL;
1964 }
1965
1966 static struct dentry *
1967 event_subsystem_dir(struct trace_array *tr, const char *name,
1968                     struct trace_event_file *file, struct dentry *parent)
1969 {
1970         struct trace_subsystem_dir *dir;
1971         struct event_subsystem *system;
1972         struct dentry *entry;
1973
1974         /* First see if we have already created this dir */
1975         list_for_each_entry(dir, &tr->systems, list) {
1976                 system = dir->subsystem;
1977                 if (strcmp(system->name, name) == 0) {
1978                         dir->nr_events++;
1979                         file->system = dir;
1980                         return dir->entry;
1981                 }
1982         }
1983
1984         /* Now see if the system itself exists. */
1985         list_for_each_entry(system, &event_subsystems, list) {
1986                 if (strcmp(system->name, name) == 0)
1987                         break;
1988         }
1989         /* Reset system variable when not found */
1990         if (&system->list == &event_subsystems)
1991                 system = NULL;
1992
1993         dir = kmalloc(sizeof(*dir), GFP_KERNEL);
1994         if (!dir)
1995                 goto out_fail;
1996
1997         if (!system) {
1998                 system = create_new_subsystem(name);
1999                 if (!system)
2000                         goto out_free;
2001         } else
2002                 __get_system(system);
2003
2004         dir->entry = tracefs_create_dir(name, parent);
2005         if (!dir->entry) {
2006                 pr_warn("Failed to create system directory %s\n", name);
2007                 __put_system(system);
2008                 goto out_free;
2009         }
2010
2011         dir->tr = tr;
2012         dir->ref_count = 1;
2013         dir->nr_events = 1;
2014         dir->subsystem = system;
2015         file->system = dir;
2016
2017         entry = tracefs_create_file("filter", 0644, dir->entry, dir,
2018                                     &ftrace_subsystem_filter_fops);
2019         if (!entry) {
2020                 kfree(system->filter);
2021                 system->filter = NULL;
2022                 pr_warn("Could not create tracefs '%s/filter' entry\n", name);
2023         }
2024
2025         trace_create_file("enable", 0644, dir->entry, dir,
2026                           &ftrace_system_enable_fops);
2027
2028         list_add(&dir->list, &tr->systems);
2029
2030         return dir->entry;
2031
2032  out_free:
2033         kfree(dir);
2034  out_fail:
2035         /* Only print this message if the failure was a memory allocation */
2036         if (!dir || !system)
2037                 pr_warn("No memory to create event subsystem %s\n", name);
2038         return NULL;
2039 }
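
/*
 * Sketch of what the above builds: each subsystem directory gets its
 * own control files that act on every event in the subsystem, e.g.
 * (taking "sched" as an example system):
 *
 *   events/sched/enable    - enable/disable all sched events at once
 *   events/sched/filter    - filter expression applied subsystem-wide
 */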
2040
2041 static int
2042 event_create_dir(struct dentry *parent, struct trace_event_file *file)
2043 {
2044         struct trace_event_call *call = file->event_call;
2045         struct trace_array *tr = file->tr;
2046         struct list_head *head;
2047         struct dentry *d_events;
2048         const char *name;
2049         int ret;
2050
2051         /*
2052          * If the trace point header did not define TRACE_SYSTEM
2053          * then the system would be called "TRACE_SYSTEM".
2054          */
2055         if (strcmp(call->class->system, TRACE_SYSTEM) != 0) {
2056                 d_events = event_subsystem_dir(tr, call->class->system, file, parent);
2057                 if (!d_events)
2058                         return -ENOMEM;
2059         } else
2060                 d_events = parent;
2061
2062         name = trace_event_name(call);
2063         file->dir = tracefs_create_dir(name, d_events);
2064         if (!file->dir) {
2065                 pr_warn("Could not create tracefs '%s' directory\n", name);
2066                 return -1;
2067         }
2068
2069         if (call->class->reg && !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
2070                 trace_create_file("enable", 0644, file->dir, file,
2071                                   &ftrace_enable_fops);
2072
2073 #ifdef CONFIG_PERF_EVENTS
2074         if (call->event.type && call->class->reg)
2075                 trace_create_file("id", 0444, file->dir,
2076                                   (void *)(long)call->event.type,
2077                                   &ftrace_event_id_fops);
2078 #endif
2079
2080         /*
2081          * Other events may have the same class. Only update
2082          * the fields if they are not already defined.
2083          */
2084         head = trace_get_fields(call);
2085         if (list_empty(head)) {
2086                 ret = call->class->define_fields(call);
2087                 if (ret < 0) {
2088                         pr_warn("Could not initialize trace point events/%s\n",
2089                                 name);
2090                         return -1;
2091                 }
2092         }
2093         trace_create_file("filter", 0644, file->dir, file,
2094                           &ftrace_event_filter_fops);
2095
2096         trace_create_file("trigger", 0644, file->dir, file,
2097                           &event_trigger_fops);
2098
2099         trace_create_file("format", 0444, file->dir, call,
2100                           &ftrace_event_format_fops);
2101
2102         return 0;
2103 }
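
/*
 * The resulting per-event directory, sketched for a hypothetical
 * sched:sched_switch event:
 *
 *   events/sched/sched_switch/enable    - 0/1 switch for this event
 *   events/sched/sched_switch/id        - event id (CONFIG_PERF_EVENTS)
 *   events/sched/sched_switch/filter    - per-event filter expression
 *   events/sched/sched_switch/trigger   - event triggers
 *   events/sched/sched_switch/format    - field layout for parsers
 */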
2104
2105 static void remove_event_from_tracers(struct trace_event_call *call)
2106 {
2107         struct trace_event_file *file;
2108         struct trace_array *tr;
2109
2110         do_for_each_event_file_safe(tr, file) {
2111                 if (file->event_call != call)
2112                         continue;
2113
2114                 remove_event_file_dir(file);
2115                 /*
2116                  * The do_for_each_event_file_safe() is
2117                  * a double loop. After finding the call for this
2118                  * trace_array, we use break to jump to the next
2119                  * trace_array.
2120                  */
2121                 break;
2122         } while_for_each_event_file();
2123 }
2124
2125 static void event_remove(struct trace_event_call *call)
2126 {
2127         struct trace_array *tr;
2128         struct trace_event_file *file;
2129
2130         do_for_each_event_file(tr, file) {
2131                 if (file->event_call != call)
2132                         continue;
2133                 ftrace_event_enable_disable(file, 0);
2134                 /*
2135                  * The do_for_each_event_file() is
2136                  * a double loop. After finding the call for this
2137                  * trace_array, we use break to jump to the next
2138                  * trace_array.
2139                  */
2140                 break;
2141         } while_for_each_event_file();
2142
2143         if (call->event.funcs)
2144                 __unregister_trace_event(&call->event);
2145         remove_event_from_tracers(call);
2146         list_del(&call->list);
2147 }
2148
2149 static int event_init(struct trace_event_call *call)
2150 {
2151         int ret = 0;
2152         const char *name;
2153
2154         name = trace_event_name(call);
2155         if (WARN_ON(!name))
2156                 return -EINVAL;
2157
2158         if (call->class->raw_init) {
2159                 ret = call->class->raw_init(call);
2160                 if (ret < 0 && ret != -ENOSYS)
2161                         pr_warn("Could not initialize trace events/%s\n", name);
2162         }
2163
2164         return ret;
2165 }
2166
2167 static int
2168 __register_event(struct trace_event_call *call, struct module *mod)
2169 {
2170         int ret;
2171
2172         ret = event_init(call);
2173         if (ret < 0)
2174                 return ret;
2175
2176         list_add(&call->list, &ftrace_events);
2177         call->mod = mod;
2178
2179         return 0;
2180 }
2181
2182 static char *enum_replace(char *ptr, struct trace_enum_map *map, int len)
2183 {
2184         int rlen;
2185         int elen;
2186
2187         /* Find the length of the enum value as a string */
2188         elen = snprintf(ptr, 0, "%ld", map->enum_value);
2189         /* Make sure there's enough room to replace the string with the value */
2190         if (len < elen)
2191                 return NULL;
2192
2193         snprintf(ptr, elen + 1, "%ld", map->enum_value);
2194
2195         /* Get the rest of the string of ptr */
2196         rlen = strlen(ptr + len);
2197         memmove(ptr + elen, ptr + len, rlen);
2198         /* Make sure we end the new string */
2199         ptr[elen + rlen] = 0;
2200
2201         return ptr + elen;
2202 }
2203
2204 static void update_event_printk(struct trace_event_call *call,
2205                                 struct trace_enum_map *map)
2206 {
2207         char *ptr;
2208         int quote = 0;
2209         int len = strlen(map->enum_string);
2210
2211         for (ptr = call->print_fmt; *ptr; ptr++) {
2212                 if (*ptr == '\\') {
2213                         ptr++;
2214                         /* paranoid */
2215                         if (!*ptr)
2216                                 break;
2217                         continue;
2218                 }
2219                 if (*ptr == '"') {
2220                         quote ^= 1;
2221                         continue;
2222                 }
2223                 if (quote)
2224                         continue;
2225                 if (isdigit(*ptr)) {
2226                         /* skip numbers */
2227                         do {
2228                                 ptr++;
2229                                 /* Check for alpha chars like ULL */
2230                         } while (isalnum(*ptr));
2231                         if (!*ptr)
2232                                 break;
2233                         /*
2234                          * A number must have some kind of delimiter after
2235                          * it, and we can ignore that too.
2236                          */
2237                         continue;
2238                 }
2239                 if (isalpha(*ptr) || *ptr == '_') {
2240                         if (strncmp(map->enum_string, ptr, len) == 0 &&
2241                             !isalnum(ptr[len]) && ptr[len] != '_') {
2242                                 ptr = enum_replace(ptr, map, len);
2243                                 /* Hmm, enum string smaller than value */
2244                                 if (WARN_ON_ONCE(!ptr))
2245                                         return;
2246                                 /*
2247                                  * No need to decrement here, as enum_replace()
2248                  * returns the pointer to the character past
2249                  * the enum, and two enums cannot be placed
2250                                  * back to back without something in between.
2251                                  * We can skip that something in between.
2252                                  */
2253                                 continue;
2254                         }
2255                 skip_more:
2256                         do {
2257                                 ptr++;
2258                         } while (isalnum(*ptr) || *ptr == '_');
2259                         if (!*ptr)
2260                                 break;
2261                         /*
2262                          * If what comes after this variable is a '.' or
2263                          * '->' then we can continue to ignore that string.
2264                          */
2265                         if (*ptr == '.' || (ptr[0] == '-' && ptr[1] == '>')) {
2266                                 ptr += *ptr == '.' ? 1 : 2;
2267                                 if (!*ptr)
2268                                         break;
2269                                 goto skip_more;
2270                         }
2271                         /*
2272                          * Once again, we can skip the delimiter that came
2273                          * after the string.
2274                          */
2275                         continue;
2276                 }
2277         }
2278 }
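
/*
 * A hedged example of what the scan above does. Given a map entry of
 * { .enum_string = "ZONE_DMA", .enum_value = 0 } (names invented for
 * illustration), a print_fmt fragment such as
 *
 *   __print_symbolic(REC->zid, { ZONE_DMA, "DMA" })
 *
 * is rewritten in place to
 *
 *   __print_symbolic(REC->zid, { 0, "DMA" })
 *
 * so that user space reading the "format" file sees a literal number
 * instead of an enum name it cannot resolve.
 */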
2279
2280 void trace_event_enum_update(struct trace_enum_map **map, int len)
2281 {
2282         struct trace_event_call *call, *p;
2283         const char *last_system = NULL;
2284         int last_i;
2285         int i;
2286
2287         down_write(&trace_event_sem);
2288         list_for_each_entry_safe(call, p, &ftrace_events, list) {
2289                 /* events are usually grouped together by system */
2290                 if (!last_system || call->class->system != last_system) {
2291                         last_i = 0;
2292                         last_system = call->class->system;
2293                 }
2294
2295                 for (i = last_i; i < len; i++) {
2296                         if (call->class->system == map[i]->system) {
2297                                 /* Save the first system if need be */
2298                                 if (!last_i)
2299                                         last_i = i;
2300                                 update_event_printk(call, map[i]);
2301                         }
2302                 }
2303         }
2304         up_write(&trace_event_sem);
2305 }
2306
2307 static struct trace_event_file *
2308 trace_create_new_event(struct trace_event_call *call,
2309                        struct trace_array *tr)
2310 {
2311         struct trace_event_file *file;
2312
2313         file = kmem_cache_alloc(file_cachep, GFP_TRACE);
2314         if (!file)
2315                 return NULL;
2316
2317         file->event_call = call;
2318         file->tr = tr;
2319         atomic_set(&file->sm_ref, 0);
2320         atomic_set(&file->tm_ref, 0);
2321         INIT_LIST_HEAD(&file->triggers);
2322         list_add(&file->list, &tr->events);
2323
2324         return file;
2325 }
2326
2327 /* Add an event to a trace directory */
2328 static int
2329 __trace_add_new_event(struct trace_event_call *call, struct trace_array *tr)
2330 {
2331         struct trace_event_file *file;
2332
2333         file = trace_create_new_event(call, tr);
2334         if (!file)
2335                 return -ENOMEM;
2336
2337         return event_create_dir(tr->event_dir, file);
2338 }
2339
2340 /*
2341  * Just create a descriptor for early init. A descriptor is required
2342  * for enabling events at boot. We want to enable events before
2343  * the filesystem is initialized.
2344  */
2345 static __init int
2346 __trace_early_add_new_event(struct trace_event_call *call,
2347                             struct trace_array *tr)
2348 {
2349         struct trace_event_file *file;
2350
2351         file = trace_create_new_event(call, tr);
2352         if (!file)
2353                 return -ENOMEM;
2354
2355         return 0;
2356 }
2357
2358 struct ftrace_module_file_ops;
2359 static void __add_event_to_tracers(struct trace_event_call *call);
2360
2361 /* Add an additional event_call dynamically */
2362 int trace_add_event_call(struct trace_event_call *call)
2363 {
2364         int ret;
2365         mutex_lock(&trace_types_lock);
2366         mutex_lock(&event_mutex);
2367
2368         ret = __register_event(call, NULL);
2369         if (ret >= 0)
2370                 __add_event_to_tracers(call);
2371
2372         mutex_unlock(&event_mutex);
2373         mutex_unlock(&trace_types_lock);
2374         return ret;
2375 }
2376
2377 /*
2378  * Must be called under locking of trace_types_lock, event_mutex and
2379  * trace_event_sem.
2380  */
2381 static void __trace_remove_event_call(struct trace_event_call *call)
2382 {
2383         event_remove(call);
2384         trace_destroy_fields(call);
2385         free_event_filter(call->filter);
2386         call->filter = NULL;
2387 }
2388
2389 static int probe_remove_event_call(struct trace_event_call *call)
2390 {
2391         struct trace_array *tr;
2392         struct trace_event_file *file;
2393
2394 #ifdef CONFIG_PERF_EVENTS
2395         if (call->perf_refcount)
2396                 return -EBUSY;
2397 #endif
2398         do_for_each_event_file(tr, file) {
2399                 if (file->event_call != call)
2400                         continue;
2401                 /*
2402                  * We can't rely on the ftrace_event_enable_disable(enable => 0)
2403                  * that we are about to do; EVENT_FILE_FL_SOFT_MODE can suppress
2404                  * TRACE_REG_UNREGISTER.
2405                  */
2406                 if (file->flags & EVENT_FILE_FL_ENABLED)
2407                         return -EBUSY;
2408                 /*
2409                  * The do_for_each_event_file() is
2410                  * a double loop. After finding the call for this
2411                  * trace_array, we use break to jump to the next
2412                  * trace_array.
2413                  */
2414                 break;
2415         } while_for_each_event_file();
2416
2417         __trace_remove_event_call(call);
2418
2419         return 0;
2420 }
2421
2422 /* Remove an event_call */
2423 int trace_remove_event_call(struct trace_event_call *call)
2424 {
2425         int ret;
2426
2427         mutex_lock(&trace_types_lock);
2428         mutex_lock(&event_mutex);
2429         down_write(&trace_event_sem);
2430         ret = probe_remove_event_call(call);
2431         up_write(&trace_event_sem);
2432         mutex_unlock(&event_mutex);
2433         mutex_unlock(&trace_types_lock);
2434
2435         return ret;
2436 }
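
/*
 * Usage note: dynamic event providers (kprobe events, for instance)
 * pair trace_add_event_call() with trace_remove_event_call(). The
 * remove path returns -EBUSY while the event is still enabled or has
 * perf users, so callers must be prepared for it to fail.
 */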
2437
2438 #define for_each_event(event, start, end)                       \
2439         for (event = start;                                     \
2440              (unsigned long)event < (unsigned long)end;         \
2441              event++)
2442
2443 #ifdef CONFIG_MODULES
2444
2445 static void trace_module_add_events(struct module *mod)
2446 {
2447         struct trace_event_call **call, **start, **end;
2448
2449         if (!mod->num_trace_events)
2450                 return;
2451
2452         /* Don't add infrastructure for mods without tracepoints */
2453         if (trace_module_has_bad_taint(mod)) {
2454                 pr_err("%s: module has bad taint, not creating trace events\n",
2455                        mod->name);
2456                 return;
2457         }
2458
2459         start = mod->trace_events;
2460         end = mod->trace_events + mod->num_trace_events;
2461
2462         for_each_event(call, start, end) {
2463                 __register_event(*call, mod);
2464                 __add_event_to_tracers(*call);
2465         }
2466 }
2467
2468 static void trace_module_remove_events(struct module *mod)
2469 {
2470         struct trace_event_call *call, *p;
2471         bool clear_trace = false;
2472
2473         down_write(&trace_event_sem);
2474         list_for_each_entry_safe(call, p, &ftrace_events, list) {
2475                 if (call->mod == mod) {
2476                         if (call->flags & TRACE_EVENT_FL_WAS_ENABLED)
2477                                 clear_trace = true;
2478                         __trace_remove_event_call(call);
2479                 }
2480         }
2481         up_write(&trace_event_sem);
2482
2483         /*
2484          * It is safest to reset the ring buffer if the module being unloaded
2485          * registered any events that were used. The only worry is if
2486          * a new module gets loaded, and takes on the same id as the events
2487          * of this module. When printing out the buffer, traced events left
2488          * over from this module may be passed to the new module events and
2489          * unexpected results may occur.
2490          */
2491         if (clear_trace)
2492                 tracing_reset_all_online_cpus();
2493 }
2494
2495 static int trace_module_notify(struct notifier_block *self,
2496                                unsigned long val, void *data)
2497 {
2498         struct module *mod = data;
2499
2500         mutex_lock(&trace_types_lock);
2501         mutex_lock(&event_mutex);
2502         switch (val) {
2503         case MODULE_STATE_COMING:
2504                 trace_module_add_events(mod);
2505                 break;
2506         case MODULE_STATE_GOING:
2507                 trace_module_remove_events(mod);
2508                 break;
2509         }
2510         mutex_unlock(&event_mutex);
2511         mutex_unlock(&trace_types_lock);
2512
2513         return 0;
2514 }
2515
2516 static struct notifier_block trace_module_nb = {
2517         .notifier_call = trace_module_notify,
2518         .priority = 1, /* higher than trace.c module notify */
2519 };
2520 #endif /* CONFIG_MODULES */
2521
2522 /* Create a new event directory structure for a trace directory. */
2523 static void
2524 __trace_add_event_dirs(struct trace_array *tr)
2525 {
2526         struct trace_event_call *call;
2527         int ret;
2528
2529         list_for_each_entry(call, &ftrace_events, list) {
2530                 ret = __trace_add_new_event(call, tr);
2531                 if (ret < 0)
2532                         pr_warn("Could not create directory for event %s\n",
2533                                 trace_event_name(call));
2534         }
2535 }
2536
2537 struct trace_event_file *
2538 find_event_file(struct trace_array *tr, const char *system,  const char *event)
2539 {
2540         struct trace_event_file *file;
2541         struct trace_event_call *call;
2542         const char *name;
2543
2544         list_for_each_entry(file, &tr->events, list) {
2545
2546                 call = file->event_call;
2547                 name = trace_event_name(call);
2548
2549                 if (!name || !call->class || !call->class->reg)
2550                         continue;
2551
2552                 if (call->flags & TRACE_EVENT_FL_IGNORE_ENABLE)
2553                         continue;
2554
2555                 if (strcmp(event, name) == 0 &&
2556                     strcmp(system, call->class->system) == 0)
2557                         return file;
2558         }
2559         return NULL;
2560 }
2561
2562 #ifdef CONFIG_DYNAMIC_FTRACE
2563
2564 /* Avoid typos */
2565 #define ENABLE_EVENT_STR        "enable_event"
2566 #define DISABLE_EVENT_STR       "disable_event"
2567
2568 struct event_probe_data {
2569         struct trace_event_file *file;
2570         unsigned long                   count;
2571         int                             ref;
2572         bool                            enable;
2573 };
2574
2575 static void
2576 event_enable_probe(unsigned long ip, unsigned long parent_ip, void **_data)
2577 {
2578         struct event_probe_data **pdata = (struct event_probe_data **)_data;
2579         struct event_probe_data *data = *pdata;
2580
2581         if (!data)
2582                 return;
2583
2584         if (data->enable)
2585                 clear_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
2586         else
2587                 set_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT, &data->file->flags);
2588 }
2589
2590 static void
2591 event_enable_count_probe(unsigned long ip, unsigned long parent_ip, void **_data)
2592 {
2593         struct event_probe_data **pdata = (struct event_probe_data **)_data;
2594         struct event_probe_data *data = *pdata;
2595
2596         if (!data)
2597                 return;
2598
2599         if (!data->count)
2600                 return;
2601
2602         /* Skip if the event is in a state we want to switch to */
2603         if (data->enable == !(data->file->flags & EVENT_FILE_FL_SOFT_DISABLED))
2604                 return;
2605
2606         if (data->count != -1)
2607                 (data->count)--;
2608
2609         event_enable_probe(ip, parent_ip, _data);
2610 }
2611
2612 static int
2613 event_enable_print(struct seq_file *m, unsigned long ip,
2614                       struct ftrace_probe_ops *ops, void *_data)
2615 {
2616         struct event_probe_data *data = _data;
2617
2618         seq_printf(m, "%ps:", (void *)ip);
2619
2620         seq_printf(m, "%s:%s:%s",
2621                    data->enable ? ENABLE_EVENT_STR : DISABLE_EVENT_STR,
2622                    data->file->event_call->class->system,
2623                    trace_event_name(data->file->event_call));
2624
2625         if (data->count == -1)
2626                 seq_puts(m, ":unlimited\n");
2627         else
2628                 seq_printf(m, ":count=%ld\n", data->count);
2629
2630         return 0;
2631 }
2632
2633 static int
2634 event_enable_init(struct ftrace_probe_ops *ops, unsigned long ip,
2635                   void **_data)
2636 {
2637         struct event_probe_data **pdata = (struct event_probe_data **)_data;
2638         struct event_probe_data *data = *pdata;
2639
2640         data->ref++;
2641         return 0;
2642 }
2643
2644 static void
2645 event_enable_free(struct ftrace_probe_ops *ops, unsigned long ip,
2646                   void **_data)
2647 {
2648         struct event_probe_data **pdata = (struct event_probe_data **)_data;
2649         struct event_probe_data *data = *pdata;
2650
2651         if (WARN_ON_ONCE(data->ref <= 0))
2652                 return;
2653
2654         data->ref--;
2655         if (!data->ref) {
2656                 /* Remove the SOFT_MODE flag */
2657                 __ftrace_event_enable_disable(data->file, 0, 1);
2658                 module_put(data->file->event_call->mod);
2659                 kfree(data);
2660         }
2661         *pdata = NULL;
2662 }
2663
2664 static struct ftrace_probe_ops event_enable_probe_ops = {
2665         .func                   = event_enable_probe,
2666         .print                  = event_enable_print,
2667         .init                   = event_enable_init,
2668         .free                   = event_enable_free,
2669 };
2670
2671 static struct ftrace_probe_ops event_enable_count_probe_ops = {
2672         .func                   = event_enable_count_probe,
2673         .print                  = event_enable_print,
2674         .init                   = event_enable_init,
2675         .free                   = event_enable_free,
2676 };
2677
2678 static struct ftrace_probe_ops event_disable_probe_ops = {
2679         .func                   = event_enable_probe,
2680         .print                  = event_enable_print,
2681         .init                   = event_enable_init,
2682         .free                   = event_enable_free,
2683 };
2684
2685 static struct ftrace_probe_ops event_disable_count_probe_ops = {
2686         .func                   = event_enable_count_probe,
2687         .print                  = event_enable_print,
2688         .init                   = event_enable_init,
2689         .free                   = event_enable_free,
2690 };
2691
2692 static int
2693 event_enable_func(struct ftrace_hash *hash,
2694                   char *glob, char *cmd, char *param, int enabled)
2695 {
2696         struct trace_array *tr = top_trace_array();
2697         struct trace_event_file *file;
2698         struct ftrace_probe_ops *ops;
2699         struct event_probe_data *data;
2700         const char *system;
2701         const char *event;
2702         char *number;
2703         bool enable;
2704         int ret;
2705
2706         if (!tr)
2707                 return -ENODEV;
2708
2709         /* hash funcs only work with set_ftrace_filter */
2710         if (!enabled || !param)
2711                 return -EINVAL;
2712
2713         system = strsep(&param, ":");
2714         if (!param)
2715                 return -EINVAL;
2716
2717         event = strsep(&param, ":");
2718
2719         mutex_lock(&event_mutex);
2720
2721         ret = -EINVAL;
2722         file = find_event_file(tr, system, event);
2723         if (!file)
2724                 goto out;
2725
2726         enable = strcmp(cmd, ENABLE_EVENT_STR) == 0;
2727
2728         if (enable)
2729                 ops = param ? &event_enable_count_probe_ops : &event_enable_probe_ops;
2730         else
2731                 ops = param ? &event_disable_count_probe_ops : &event_disable_probe_ops;
2732
2733         if (glob[0] == '!') {
2734                 unregister_ftrace_function_probe_func(glob+1, ops);
2735                 ret = 0;
2736                 goto out;
2737         }
2738
2739         ret = -ENOMEM;
2740         data = kzalloc(sizeof(*data), GFP_KERNEL);
2741         if (!data)
2742                 goto out;
2743
2744         data->enable = enable;
2745         data->count = -1;
2746         data->file = file;
2747
2748         if (!param)
2749                 goto out_reg;
2750
2751         number = strsep(&param, ":");
2752
2753         ret = -EINVAL;
2754         if (!strlen(number))
2755                 goto out_free;
2756
2757         /*
2758          * We use the callback data field (which is a pointer)
2759          * as our counter.
2760          */
2761         ret = kstrtoul(number, 0, &data->count);
2762         if (ret)
2763                 goto out_free;
2764
2765  out_reg:
2766         /* Don't let event modules unload while a probe is registered */
2767         ret = try_module_get(file->event_call->mod);
2768         if (!ret) {
2769                 ret = -EBUSY;
2770                 goto out_free;
2771         }
2772
2773         ret = __ftrace_event_enable_disable(file, 1, 1);
2774         if (ret < 0)
2775                 goto out_put;
2776         ret = register_ftrace_function_probe(glob, ops, data);
2777         /*
2778          * On success, the above returns the number of functions enabled,
2779          * but if it didn't find any functions it returns zero.
2780          * Consider no functions a failure too.
2781          */
2782         if (!ret) {
2783                 ret = -ENOENT;
2784                 goto out_disable;
2785         } else if (ret < 0)
2786                 goto out_disable;
2787         /* Just return zero, not the number of enabled functions */
2788         ret = 0;
2789  out:
2790         mutex_unlock(&event_mutex);
2791         return ret;
2792
2793  out_disable:
2794         __ftrace_event_enable_disable(file, 0, 1);
2795  out_put:
2796         module_put(file->event_call->mod);
2797  out_free:
2798         kfree(data);
2799         goto out;
2800 }
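
/*
 * Usage sketch for the command implemented above, written through
 * set_ftrace_filter (function and event names are only examples):
 *
 *   # echo 'schedule:enable_event:sched:sched_switch:2' > set_ftrace_filter
 *
 * arms a probe on schedule() that enables sched:sched_switch for the
 * first two hits; omit the trailing count for unlimited, and prefix
 * the line with '!' to remove the probe again.
 */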
2801
2802 static struct ftrace_func_command event_enable_cmd = {
2803         .name                   = ENABLE_EVENT_STR,
2804         .func                   = event_enable_func,
2805 };
2806
2807 static struct ftrace_func_command event_disable_cmd = {
2808         .name                   = DISABLE_EVENT_STR,
2809         .func                   = event_enable_func,
2810 };
2811
2812 static __init int register_event_cmds(void)
2813 {
2814         int ret;
2815
2816         ret = register_ftrace_command(&event_enable_cmd);
2817         if (WARN_ON(ret < 0))
2818                 return ret;
2819         ret = register_ftrace_command(&event_disable_cmd);
2820         if (WARN_ON(ret < 0))
2821                 unregister_ftrace_command(&event_enable_cmd);
2822         return ret;
2823 }
2824 #else
2825 static inline int register_event_cmds(void) { return 0; }
2826 #endif /* CONFIG_DYNAMIC_FTRACE */
2827
2828 /*
2829  * The top level array has already had its trace_event_file
2830  * descriptors created in order to allow for early events to
2831  * be recorded. This function is called after tracefs has been
2832  * initialized, and we now have to create the files associated
2833  * with the events.
2834  */
2835 static __init void
2836 __trace_early_add_event_dirs(struct trace_array *tr)
2837 {
2838         struct trace_event_file *file;
2839         int ret;
2840
2842         list_for_each_entry(file, &tr->events, list) {
2843                 ret = event_create_dir(tr->event_dir, file);
2844                 if (ret < 0)
2845                         pr_warn("Could not create directory for event %s\n",
2846                                 trace_event_name(file->event_call));
2847         }
2848 }
2849
2850 /*
2851  * For early boot up, the top trace array needs to have
2852  * a list of events that can be enabled. This must be done before
2853  * the filesystem is set up in order to allow events to be traced
2854  * early.
2855  */
2856 static __init void
2857 __trace_early_add_events(struct trace_array *tr)
2858 {
2859         struct trace_event_call *call;
2860         int ret;
2861
2862         list_for_each_entry(call, &ftrace_events, list) {
2863                 /* Early boot up should not have any modules loaded */
2864                 if (WARN_ON_ONCE(call->mod))
2865                         continue;
2866
2867                 ret = __trace_early_add_new_event(call, tr);
2868                 if (ret < 0)
2869                         pr_warn("Could not create early event %s\n",
2870                                 trace_event_name(call));
2871         }
2872 }
2873
2874 /* Remove the event directory structure for a trace directory. */
2875 static void
2876 __trace_remove_event_dirs(struct trace_array *tr)
2877 {
2878         struct trace_event_file *file, *next;
2879
2880         list_for_each_entry_safe(file, next, &tr->events, list)
2881                 remove_event_file_dir(file);
2882 }
2883
2884 static void __add_event_to_tracers(struct trace_event_call *call)
2885 {
2886         struct trace_array *tr;
2887
2888         list_for_each_entry(tr, &ftrace_trace_arrays, list)
2889                 __trace_add_new_event(call, tr);
2890 }
2891
2892 extern struct trace_event_call *__start_ftrace_events[];
2893 extern struct trace_event_call *__stop_ftrace_events[];
2894
2895 static char bootup_event_buf[COMMAND_LINE_SIZE] __initdata;
2896
2897 static __init int setup_trace_event(char *str)
2898 {
2899         strlcpy(bootup_event_buf, str, COMMAND_LINE_SIZE);
2900         ring_buffer_expanded = true;
2901         tracing_selftest_disabled = true;
2902
2903         return 1;
2904 }
2905 __setup("trace_event=", setup_trace_event);
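
/*
 * Boot-time usage sketch: the "trace_event=" parameter takes a comma
 * separated event list that early_enable_events() below parses, e.g.
 * (event names illustrative):
 *
 *   trace_event=sched:sched_switch,irq:irq_handler_entry
 */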
2906
2907 /* Expects to have event_mutex held when called */
2908 static int
2909 create_event_toplevel_files(struct dentry *parent, struct trace_array *tr)
2910 {
2911         struct dentry *d_events;
2912         struct dentry *entry;
2913
2914         entry = tracefs_create_file("set_event", 0644, parent,
2915                                     tr, &ftrace_set_event_fops);
2916         if (!entry) {
2917                 pr_warn("Could not create tracefs 'set_event' entry\n");
2918                 return -ENOMEM;
2919         }
2920
2921         d_events = tracefs_create_dir("events", parent);
2922         if (!d_events) {
2923                 pr_warn("Could not create tracefs 'events' directory\n");
2924                 return -ENOMEM;
2925         }
2926
2927         entry = tracefs_create_file("set_event_pid", 0644, parent,
2928                                     tr, &ftrace_set_event_pid_fops);
2929
2930         /* ring buffer internal formats */
2931         trace_create_file("header_page", 0444, d_events,
2932                           ring_buffer_print_page_header,
2933                           &ftrace_show_header_fops);
2934
2935         trace_create_file("header_event", 0444, d_events,
2936                           ring_buffer_print_entry_header,
2937                           &ftrace_show_header_fops);
2938
2939         trace_create_file("enable", 0644, d_events,
2940                           tr, &ftrace_tr_enable_fops);
2941
2942         tr->event_dir = d_events;
2943
2944         return 0;
2945 }
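
/*
 * Rough sketch of the top-level entries created above for an instance
 * (relative to its tracefs directory):
 *
 *   set_event       - list and set enabled events
 *   set_event_pid   - restrict events to a set of pids
 *   events/         - per-system and per-event directories
 *   events/enable   - master switch for all events
 *   events/header_page, events/header_event - ring buffer formats
 */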
2946
2947 /**
2948  * event_trace_add_tracer - add an instance of a trace_array to events
2949  * @parent: The parent dentry to place the files/directories for events in
2950  * @tr: The trace array associated with these events
2951  *
2952  * When a new instance is created, it needs to set up its events
2953  * directory, as well as other files associated with events. It also
2954  * creates the event hierarchy in the @parent/events directory.
2955  *
2956  * Returns 0 on success.
2957  */
2958 int event_trace_add_tracer(struct dentry *parent, struct trace_array *tr)
2959 {
2960         int ret;
2961
2962         mutex_lock(&event_mutex);
2963
2964         ret = create_event_toplevel_files(parent, tr);
2965         if (ret)
2966                 goto out_unlock;
2967
2968         down_write(&trace_event_sem);
2969         __trace_add_event_dirs(tr);
2970         up_write(&trace_event_sem);
2971
2972  out_unlock:
2973         mutex_unlock(&event_mutex);
2974
2975         return ret;
2976 }
2977
2978 /*
2979  * The top trace array already had its file descriptors created.
2980  * Now the files themselves need to be created.
2981  */
2982 static __init int
2983 early_event_add_tracer(struct dentry *parent, struct trace_array *tr)
2984 {
2985         int ret;
2986
2987         mutex_lock(&event_mutex);
2988
2989         ret = create_event_toplevel_files(parent, tr);
2990         if (ret)
2991                 goto out_unlock;
2992
2993         down_write(&trace_event_sem);
2994         __trace_early_add_event_dirs(tr);
2995         up_write(&trace_event_sem);
2996
2997  out_unlock:
2998         mutex_unlock(&event_mutex);
2999
3000         return ret;
3001 }
3002
3003 int event_trace_del_tracer(struct trace_array *tr)
3004 {
3005         mutex_lock(&event_mutex);
3006
3007         /* Disable any event triggers and associated soft-disabled events */
3008         clear_event_triggers(tr);
3009
3010         /* Clear the pid list */
3011         __ftrace_clear_event_pids(tr);
3012
3013         /* Disable any running events */
3014         __ftrace_set_clr_event_nolock(tr, NULL, NULL, NULL, 0);
3015
3016         /* Accesses to events are within rcu_read_lock_sched() */
3017         synchronize_sched();
3018
3019         down_write(&trace_event_sem);
3020         __trace_remove_event_dirs(tr);
3021         tracefs_remove_recursive(tr->event_dir);
3022         up_write(&trace_event_sem);
3023
3024         tr->event_dir = NULL;
3025
3026         mutex_unlock(&event_mutex);
3027
3028         return 0;
3029 }
3030
3031 static __init int event_trace_memsetup(void)
3032 {
3033         field_cachep = KMEM_CACHE(ftrace_event_field, SLAB_PANIC);
3034         file_cachep = KMEM_CACHE(trace_event_file, SLAB_PANIC);
3035         return 0;
3036 }
3037
3038 static __init void
3039 early_enable_events(struct trace_array *tr, bool disable_first)
3040 {
3041         char *buf = bootup_event_buf;
3042         char *token;
3043         int ret;
3044
3045         while (true) {
3046                 token = strsep(&buf, ",");
3047
3048                 if (!token)
3049                         break;
3050
3051                 if (*token) {
3052                         /* Restarting syscalls requires that we stop them first */
3053                         if (disable_first)
3054                                 ftrace_set_clr_event(tr, token, 0);
3055
3056                         ret = ftrace_set_clr_event(tr, token, 1);
3057                         if (ret)
3058                                 pr_warn("Failed to enable trace event: %s\n", token);
3059                 }
3060
3061                 /* Put back the comma to allow this to be called again */
3062                 if (buf)
3063                         *(buf - 1) = ',';
3064         }
3065 }
3066
3067 static __init int event_trace_enable(void)
3068 {
3069         struct trace_array *tr = top_trace_array();
3070         struct trace_event_call **iter, *call;
3071         int ret;
3072
3073         if (!tr)
3074                 return -ENODEV;
3075
3076         for_each_event(iter, __start_ftrace_events, __stop_ftrace_events) {
3077
3078                 call = *iter;
3079                 ret = event_init(call);
3080                 if (!ret)
3081                         list_add(&call->list, &ftrace_events);
3082         }
3083
3084         /*
3085          * We need the top trace array to have a working set of trace
3086          * points at early init, before the debug files and directories
3087          * are created. Create the file entries now, and attach them
3088          * to the actual file dentries later.
3089          */
3090         __trace_early_add_events(tr);
3091
3092         early_enable_events(tr, false);
3093
3094         trace_printk_start_comm();
3095
3096         register_event_cmds();
3097
3098         register_trigger_cmds();
3099
3100         return 0;
3101 }
3102
3103 /*
3104  * event_trace_enable() is called from trace_event_init() first to
3105  * initialize events and perhaps start any events that are on the
3106  * command line. Unfortunately, there are some events that will not
3107  * start this early, like the system call tracepoints that need
3108  * to set the TIF_SYSCALL_TRACEPOINT flag of pid 1. But event_trace_enable()
3109  * is called before pid 1 starts, so the flag is never set and the
3110  * syscall tracepoints are never reached, even though the events are
3111  * enabled (and do nothing).
3112  */
3113 static __init int event_trace_enable_again(void)
3114 {
3115         struct trace_array *tr;
3116
3117         tr = top_trace_array();
3118         if (!tr)
3119                 return -ENODEV;
3120
3121         early_enable_events(tr, true);
3122
3123         return 0;
3124 }
3125
3126 early_initcall(event_trace_enable_again);
3127
3128 static __init int event_trace_init(void)
3129 {
3130         struct trace_array *tr;
3131         struct dentry *d_tracer;
3132         struct dentry *entry;
3133         int ret;
3134
3135         tr = top_trace_array();
3136         if (!tr)
3137                 return -ENODEV;
3138
3139         d_tracer = tracing_init_dentry();
3140         if (IS_ERR(d_tracer))
3141                 return 0;
3142
3143         entry = tracefs_create_file("available_events", 0444, d_tracer,
3144                                     tr, &ftrace_avail_fops);
3145         if (!entry)
3146                 pr_warn("Could not create tracefs 'available_events' entry\n");
3147
3148         if (trace_define_generic_fields())
3149                 pr_warn("tracing: Failed to allocated generic fields");
3150
3151         if (trace_define_common_fields())
3152                 pr_warn("tracing: Failed to allocate common fields");
3153
3154         ret = early_event_add_tracer(d_tracer, tr);
3155         if (ret)
3156                 return ret;
3157
3158 #ifdef CONFIG_MODULES
3159         ret = register_module_notifier(&trace_module_nb);
3160         if (ret)
3161                 pr_warn("Failed to register trace events module notifier\n");
3162 #endif
3163         return 0;
3164 }
3165
3166 void __init trace_event_init(void)
3167 {
3168         event_trace_memsetup();
3169         init_ftrace_syscalls();
3170         event_trace_enable();
3171 }
3172
3173 fs_initcall(event_trace_init);
3174
3175 #ifdef CONFIG_FTRACE_STARTUP_TEST
3176
3177 static DEFINE_SPINLOCK(test_spinlock);
3178 static DEFINE_SPINLOCK(test_spinlock_irq);
3179 static DEFINE_MUTEX(test_mutex);
3180
3181 static __init void test_work(struct work_struct *dummy)
3182 {
3183         spin_lock(&test_spinlock);
3184         spin_lock_irq(&test_spinlock_irq);
3185         udelay(1);
3186         spin_unlock_irq(&test_spinlock_irq);
3187         spin_unlock(&test_spinlock);
3188
3189         mutex_lock(&test_mutex);
3190         msleep(1);
3191         mutex_unlock(&test_mutex);
3192 }
3193
3194 static __init int event_test_thread(void *unused)
3195 {
3196         void *test_malloc;
3197
3198         test_malloc = kmalloc(1234, GFP_KERNEL);
3199         if (!test_malloc)
3200                 pr_info("failed to kmalloc\n");
3201
3202         schedule_on_each_cpu(test_work);
3203
3204         kfree(test_malloc);
3205
3206         set_current_state(TASK_INTERRUPTIBLE);
3207         while (!kthread_should_stop()) {
3208                 schedule();
3209                 set_current_state(TASK_INTERRUPTIBLE);
3210         }
3211         __set_current_state(TASK_RUNNING);
3212
3213         return 0;
3214 }
3215
3216 /*
3217  * Do various things that may trigger events.
3218  */
3219 static __init void event_test_stuff(void)
3220 {
3221         struct task_struct *test_thread;
3222
3223         test_thread = kthread_run(event_test_thread, NULL, "test-events");
3224         msleep(1);
3225         kthread_stop(test_thread);
3226 }
3227
3228 /*
3229  * For every trace event defined, we will test each trace point separately,
3230  * and then by groups, and finally all trace points.
3231  */
3232 static __init void event_trace_self_tests(void)
3233 {
3234         struct trace_subsystem_dir *dir;
3235         struct trace_event_file *file;
3236         struct trace_event_call *call;
3237         struct event_subsystem *system;
3238         struct trace_array *tr;
3239         int ret;
3240
3241         tr = top_trace_array();
3242         if (!tr)
3243                 return;
3244
3245         pr_info("Running tests on trace events:\n");
3246
3247         list_for_each_entry(file, &tr->events, list) {
3248
3249                 call = file->event_call;
3250
3251                 /* Only test those that have a probe */
3252                 if (!call->class || !call->class->probe)
3253                         continue;
3254
3255 /*
3256  * Testing syscall events here is pretty useless, but
3257  * we still do it if configured, even though it is time consuming.
3258  * What we really need is a user thread to perform the
3259  * syscalls as we test.
3260  */
3261 #ifndef CONFIG_EVENT_TRACE_TEST_SYSCALLS
3262                 if (call->class->system &&
3263                     strcmp(call->class->system, "syscalls") == 0)
3264                         continue;
3265 #endif
3266
3267                 pr_info("Testing event %s: ", trace_event_name(call));
3268
3269                 /*
3270                  * If an event is already enabled, someone is using
3271                  * it and the self test should not be on.
3272                  */
3273                 if (file->flags & EVENT_FILE_FL_ENABLED) {
3274                         pr_warn("Enabled event during self test!\n");
3275                         WARN_ON_ONCE(1);
3276                         continue;
3277                 }
3278
3279                 ftrace_event_enable_disable(file, 1);
3280                 event_test_stuff();
3281                 ftrace_event_enable_disable(file, 0);
3282
3283                 pr_cont("OK\n");
3284         }
3285
3286         /* Now test at the sub system level */
3287
3288         pr_info("Running tests on trace event systems:\n");
3289
3290         list_for_each_entry(dir, &tr->systems, list) {
3291
3292                 system = dir->subsystem;
3293
3294                 /* the ftrace system is special, skip it */
3295                 if (strcmp(system->name, "ftrace") == 0)
3296                         continue;
3297
3298                 pr_info("Testing event system %s: ", system->name);
3299
3300                 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 1);
3301                 if (WARN_ON_ONCE(ret)) {
3302                         pr_warn("error enabling system %s\n",
3303                                 system->name);
3304                         continue;
3305                 }
3306
3307                 event_test_stuff();
3308
3309                 ret = __ftrace_set_clr_event(tr, NULL, system->name, NULL, 0);
3310                 if (WARN_ON_ONCE(ret)) {
3311                         pr_warn("error disabling system %s\n",
3312                                 system->name);
3313                         continue;
3314                 }
3315
3316                 pr_cont("OK\n");
3317         }
3318
3319         /* Test with all events enabled */
3320
3321         pr_info("Running tests on all trace events:\n");
3322         pr_info("Testing all events: ");
3323
3324         ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 1);
3325         if (WARN_ON_ONCE(ret)) {
3326                 pr_warn("error enabling all events\n");
3327                 return;
3328         }
3329
3330         event_test_stuff();
3331
3332         /* Disable all events again */
3333         ret = __ftrace_set_clr_event(tr, NULL, NULL, NULL, 0);
3334         if (WARN_ON_ONCE(ret)) {
3335                 pr_warn("error disabling all events\n");
3336                 return;
3337         }
3338
3339         pr_cont("OK\n");
3340 }
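/*
 * Illustrative only (not built): the three passes above (one event, one
 * subsystem, all events) map onto what user space can request through the
 * tracefs "set_event" file, which __ftrace_set_clr_event() services.
 * A sketch assuming tracefs is mounted at /sys/kernel/tracing; the helper
 * name set_event_str() is made up:
 */
#if 0
#include <stdio.h>

static int set_event_str(const char *what)
{
	FILE *f = fopen("/sys/kernel/tracing/set_event", "w");

	if (!f)
		return -1;
	fprintf(f, "%s\n", what);
	return fclose(f);
}

int main(void)
{
	set_event_str("sched:sched_switch");	/* one event */
	set_event_str("sched:*");		/* a whole subsystem */
	set_event_str("*:*");			/* every event */
	set_event_str("!*:*");			/* disable them all again */
	return 0;
}
#endif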
3341
3342 #ifdef CONFIG_FUNCTION_TRACER
3343
3344 static DEFINE_PER_CPU(atomic_t, ftrace_test_event_disable);
3345
3346 static struct trace_array *event_tr;
3347
3348 static void __init
3349 function_test_events_call(unsigned long ip, unsigned long parent_ip,
3350                           struct ftrace_ops *op, struct pt_regs *pt_regs)
3351 {
3352         struct ring_buffer_event *event;
3353         struct ring_buffer *buffer;
3354         struct ftrace_entry *entry;
3355         unsigned long flags;
3356         long disabled;
3357         int cpu;
3358         int pc;
3359
3360         pc = preempt_count();
3361         preempt_disable_notrace();
3362         cpu = raw_smp_processor_id();
3363         disabled = atomic_inc_return(&per_cpu(ftrace_test_event_disable, cpu));
3364
3365         if (disabled != 1)	/* the callback recursed on this CPU */
3366                 goto out;
3367
3368         local_save_flags(flags);
3369
3370         event = trace_current_buffer_lock_reserve(&buffer,
3371                                                   TRACE_FN, sizeof(*entry),
3372                                                   flags, pc);
3373         if (!event)
3374                 goto out;
3375         entry   = ring_buffer_event_data(event);
3376         entry->ip                       = ip;
3377         entry->parent_ip                = parent_ip;
3378
3379         trace_buffer_unlock_commit(event_tr, buffer, event, flags, pc);
3380
3381  out:
3382         atomic_dec(&per_cpu(ftrace_test_event_disable, cpu));
3383         preempt_enable_notrace();
3384 }
3385
3386 static struct ftrace_ops trace_ops __initdata = {
3387         .func = function_test_events_call,
3388         /* the callback handles its own per-CPU recursion accounting */
3389         .flags = FTRACE_OPS_FL_RECURSION_SAFE,
3390 };
3391
3392 static __init void event_trace_self_test_with_function(void)
3393 {
3394         int ret;

3395         event_tr = top_trace_array();
3396         if (WARN_ON(!event_tr))
3397                 return;
3398         ret = register_ftrace_function(&trace_ops);
3399         if (WARN_ON(ret < 0)) {
3400                 pr_info("Failed to enable function tracer for event tests\n");
3401                 return;
3402         }
3403         pr_info("Running tests again, along with the function tracer\n");
3404         event_trace_self_tests();
3405         unregister_ftrace_function(&trace_ops);
3406 }
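/*
 * Illustrative only (not built): a loadable module would register a
 * function-trace callback with the same pattern used above. The "my_*"
 * names are hypothetical; the callback signature mirrors
 * function_test_events_call():
 */
#if 0
#include <linux/ftrace.h>
#include <linux/module.h>

static void notrace my_trace_callback(unsigned long ip, unsigned long parent_ip,
				      struct ftrace_ops *op, struct pt_regs *regs)
{
	/* Runs at every traced function entry; must be fast and non-sleeping */
}

static struct ftrace_ops my_trace_ops = {
	.func	= my_trace_callback,
	.flags	= FTRACE_OPS_FL_RECURSION_SAFE,
};

static int __init my_trace_init(void)
{
	return register_ftrace_function(&my_trace_ops);
}

static void __exit my_trace_exit(void)
{
	unregister_ftrace_function(&my_trace_ops);
}

module_init(my_trace_init);
module_exit(my_trace_exit);
MODULE_LICENSE("GPL");
#endif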
3407 #else
3408 static __init void event_trace_self_test_with_function(void)
3409 {
3410 }
3411 #endif /* CONFIG_FUNCTION_TRACER */
3412
3413 static __init int event_trace_self_tests_init(void)
3414 {
3415         if (!tracing_selftest_disabled) {
3416                 event_trace_self_tests();
3417                 event_trace_self_test_with_function();
3418         }
3419
3420         return 0;
3421 }
3422
3423 late_initcall(event_trace_self_tests_init);
3424
3425 #endif /* CONFIG_FTRACE_STARTUP_TEST */