tracing: Add trace_options kernel command line parameter
kernel/trace/trace.c
/*
 * ring buffer based function tracer
 *
 * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
 * Copyright (C) 2008 Ingo Molnar <mingo@redhat.com>
 *
 * Originally taken from the RT patch by:
 *    Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Based on code from the latency_tracer, that is:
 *  Copyright (C) 2004-2006 Ingo Molnar
 *  Copyright (C) 2004 William Lee Irwin III
 */
#include <linux/ring_buffer.h>
#include <generated/utsrelease.h>
#include <linux/stacktrace.h>
#include <linux/writeback.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/irqflags.h>
#include <linux/irq_work.h>
#include <linux/debugfs.h>
#include <linux/pagemap.h>
#include <linux/hardirq.h>
#include <linux/linkage.h>
#include <linux/uaccess.h>
#include <linux/kprobes.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/splice.h>
#include <linux/kdebug.h>
#include <linux/string.h>
#include <linux/rwsem.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/nmi.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

/*
 * On boot up, the ring buffer is set to the minimum size, so that
 * we do not waste memory on systems that are not using tracing.
 */
int ring_buffer_expanded;

/*
 * We need to change this state when a selftest is running.
 * A selftest will look into the ring buffer to count the
 * entries inserted during the selftest, although concurrent
 * insertions into the ring buffer, such as trace_printk, could occur
 * at the same time, giving false positive or negative results.
 */
static bool __read_mostly tracing_selftest_running;

/*
 * If a tracer is running, we do not want to run SELFTEST.
 */
bool __read_mostly tracing_selftest_disabled;
/* For tracers that don't implement custom flags */
static struct tracer_opt dummy_tracer_opt[] = {
	{ }
};

static struct tracer_flags dummy_tracer_flags = {
	.val = 0,
	.opts = dummy_tracer_opt
};

static int dummy_set_flag(u32 old_flags, u32 bit, int set)
{
	return 0;
}

/*
 * To prevent the comm cache from being overwritten when no
 * tracing is active, only save the comm when a trace event
 * occurred.
 */
static DEFINE_PER_CPU(bool, trace_cmdline_save);

/*
 * When a reader is waiting for data, then this variable is
 * set to true.
 */
static bool trace_wakeup_needed;

static struct irq_work trace_work_wakeup;

/*
 * Kill all tracing for good (never come back).
 * It is initialized to 1 but will turn to zero if the initialization
 * of the tracer is successful. But that is the only place that sets
 * this back to zero.
 */
static int tracing_disabled = 1;

DEFINE_PER_CPU(int, ftrace_cpu_disabled);

cpumask_var_t __read_mostly	tracing_buffer_mask;

/*
 * ftrace_dump_on_oops - variable to dump ftrace buffer on oops
 *
 * If there is an oops (or kernel panic) and the ftrace_dump_on_oops
 * is set, then ftrace_dump is called. This will output the contents
 * of the ftrace buffers to the console.  This is very useful for
 * capturing traces that lead to crashes and outputting them to a
 * serial console.
 *
 * It is off by default, but you can enable it by either specifying
 * "ftrace_dump_on_oops" on the kernel command line, or setting
 * /proc/sys/kernel/ftrace_dump_on_oops
 * Set 1 if you want to dump buffers of all CPUs
 * Set 2 if you want to dump the buffer of the CPU that triggered oops
 */

enum ftrace_dump_mode ftrace_dump_on_oops;

static int tracing_set_tracer(const char *buf);

#define MAX_TRACER_SIZE		100
static char bootup_tracer_buf[MAX_TRACER_SIZE] __initdata;
static char *default_bootup_tracer;

static int __init set_cmdline_ftrace(char *str)
{
	strncpy(bootup_tracer_buf, str, MAX_TRACER_SIZE);
	default_bootup_tracer = bootup_tracer_buf;
	/* We are using ftrace early, expand it */
	ring_buffer_expanded = 1;
	return 1;
}
__setup("ftrace=", set_cmdline_ftrace);
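
/*
 * Example (illustrative, not taken from this file): booting with
 * "ftrace=function" selects the function tracer at startup. Since
 * this runs before the normal buffer setup, the ring buffer is
 * marked for early expansion above.
 */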

static int __init set_ftrace_dump_on_oops(char *str)
{
	if (*str++ != '=' || !*str) {
		ftrace_dump_on_oops = DUMP_ALL;
		return 1;
	}

	if (!strcmp("orig_cpu", str)) {
		ftrace_dump_on_oops = DUMP_ORIG;
		return 1;
	}

	return 0;
}
__setup("ftrace_dump_on_oops", set_ftrace_dump_on_oops);
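
/*
 * Example (illustrative): booting with "ftrace_dump_on_oops" dumps
 * the buffers of all CPUs on an oops, while
 * "ftrace_dump_on_oops=orig_cpu" dumps only the buffer of the CPU
 * that triggered the oops.
 */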

static char trace_boot_options_buf[MAX_TRACER_SIZE] __initdata;
static char *trace_boot_options __initdata;

static int __init set_trace_boot_options(char *str)
{
	strncpy(trace_boot_options_buf, str, MAX_TRACER_SIZE);
	trace_boot_options = trace_boot_options_buf;
	return 0;
}
__setup("trace_options=", set_trace_boot_options);
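
/*
 * Example (illustrative; the option names come from the
 * trace_options[] array below, and the stored string is parsed later
 * during boot): "trace_options=stacktrace,norecord-cmd" would set
 * the stacktrace option and, via the "no" prefix, clear record-cmd.
 */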

unsigned long long ns2usecs(cycle_t nsec)
{
	nsec += 500;
	do_div(nsec, 1000);
	return nsec;
}
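
/*
 * The +500 above rounds to the nearest microsecond instead of
 * truncating: e.g. ns2usecs(1499) == 1, while ns2usecs(1500) == 2.
 */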

/*
 * The global_trace is the descriptor that holds the tracing
 * buffers for the live tracing. For each CPU, it contains
 * a linked list of pages that will store trace entries. The
 * page descriptors of the pages in memory are used to hold this
 * linked list, by linking the lru item in each page descriptor
 * to the next page in that CPU's buffer.
 *
 * For each active CPU there is a data field that holds the
 * pages for the buffer for that CPU. Each CPU has the same number
 * of pages allocated for its buffer.
 */
static struct trace_array	global_trace;

static DEFINE_PER_CPU(struct trace_array_cpu, global_trace_cpu);

int filter_current_check_discard(struct ring_buffer *buffer,
				 struct ftrace_event_call *call, void *rec,
				 struct ring_buffer_event *event)
{
	return filter_check_discard(call, rec, buffer, event);
}
EXPORT_SYMBOL_GPL(filter_current_check_discard);

cycle_t ftrace_now(int cpu)
{
	u64 ts;

	/* Early boot up does not have a buffer yet */
	if (!global_trace.buffer)
		return trace_clock_local();

	ts = ring_buffer_time_stamp(global_trace.buffer, cpu);
	ring_buffer_normalize_time_stamp(global_trace.buffer, cpu, &ts);

	return ts;
}

/*
 * The max_tr is used to snapshot the global_trace when a maximum
 * latency is reached. Some tracers will use this to store a maximum
 * trace while it continues examining live traces.
 *
 * The buffers for the max_tr are set up the same as the global_trace.
 * When a snapshot is taken, the linked list of the max_tr is swapped
 * with the linked list of the global_trace and the buffers are reset
 * for the global_trace so the tracing can continue.
 */
static struct trace_array	max_tr;

static DEFINE_PER_CPU(struct trace_array_cpu, max_tr_data);

int tracing_is_enabled(void)
{
	return tracing_is_on();
}

/*
 * trace_buf_size is the size in bytes that is allocated
 * for a buffer. Note, the number of bytes is always rounded
 * to page size.
 *
 * This number is purposely set to a low number of 16384.
 * If a dump on oops happens, not having to wait for all that
 * output is much appreciated. In any case, this is configurable
 * at both boot time and run time.
 */
#define TRACE_BUF_SIZE_DEFAULT	1441792UL /* 16384 * 88 (sizeof(entry)) */

static unsigned long		trace_buf_size = TRACE_BUF_SIZE_DEFAULT;

/* trace_types holds a linked list of available tracers. */
static struct tracer		*trace_types __read_mostly;

/* current_trace points to the tracer that is currently active */
static struct tracer		*current_trace __read_mostly;

/*
 * trace_types_lock is used to protect the trace_types list.
 */
static DEFINE_MUTEX(trace_types_lock);

/*
 * serialize the access of the ring buffer
 *
 * The ring buffer serializes readers, but that is only low-level
 * protection. The validity of the events (returned by
 * ring_buffer_peek() etc.) is not protected by the ring buffer.
 *
 * The content of events may become garbage if we allow other
 * processes to consume these events concurrently:
 *   A) the page of the consumed events may become a normal page
 *      (not a reader page) in the ring buffer, and this page will be
 *      rewritten by the event producer.
 *   B) the page of the consumed events may become a page for
 *      splice_read, and this page will be returned to the system.
 *
 * These primitives allow multiple processes to access different
 * cpu ring buffers concurrently.
 *
 * These primitives don't distinguish between read-only and
 * read-consume access. Multiple read-only accesses are also
 * serialized.
 */

#ifdef CONFIG_SMP
static DECLARE_RWSEM(all_cpu_access_lock);
static DEFINE_PER_CPU(struct mutex, cpu_access_lock);

static inline void trace_access_lock(int cpu)
{
	if (cpu == TRACE_PIPE_ALL_CPU) {
		/* gain it for accessing the whole ring buffer. */
		down_write(&all_cpu_access_lock);
	} else {
		/* gain it for accessing a cpu ring buffer. */

		/* Firstly block other trace_access_lock(TRACE_PIPE_ALL_CPU). */
		down_read(&all_cpu_access_lock);

		/* Secondly block other access to this @cpu ring buffer. */
		mutex_lock(&per_cpu(cpu_access_lock, cpu));
	}
}

static inline void trace_access_unlock(int cpu)
{
	if (cpu == TRACE_PIPE_ALL_CPU) {
		up_write(&all_cpu_access_lock);
	} else {
		mutex_unlock(&per_cpu(cpu_access_lock, cpu));
		up_read(&all_cpu_access_lock);
	}
}

static inline void trace_access_lock_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		mutex_init(&per_cpu(cpu_access_lock, cpu));
}

#else

static DEFINE_MUTEX(access_lock);

static inline void trace_access_lock(int cpu)
{
	(void)cpu;
	mutex_lock(&access_lock);
}

static inline void trace_access_unlock(int cpu)
{
	(void)cpu;
	mutex_unlock(&access_lock);
}

static inline void trace_access_lock_init(void)
{
}

#endif
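
/*
 * Usage sketch (an assumed caller pattern, not code from this file):
 * a reader takes the lock for the cpu buffer it consumes, e.g.
 *
 *	trace_access_lock(cpu);
 *	event = ring_buffer_consume(buffer, cpu, &ts, &lost_events);
 *	...
 *	trace_access_unlock(cpu);
 *
 * Passing TRACE_PIPE_ALL_CPU instead takes exclusive access to all
 * cpu buffers at once.
 */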

/* trace_wait is a waitqueue for tasks blocked on trace_poll */
static DECLARE_WAIT_QUEUE_HEAD(trace_wait);

/* trace_flags holds trace_options default values */
unsigned long trace_flags = TRACE_ITER_PRINT_PARENT | TRACE_ITER_PRINTK |
	TRACE_ITER_ANNOTATE | TRACE_ITER_CONTEXT_INFO | TRACE_ITER_SLEEP_TIME |
	TRACE_ITER_GRAPH_TIME | TRACE_ITER_RECORD_CMD | TRACE_ITER_OVERWRITE |
	TRACE_ITER_IRQ_INFO | TRACE_ITER_MARKERS;

static int trace_stop_count;
static DEFINE_RAW_SPINLOCK(tracing_start_lock);

/**
 * trace_wake_up - wake up tasks waiting for trace input
 *
 * irq_work handler that wakes up any task blocked on the trace_wait
 * queue. This is used with trace_poll for tasks polling the trace.
 */
static void trace_wake_up(struct irq_work *work)
{
	wake_up_all(&trace_wait);
}

/**
 * tracing_on - enable tracing buffers
 *
 * This function enables tracing buffers that may have been
 * disabled with tracing_off.
 */
void tracing_on(void)
{
	if (global_trace.buffer)
		ring_buffer_record_on(global_trace.buffer);
	/*
	 * This flag is only looked at when buffers haven't been
	 * allocated yet. We don't really care about the race
	 * between setting this flag and actually turning
	 * on the buffer.
	 */
	global_trace.buffer_disabled = 0;
}
EXPORT_SYMBOL_GPL(tracing_on);

/**
 * tracing_off - turn off tracing buffers
 *
 * This function stops the tracing buffers from recording data.
 * It does not disable any overhead the tracers themselves may
 * be causing. This function simply causes all recording to
 * the ring buffers to fail.
 */
void tracing_off(void)
{
	if (global_trace.buffer)
		ring_buffer_record_off(global_trace.buffer);
	/*
	 * This flag is only looked at when buffers haven't been
	 * allocated yet. We don't really care about the race
	 * between setting this flag and actually turning
	 * off the buffer.
	 */
	global_trace.buffer_disabled = 1;
}
EXPORT_SYMBOL_GPL(tracing_off);

/**
 * tracing_is_on - show state of ring buffers enabled
 */
int tracing_is_on(void)
{
	if (global_trace.buffer)
		return ring_buffer_record_is_on(global_trace.buffer);
	return !global_trace.buffer_disabled;
}
EXPORT_SYMBOL_GPL(tracing_is_on);

static int __init set_buf_size(char *str)
{
	unsigned long buf_size;

	if (!str)
		return 0;
	buf_size = memparse(str, &str);
	/* nr_entries can not be zero */
	if (buf_size == 0)
		return 0;
	trace_buf_size = buf_size;
	return 1;
}
__setup("trace_buf_size=", set_buf_size);
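
/*
 * Example (illustrative): memparse() accepts size suffixes, so
 * "trace_buf_size=1441792" and "trace_buf_size=1408k" request the
 * same buffer size.
 */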

static int __init set_tracing_thresh(char *str)
{
	unsigned long threshold;
	int ret;

	if (!str)
		return 0;
	ret = kstrtoul(str, 0, &threshold);
	if (ret < 0)
		return 0;
	tracing_thresh = threshold * 1000;
	return 1;
}
__setup("tracing_thresh=", set_tracing_thresh);
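
/*
 * Example (illustrative): "tracing_thresh=100" on the command line
 * sets a threshold of 100 microseconds; the value is stored
 * internally in nanoseconds, hence the multiplication by 1000.
 */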

unsigned long nsecs_to_usecs(unsigned long nsecs)
{
	return nsecs / 1000;
}

/* These must match the bit positions in trace_iterator_flags */
static const char *trace_options[] = {
	"print-parent",
	"sym-offset",
	"sym-addr",
	"verbose",
	"raw",
	"hex",
	"bin",
	"block",
	"stacktrace",
	"trace_printk",
	"ftrace_preempt",
	"branch",
	"annotate",
	"userstacktrace",
	"sym-userobj",
	"printk-msg-only",
	"context-info",
	"latency-format",
	"sleep-time",
	"graph-time",
	"record-cmd",
	"overwrite",
	"disable_on_free",
	"irq-info",
	"markers",
	NULL
};

static struct {
	u64 (*func)(void);
	const char *name;
} trace_clocks[] = {
	{ trace_clock_local,	"local" },
	{ trace_clock_global,	"global" },
	{ trace_clock_counter,	"counter" },
};

int trace_clock_id;

/*
 * trace_parser_get_init - gets the buffer for trace parser
 */
int trace_parser_get_init(struct trace_parser *parser, int size)
{
	memset(parser, 0, sizeof(*parser));

	parser->buffer = kmalloc(size, GFP_KERNEL);
	if (!parser->buffer)
		return 1;

	parser->size = size;
	return 0;
}

/*
 * trace_parser_put - frees the buffer for trace parser
 */
void trace_parser_put(struct trace_parser *parser)
{
	kfree(parser->buffer);
}

/*
 * trace_get_user - reads the user input string separated by space
 * (matched by isspace(ch))
 *
 * For each string found the 'struct trace_parser' is updated,
 * and the function returns.
 *
 * Returns number of bytes read.
 *
 * See kernel/trace/trace.h for 'struct trace_parser' details.
 */
int trace_get_user(struct trace_parser *parser, const char __user *ubuf,
	size_t cnt, loff_t *ppos)
{
	char ch;
	size_t read = 0;
	ssize_t ret;

	if (!*ppos)
		trace_parser_clear(parser);

	ret = get_user(ch, ubuf++);
	if (ret)
		goto out;

	read++;
	cnt--;

	/*
	 * If the parser is not finished with the last write,
	 * continue reading the user input without skipping spaces.
	 */
	if (!parser->cont) {
		/* skip white space */
		while (cnt && isspace(ch)) {
			ret = get_user(ch, ubuf++);
			if (ret)
				goto out;
			read++;
			cnt--;
		}

		/* only spaces were written */
		if (isspace(ch)) {
			*ppos += read;
			ret = read;
			goto out;
		}

		parser->idx = 0;
	}

	/* read the non-space input */
	while (cnt && !isspace(ch)) {
		if (parser->idx < parser->size - 1)
			parser->buffer[parser->idx++] = ch;
		else {
			ret = -EINVAL;
			goto out;
		}
		ret = get_user(ch, ubuf++);
		if (ret)
			goto out;
		read++;
		cnt--;
	}

	/* We either got finished input or we have to wait for another call. */
	if (isspace(ch)) {
		parser->buffer[parser->idx] = 0;
		parser->cont = false;
	} else {
		parser->cont = true;
		parser->buffer[parser->idx++] = ch;
	}

	*ppos += read;
	ret = read;

out:
	return ret;
}
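
/*
 * Example (illustrative): a user write of "foo bar\n" fills
 * parser->buffer with "foo" on the first call and with "bar" on the
 * next; the trailing newline is a separator for isspace(), so the
 * second token is finished rather than left with ->cont set.
 */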

ssize_t trace_seq_to_user(struct trace_seq *s, char __user *ubuf, size_t cnt)
{
	int len;
	int ret;

	if (!cnt)
		return 0;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	ret = copy_to_user(ubuf, s->buffer + s->readpos, cnt);
	if (ret == cnt)
		return -EFAULT;

	cnt -= ret;

	s->readpos += cnt;
	return cnt;
}

static ssize_t trace_seq_to_buffer(struct trace_seq *s, void *buf, size_t cnt)
{
	int len;

	if (s->len <= s->readpos)
		return -EBUSY;

	len = s->len - s->readpos;
	if (cnt > len)
		cnt = len;
	memcpy(buf, s->buffer + s->readpos, cnt);

	s->readpos += cnt;
	return cnt;
}

/*
 * ftrace_max_lock is used to protect the swapping of buffers
 * when taking a max snapshot. The buffers themselves are
 * protected by per_cpu spinlocks. But the action of the swap
 * needs its own lock.
 *
 * This is defined as an arch_spinlock_t in order to help
 * with performance when lockdep debugging is enabled.
 *
 * It is also used in other places outside of update_max_tr,
 * so it needs to be defined outside of CONFIG_TRACER_MAX_TRACE.
 */
static arch_spinlock_t ftrace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

unsigned long __read_mostly	tracing_thresh;

#ifdef CONFIG_TRACER_MAX_TRACE
unsigned long __read_mostly	tracing_max_latency;

/*
 * Copy the new maximum trace into the separate maximum-trace
 * structure. (this way the maximum trace is permanently saved,
 * for later retrieval via /sys/kernel/debug/tracing/latency_trace)
 */
static void
__update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct trace_array_cpu *data = tr->data[cpu];
	struct trace_array_cpu *max_data;

	max_tr.cpu = cpu;
	max_tr.time_start = data->preempt_timestamp;

	max_data = max_tr.data[cpu];
	max_data->saved_latency = tracing_max_latency;
	max_data->critical_start = data->critical_start;
	max_data->critical_end = data->critical_end;

	memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN);
	max_data->pid = tsk->pid;
	max_data->uid = task_uid(tsk);
	max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO;
	max_data->policy = tsk->policy;
	max_data->rt_priority = tsk->rt_priority;

	/* record this task's comm */
	tracing_record_cmdline(tsk);
}

/**
 * update_max_tr - snapshot all trace buffers from global_trace to max_tr
 * @tr: tracer
 * @tsk: the task with the latency
 * @cpu: The cpu that initiated the trace.
 *
 * Flip the buffers between the @tr and the max_tr and record information
 * about which task was the cause of this latency.
 */
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	struct ring_buffer *buf = tr->buffer;

	if (trace_stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!current_trace->use_max_tr) {
		WARN_ON_ONCE(1);
		return;
	}
	arch_spin_lock(&ftrace_max_lock);

	tr->buffer = max_tr.buffer;
	max_tr.buffer = buf;

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&ftrace_max_lock);
}

/**
 * update_max_tr_single - only copy one trace over, and reset the rest
 * @tr: tracer
 * @tsk: task with the latency
 * @cpu: the cpu of the buffer to copy.
 *
 * Flip the trace of a single CPU buffer between the @tr and the max_tr.
 */
void
update_max_tr_single(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
	int ret;

	if (trace_stop_count)
		return;

	WARN_ON_ONCE(!irqs_disabled());
	if (!current_trace->use_max_tr) {
		WARN_ON_ONCE(1);
		return;
	}

	arch_spin_lock(&ftrace_max_lock);

	ret = ring_buffer_swap_cpu(max_tr.buffer, tr->buffer, cpu);

	if (ret == -EBUSY) {
		/*
		 * We failed to swap the buffer due to a commit taking
		 * place on this CPU. We fail to record, but we reset
		 * the max trace buffer (no one writes directly to it)
		 * and flag that it failed.
		 */
		trace_array_printk(&max_tr, _THIS_IP_,
			"Failed to swap buffers due to commit in progress\n");
	}

	WARN_ON_ONCE(ret && ret != -EAGAIN && ret != -EBUSY);

	__update_max_tr(tr, tsk, cpu);
	arch_spin_unlock(&ftrace_max_lock);
}
#endif /* CONFIG_TRACER_MAX_TRACE */

static void default_wait_pipe(struct trace_iterator *iter)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&trace_wait, &wait, TASK_INTERRUPTIBLE);

	/*
	 * The events can happen in critical sections where
	 * checking a work queue can cause deadlocks.
	 * After adding a task to the queue, this flag is set
	 * only to notify events to try to wake up the queue
	 * using irq_work.
	 *
	 * We don't clear it even if the buffer is no longer
	 * empty. The flag only causes the next event to run
	 * irq_work to do the work queue wake up. The worst
	 * that can happen if we race with !trace_empty() is that
	 * an event will cause an irq_work to try to wake up
	 * an empty queue.
	 *
	 * There's no reason to protect this flag either, as
	 * the work queue and irq_work logic will do the necessary
	 * synchronization for the wake ups. The only thing
	 * that is necessary is that the wake up happens after
	 * a task has been queued. It's OK for spurious wake ups.
	 */
	trace_wakeup_needed = true;

	if (trace_empty(iter))
		schedule();

	finish_wait(&trace_wait, &wait);
}

/**
 * register_tracer - register a tracer with the ftrace system.
 * @type: the plugin for the tracer
 *
 * Register a new plugin tracer.
 */
int register_tracer(struct tracer *type)
{
	struct tracer *t;
	int ret = 0;

	if (!type->name) {
		pr_info("Tracer must have a name\n");
		return -1;
	}

	if (strlen(type->name) >= MAX_TRACER_SIZE) {
		pr_info("Tracer has a name longer than %d\n", MAX_TRACER_SIZE);
		return -1;
	}

	mutex_lock(&trace_types_lock);

	tracing_selftest_running = true;

	for (t = trace_types; t; t = t->next) {
		if (strcmp(type->name, t->name) == 0) {
			/* already found */
			pr_info("Tracer %s already registered\n",
				type->name);
			ret = -1;
			goto out;
		}
	}

	if (!type->set_flag)
		type->set_flag = &dummy_set_flag;
	if (!type->flags)
		type->flags = &dummy_tracer_flags;
	else
		if (!type->flags->opts)
			type->flags->opts = dummy_tracer_opt;
	if (!type->wait_pipe)
		type->wait_pipe = default_wait_pipe;

#ifdef CONFIG_FTRACE_STARTUP_TEST
	if (type->selftest && !tracing_selftest_disabled) {
		struct tracer *saved_tracer = current_trace;
		struct trace_array *tr = &global_trace;

		/*
		 * Run a selftest on this tracer.
		 * Here we reset the trace buffer, and set the current
		 * tracer to be this tracer. The tracer can then run some
		 * internal tracing to verify that everything is in order.
		 * If we fail, we do not register this tracer.
		 */
		tracing_reset_online_cpus(tr);

		current_trace = type;

		/* If we expanded the buffers, make sure the max is expanded too */
		if (ring_buffer_expanded && type->use_max_tr)
			ring_buffer_resize(max_tr.buffer, trace_buf_size,
						RING_BUFFER_ALL_CPUS);

		/* the test is responsible for initializing and enabling */
		pr_info("Testing tracer %s: ", type->name);
		ret = type->selftest(type, tr);
		/* the test is responsible for resetting too */
		current_trace = saved_tracer;
		if (ret) {
			printk(KERN_CONT "FAILED!\n");
			/* Add the warning after printing 'FAILED' */
			WARN_ON(1);
			goto out;
		}
		/* Only reset on passing, to avoid touching corrupted buffers */
		tracing_reset_online_cpus(tr);

		/* Shrink the max buffer again */
		if (ring_buffer_expanded && type->use_max_tr)
			ring_buffer_resize(max_tr.buffer, 1,
						RING_BUFFER_ALL_CPUS);

		printk(KERN_CONT "PASSED\n");
	}
#endif

	type->next = trace_types;
	trace_types = type;

 out:
	tracing_selftest_running = false;
	mutex_unlock(&trace_types_lock);

	if (ret || !default_bootup_tracer)
		goto out_unlock;

	if (strncmp(default_bootup_tracer, type->name, MAX_TRACER_SIZE))
		goto out_unlock;

	printk(KERN_INFO "Starting tracer '%s'\n", type->name);
	/* Do we want this tracer to start on bootup? */
	tracing_set_tracer(type->name);
	default_bootup_tracer = NULL;
	/* disable other selftests, since this will break them. */
	tracing_selftest_disabled = 1;
#ifdef CONFIG_FTRACE_STARTUP_TEST
	printk(KERN_INFO "Disabling FTRACE selftests due to running tracer '%s'\n",
	       type->name);
#endif

 out_unlock:
	return ret;
}
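
/*
 * Minimal registration sketch (hypothetical tracer; "mytrace" and its
 * callbacks are illustrations, not part of this file):
 *
 *	static struct tracer mytrace __read_mostly = {
 *		.name	= "mytrace",
 *		.init	= mytrace_init,
 *		.reset	= mytrace_reset,
 *	};
 *
 *	static __init int init_mytrace(void)
 *	{
 *		return register_tracer(&mytrace);
 *	}
 *	core_initcall(init_mytrace);
 */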

void tracing_reset(struct trace_array *tr, int cpu)
{
	struct ring_buffer *buffer = tr->buffer;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();
	ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_online_cpus(struct trace_array *tr)
{
	struct ring_buffer *buffer = tr->buffer;
	int cpu;

	ring_buffer_record_disable(buffer);

	/* Make sure all commits have finished */
	synchronize_sched();

	tr->time_start = ftrace_now(tr->cpu);

	for_each_online_cpu(cpu)
		ring_buffer_reset_cpu(buffer, cpu);

	ring_buffer_record_enable(buffer);
}

void tracing_reset_current(int cpu)
{
	tracing_reset(&global_trace, cpu);
}

void tracing_reset_current_online_cpus(void)
{
	tracing_reset_online_cpus(&global_trace);
}

#define SAVED_CMDLINES 128
#define NO_CMDLINE_MAP UINT_MAX
static unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
static unsigned map_cmdline_to_pid[SAVED_CMDLINES];
static char saved_cmdlines[SAVED_CMDLINES][TASK_COMM_LEN];
static int cmdline_idx;
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;

/* temporarily disable recording */
static atomic_t trace_record_cmdline_disabled __read_mostly;

static void trace_init_cmdlines(void)
{
	memset(&map_pid_to_cmdline, NO_CMDLINE_MAP, sizeof(map_pid_to_cmdline));
	memset(&map_cmdline_to_pid, NO_CMDLINE_MAP, sizeof(map_cmdline_to_pid));
	cmdline_idx = 0;
}

int is_tracing_stopped(void)
{
	return trace_stop_count;
}

/**
 * ftrace_off_permanent - disable all ftrace code permanently
 *
 * This should only be called when a serious anomaly has
 * been detected.  This will turn off the function tracing,
 * ring buffers, and other tracing utilities. It takes no
 * locks and can be called from any context.
 */
void ftrace_off_permanent(void)
{
	tracing_disabled = 1;
	ftrace_stop();
	tracing_off_permanent();
}

/**
 * tracing_start - quick start of the tracer
 *
 * If tracing is enabled but was stopped by tracing_stop,
 * this will start the tracer back up.
 */
void tracing_start(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	if (tracing_disabled)
		return;

	raw_spin_lock_irqsave(&tracing_start_lock, flags);
	if (--trace_stop_count) {
		if (trace_stop_count < 0) {
			/* Someone screwed up their debugging */
			WARN_ON_ONCE(1);
			trace_stop_count = 0;
		}
		goto out;
	}

	/* Prevent the buffers from switching */
	arch_spin_lock(&ftrace_max_lock);

	buffer = global_trace.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

	buffer = max_tr.buffer;
	if (buffer)
		ring_buffer_record_enable(buffer);

	arch_spin_unlock(&ftrace_max_lock);

	ftrace_start();
 out:
	raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
}

/**
 * tracing_stop - quick stop of the tracer
 *
 * Lightweight way to stop tracing. Use in conjunction with
 * tracing_start.
 */
void tracing_stop(void)
{
	struct ring_buffer *buffer;
	unsigned long flags;

	ftrace_stop();
	raw_spin_lock_irqsave(&tracing_start_lock, flags);
	if (trace_stop_count++)
		goto out;

	/* Prevent the buffers from switching */
	arch_spin_lock(&ftrace_max_lock);

	buffer = global_trace.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

	buffer = max_tr.buffer;
	if (buffer)
		ring_buffer_record_disable(buffer);

	arch_spin_unlock(&ftrace_max_lock);

 out:
	raw_spin_unlock_irqrestore(&tracing_start_lock, flags);
}

void trace_stop_cmdline_recording(void);

static void trace_save_cmdline(struct task_struct *tsk)
{
	unsigned pid, idx;

	if (!tsk->pid || unlikely(tsk->pid > PID_MAX_DEFAULT))
		return;

	/*
	 * It's not the end of the world if we don't get
	 * the lock, but we also don't want to spin
	 * nor do we want to disable interrupts,
	 * so if we miss here, then better luck next time.
	 */
	if (!arch_spin_trylock(&trace_cmdline_lock))
		return;

	idx = map_pid_to_cmdline[tsk->pid];
	if (idx == NO_CMDLINE_MAP) {
		idx = (cmdline_idx + 1) % SAVED_CMDLINES;

		/*
		 * Check whether the cmdline buffer at idx has a pid
		 * mapped. We are going to overwrite that entry so we
		 * need to clear the map_pid_to_cmdline. Otherwise we
		 * would read the new comm for the old pid.
		 */
		pid = map_cmdline_to_pid[idx];
		if (pid != NO_CMDLINE_MAP)
			map_pid_to_cmdline[pid] = NO_CMDLINE_MAP;

		map_cmdline_to_pid[idx] = tsk->pid;
		map_pid_to_cmdline[tsk->pid] = idx;

		cmdline_idx = idx;
	}

	memcpy(&saved_cmdlines[idx], tsk->comm, TASK_COMM_LEN);

	arch_spin_unlock(&trace_cmdline_lock);
}

void trace_find_cmdline(int pid, char comm[])
{
	unsigned map;

	if (!pid) {
		strcpy(comm, "<idle>");
		return;
	}

	if (WARN_ON_ONCE(pid < 0)) {
		strcpy(comm, "<XXX>");
		return;
	}

	if (pid > PID_MAX_DEFAULT) {
		strcpy(comm, "<...>");
		return;
	}

	preempt_disable();
	arch_spin_lock(&trace_cmdline_lock);
	map = map_pid_to_cmdline[pid];
	if (map != NO_CMDLINE_MAP)
		strcpy(comm, saved_cmdlines[map]);
	else
		strcpy(comm, "<...>");

	arch_spin_unlock(&trace_cmdline_lock);
	preempt_enable();
}

void tracing_record_cmdline(struct task_struct *tsk)
{
	if (atomic_read(&trace_record_cmdline_disabled) || !tracing_is_on())
		return;

	if (!__this_cpu_read(trace_cmdline_save))
		return;

	__this_cpu_write(trace_cmdline_save, false);

	trace_save_cmdline(tsk);
}

void
tracing_generic_entry_update(struct trace_entry *entry, unsigned long flags,
			     int pc)
{
	struct task_struct *tsk = current;

	entry->preempt_count		= pc & 0xff;
	entry->pid			= (tsk) ? tsk->pid : 0;
	entry->padding			= 0;
	entry->flags =
#ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
#else
		TRACE_FLAG_IRQS_NOSUPPORT |
#endif
		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
		((pc & SOFTIRQ_MASK) ? TRACE_FLAG_SOFTIRQ : 0) |
		(need_resched() ? TRACE_FLAG_NEED_RESCHED : 0);
}
EXPORT_SYMBOL_GPL(tracing_generic_entry_update);

struct ring_buffer_event *
trace_buffer_lock_reserve(struct ring_buffer *buffer,
			  int type,
			  unsigned long len,
			  unsigned long flags, int pc)
{
	struct ring_buffer_event *event;

	event = ring_buffer_lock_reserve(buffer, len);
	if (event != NULL) {
		struct trace_entry *ent = ring_buffer_event_data(event);

		tracing_generic_entry_update(ent, flags, pc);
		ent->type = type;
	}

	return event;
}

void
__buffer_unlock_commit(struct ring_buffer *buffer, struct ring_buffer_event *event)
{
	__this_cpu_write(trace_cmdline_save, true);
	if (trace_wakeup_needed) {
		trace_wakeup_needed = false;
		/* irq_work_queue() supplies its own memory barriers */
		irq_work_queue(&trace_work_wakeup);
	}
	ring_buffer_unlock_commit(buffer, event);
}

static inline void
__trace_buffer_unlock_commit(struct ring_buffer *buffer,
			     struct ring_buffer_event *event,
			     unsigned long flags, int pc)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack(buffer, flags, 6, pc);
	ftrace_trace_userstack(buffer, flags, pc);
}

void trace_buffer_unlock_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event,
				unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit);

struct ring_buffer_event *
trace_current_buffer_lock_reserve(struct ring_buffer **current_rb,
				  int type, unsigned long len,
				  unsigned long flags, int pc)
{
	*current_rb = global_trace.buffer;
	return trace_buffer_lock_reserve(*current_rb,
					 type, len, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_lock_reserve);

void trace_current_buffer_unlock_commit(struct ring_buffer *buffer,
					struct ring_buffer_event *event,
					unsigned long flags, int pc)
{
	__trace_buffer_unlock_commit(buffer, event, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_unlock_commit);

void trace_buffer_unlock_commit_regs(struct ring_buffer *buffer,
				     struct ring_buffer_event *event,
				     unsigned long flags, int pc,
				     struct pt_regs *regs)
{
	__buffer_unlock_commit(buffer, event);

	ftrace_trace_stack_regs(buffer, flags, 0, pc, regs);
	ftrace_trace_userstack(buffer, flags, pc);
}
EXPORT_SYMBOL_GPL(trace_buffer_unlock_commit_regs);

void trace_current_buffer_discard_commit(struct ring_buffer *buffer,
					 struct ring_buffer_event *event)
{
	ring_buffer_discard_commit(buffer, event);
}
EXPORT_SYMBOL_GPL(trace_current_buffer_discard_commit);

void
trace_function(struct trace_array *tr,
	       unsigned long ip, unsigned long parent_ip, unsigned long flags,
	       int pc)
{
	struct ftrace_event_call *call = &event_function;
	struct ring_buffer *buffer = tr->buffer;
	struct ring_buffer_event *event;
	struct ftrace_entry *entry;

	/* If we are reading the ring buffer, don't trace */
	if (unlikely(__this_cpu_read(ftrace_cpu_disabled)))
		return;

	event = trace_buffer_lock_reserve(buffer, TRACE_FN, sizeof(*entry),
					  flags, pc);
	if (!event)
		return;
	entry	= ring_buffer_event_data(event);
	entry->ip			= ip;
	entry->parent_ip		= parent_ip;

	if (!filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);
}

void
ftrace(struct trace_array *tr, struct trace_array_cpu *data,
       unsigned long ip, unsigned long parent_ip, unsigned long flags,
       int pc)
{
	if (likely(!atomic_read(&data->disabled)))
		trace_function(tr, ip, parent_ip, flags, pc);
}

#ifdef CONFIG_STACKTRACE

#define FTRACE_STACK_MAX_ENTRIES (PAGE_SIZE / sizeof(unsigned long))
struct ftrace_stack {
	unsigned long		calls[FTRACE_STACK_MAX_ENTRIES];
};

static DEFINE_PER_CPU(struct ftrace_stack, ftrace_stack);
static DEFINE_PER_CPU(int, ftrace_stack_reserve);

static void __ftrace_trace_stack(struct ring_buffer *buffer,
				 unsigned long flags,
				 int skip, int pc, struct pt_regs *regs)
{
	struct ftrace_event_call *call = &event_kernel_stack;
	struct ring_buffer_event *event;
	struct stack_entry *entry;
	struct stack_trace trace;
	int use_stack;
	int size = FTRACE_STACK_ENTRIES;

	trace.nr_entries	= 0;
	trace.skip		= skip;

	/*
	 * Since events can happen in NMIs there's no safe way to
	 * use the per cpu ftrace_stacks. We reserve it and if an interrupt
	 * or NMI comes in, it will just have to use the default
	 * FTRACE_STACK_ENTRIES.
	 */
	preempt_disable_notrace();

	use_stack = ++__get_cpu_var(ftrace_stack_reserve);
	/*
	 * We don't need any atomic variables, just a barrier.
	 * If an interrupt comes in, we don't care, because it would
	 * have exited and put the counter back to what we want.
	 * We just need a barrier to keep gcc from moving things
	 * around.
	 */
	barrier();
	if (use_stack == 1) {
		trace.entries		= &__get_cpu_var(ftrace_stack).calls[0];
		trace.max_entries	= FTRACE_STACK_MAX_ENTRIES;

		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);

		if (trace.nr_entries > size)
			size = trace.nr_entries;
	} else
		/* From now on, use_stack is a boolean */
		use_stack = 0;

	size *= sizeof(unsigned long);

	event = trace_buffer_lock_reserve(buffer, TRACE_STACK,
					  sizeof(*entry) + size, flags, pc);
	if (!event)
		goto out;
	entry = ring_buffer_event_data(event);

	memset(&entry->caller, 0, size);

	if (use_stack)
		memcpy(&entry->caller, trace.entries,
		       trace.nr_entries * sizeof(unsigned long));
	else {
		trace.max_entries	= FTRACE_STACK_ENTRIES;
		trace.entries		= entry->caller;
		if (regs)
			save_stack_trace_regs(regs, &trace);
		else
			save_stack_trace(&trace);
	}

	entry->size = trace.nr_entries;

	if (!filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out:
	/* Again, don't let gcc optimize things here */
	barrier();
	__get_cpu_var(ftrace_stack_reserve)--;
	preempt_enable_notrace();
}

void ftrace_trace_stack_regs(struct ring_buffer *buffer, unsigned long flags,
			     int skip, int pc, struct pt_regs *regs)
{
	if (!(trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, flags, skip, pc, regs);
}

void ftrace_trace_stack(struct ring_buffer *buffer, unsigned long flags,
			int skip, int pc)
{
	if (!(trace_flags & TRACE_ITER_STACKTRACE))
		return;

	__ftrace_trace_stack(buffer, flags, skip, pc, NULL);
}

void __trace_stack(struct trace_array *tr, unsigned long flags, int skip,
		   int pc)
{
	__ftrace_trace_stack(tr->buffer, flags, skip, pc, NULL);
}

/**
 * trace_dump_stack - record a stack back trace in the trace buffer
 */
void trace_dump_stack(void)
{
	unsigned long flags;

	if (tracing_disabled || tracing_selftest_running)
		return;

	local_save_flags(flags);

	/* skipping 3 frames seems to get us to the caller of this function */
	__ftrace_trace_stack(global_trace.buffer, flags, 3, preempt_count(), NULL);
}

static DEFINE_PER_CPU(int, user_stack_count);

void
ftrace_trace_userstack(struct ring_buffer *buffer, unsigned long flags, int pc)
{
	struct ftrace_event_call *call = &event_user_stack;
	struct ring_buffer_event *event;
	struct userstack_entry *entry;
	struct stack_trace trace;

	if (!(trace_flags & TRACE_ITER_USERSTACKTRACE))
		return;

	/*
	 * NMIs can not handle page faults, even with fixups.
	 * Saving the user stack can (and often does) fault.
	 */
	if (unlikely(in_nmi()))
		return;

	/*
	 * prevent recursion, since the user stack tracing may
	 * trigger other kernel events.
	 */
	preempt_disable();
	if (__this_cpu_read(user_stack_count))
		goto out;

	__this_cpu_inc(user_stack_count);

	event = trace_buffer_lock_reserve(buffer, TRACE_USER_STACK,
					  sizeof(*entry), flags, pc);
	if (!event)
		goto out_drop_count;
	entry	= ring_buffer_event_data(event);

	entry->tgid		= current->tgid;
	memset(&entry->caller, 0, sizeof(entry->caller));

	trace.nr_entries	= 0;
	trace.max_entries	= FTRACE_STACK_ENTRIES;
	trace.skip		= 0;
	trace.entries		= entry->caller;

	save_stack_trace_user(&trace);
	if (!filter_check_discard(call, entry, buffer, event))
		__buffer_unlock_commit(buffer, event);

 out_drop_count:
	__this_cpu_dec(user_stack_count);
 out:
	preempt_enable();
}

#ifdef UNUSED
static void __trace_userstack(struct trace_array *tr, unsigned long flags)
{
	ftrace_trace_userstack(tr, flags, preempt_count());
}
#endif /* UNUSED */

#endif /* CONFIG_STACKTRACE */

/* created for use with alloc_percpu */
struct trace_buffer_struct {
	char buffer[TRACE_BUF_SIZE];
};

static struct trace_buffer_struct *trace_percpu_buffer;
static struct trace_buffer_struct *trace_percpu_sirq_buffer;
static struct trace_buffer_struct *trace_percpu_irq_buffer;
static struct trace_buffer_struct *trace_percpu_nmi_buffer;

/*
 * The buffer used is dependent on the context. There is a per cpu
 * buffer for normal context, softirq context, hard irq context and
 * for NMI context. This allows for lockless recording.
 *
 * Note, if the buffers failed to be allocated, then this returns NULL
 */
static char *get_trace_buf(void)
{
	struct trace_buffer_struct *percpu_buffer;
	struct trace_buffer_struct *buffer;

	/*
	 * If we have allocated per cpu buffers, then we do not
	 * need to do any locking.
	 */
	if (in_nmi())
		percpu_buffer = trace_percpu_nmi_buffer;
	else if (in_irq())
		percpu_buffer = trace_percpu_irq_buffer;
	else if (in_softirq())
		percpu_buffer = trace_percpu_sirq_buffer;
	else
		percpu_buffer = trace_percpu_buffer;

	if (!percpu_buffer)
		return NULL;

	buffer = per_cpu_ptr(percpu_buffer, smp_processor_id());

	return buffer->buffer;
}

static int alloc_percpu_trace_buffer(void)
{
	struct trace_buffer_struct *buffers;
	struct trace_buffer_struct *sirq_buffers;
	struct trace_buffer_struct *irq_buffers;
	struct trace_buffer_struct *nmi_buffers;

	buffers = alloc_percpu(struct trace_buffer_struct);
	if (!buffers)
		goto err_warn;

	sirq_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!sirq_buffers)
		goto err_sirq;

	irq_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!irq_buffers)
		goto err_irq;

	nmi_buffers = alloc_percpu(struct trace_buffer_struct);
	if (!nmi_buffers)
		goto err_nmi;

	trace_percpu_buffer = buffers;
	trace_percpu_sirq_buffer = sirq_buffers;
	trace_percpu_irq_buffer = irq_buffers;
	trace_percpu_nmi_buffer = nmi_buffers;

	return 0;

 err_nmi:
	free_percpu(irq_buffers);
 err_irq:
	free_percpu(sirq_buffers);
 err_sirq:
	free_percpu(buffers);
 err_warn:
	WARN(1, "Could not allocate percpu trace_printk buffer");
	return -ENOMEM;
}

static int buffers_allocated;

void trace_printk_init_buffers(void)
{
	if (buffers_allocated)
		return;

	if (alloc_percpu_trace_buffer())
		return;

	pr_info("ftrace: Allocated trace_printk buffers\n");

	/* Expand the buffers to set size */
	tracing_update_buffers();

	buffers_allocated = 1;

	/*
	 * trace_printk_init_buffers() can be called by modules.
	 * If that happens, then we need to start cmdline recording
	 * directly here. If the global_trace.buffer is already
	 * allocated here, then this was called by module code.
	 */
	if (global_trace.buffer)
		tracing_start_cmdline_record();
}

void trace_printk_start_comm(void)
{
	/* Start tracing comms if trace printk is set */
	if (!buffers_allocated)
		return;
	tracing_start_cmdline_record();
}

static void trace_printk_start_stop_comm(int enabled)
{
	if (!buffers_allocated)
		return;

	if (enabled)
		tracing_start_cmdline_record();
	else
		tracing_stop_cmdline_record();
}
1627
1628 /**
1629  * trace_vbprintk - write binary msg to tracing buffer
1630  *
1631  */
1632 int trace_vbprintk(unsigned long ip, const char *fmt, va_list args)
1633 {
1634         struct ftrace_event_call *call = &event_bprint;
1635         struct ring_buffer_event *event;
1636         struct ring_buffer *buffer;
1637         struct trace_array *tr = &global_trace;
1638         struct bprint_entry *entry;
1639         unsigned long flags;
1640         char *tbuffer;
1641         int len = 0, size, pc;
1642
1643         if (unlikely(tracing_selftest_running || tracing_disabled))
1644                 return 0;
1645
1646         /* Don't pollute graph traces with trace_vprintk internals */
1647         pause_graph_tracing();
1648
1649         pc = preempt_count();
1650         preempt_disable_notrace();
1651
1652         tbuffer = get_trace_buf();
1653         if (!tbuffer) {
1654                 len = 0;
1655                 goto out;
1656         }
1657
1658         len = vbin_printf((u32 *)tbuffer, TRACE_BUF_SIZE/sizeof(int), fmt, args);
1659
1660         if (len > TRACE_BUF_SIZE/sizeof(int) || len < 0)
1661                 goto out;
1662
1663         local_save_flags(flags);
1664         size = sizeof(*entry) + sizeof(u32) * len;
1665         buffer = tr->buffer;
1666         event = trace_buffer_lock_reserve(buffer, TRACE_BPRINT, size,
1667                                           flags, pc);
1668         if (!event)
1669                 goto out;
1670         entry = ring_buffer_event_data(event);
1671         entry->ip                       = ip;
1672         entry->fmt                      = fmt;
1673
1674         memcpy(entry->buf, tbuffer, sizeof(u32) * len);
1675         if (!filter_check_discard(call, entry, buffer, event)) {
1676                 __buffer_unlock_commit(buffer, event);
1677                 ftrace_trace_stack(buffer, flags, 6, pc);
1678         }
1679
1680 out:
1681         preempt_enable_notrace();
1682         unpause_graph_tracing();
1683
1684         return len;
1685 }
1686 EXPORT_SYMBOL_GPL(trace_vbprintk);
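
/*
 * Usage sketch (hypothetical wrapper, not part of this file): callers
 * that already hold a va_list hand it to trace_vbprintk() directly; a
 * varargs front end would look like:
 *
 *     int my_trace_bprintk(unsigned long ip, const char *fmt, ...)
 *     {
 *             va_list ap;
 *             int ret;
 *
 *             va_start(ap, fmt);
 *             ret = trace_vbprintk(ip, fmt, ap);
 *             va_end(ap);
 *             return ret;
 *     }
 */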
1687
1688 int trace_array_printk(struct trace_array *tr,
1689                        unsigned long ip, const char *fmt, ...)
1690 {
1691         int ret;
1692         va_list ap;
1693
1694         if (!(trace_flags & TRACE_ITER_PRINTK))
1695                 return 0;
1696
1697         va_start(ap, fmt);
1698         ret = trace_array_vprintk(tr, ip, fmt, ap);
1699         va_end(ap);
1700         return ret;
1701 }
1702
1703 int trace_array_vprintk(struct trace_array *tr,
1704                         unsigned long ip, const char *fmt, va_list args)
1705 {
1706         struct ftrace_event_call *call = &event_print;
1707         struct ring_buffer_event *event;
1708         struct ring_buffer *buffer;
1709         int len = 0, size, pc;
1710         struct print_entry *entry;
1711         unsigned long flags;
1712         char *tbuffer;
1713
1714         if (tracing_disabled || tracing_selftest_running)
1715                 return 0;
1716
1717         /* Don't pollute graph traces with trace_vprintk internals */
1718         pause_graph_tracing();
1719
1720         pc = preempt_count();
1721         preempt_disable_notrace();
1722
1723
1724         tbuffer = get_trace_buf();
1725         if (!tbuffer) {
1726                 len = 0;
1727                 goto out;
1728         }
1729
1730         len = vsnprintf(tbuffer, TRACE_BUF_SIZE, fmt, args);
1731         if (len > TRACE_BUF_SIZE)
1732                 goto out;
1733
1734         local_save_flags(flags);
1735         size = sizeof(*entry) + len + 1;
1736         buffer = tr->buffer;
1737         event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
1738                                           flags, pc);
1739         if (!event)
1740                 goto out;
1741         entry = ring_buffer_event_data(event);
1742         entry->ip = ip;
1743
1744         memcpy(&entry->buf, tbuffer, len);
1745         entry->buf[len] = '\0';
1746         if (!filter_check_discard(call, entry, buffer, event)) {
1747                 __buffer_unlock_commit(buffer, event);
1748                 ftrace_trace_stack(buffer, flags, 6, pc);
1749         }
1750  out:
1751         preempt_enable_notrace();
1752         unpause_graph_tracing();
1753
1754         return len;
1755 }
1756
1757 int trace_vprintk(unsigned long ip, const char *fmt, va_list args)
1758 {
1759         return trace_array_vprintk(&global_trace, ip, fmt, args);
1760 }
1761 EXPORT_SYMBOL_GPL(trace_vprintk);
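
/*
 * Unlike trace_vbprintk() above, which records only the format pointer
 * plus the binary arguments (via vbin_printf()), trace_vprintk() expands
 * the message with vsnprintf() and stores the resulting text, typically
 * at the cost of a larger event and more formatting work at trace time.
 */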
1762
1763 static void trace_iterator_increment(struct trace_iterator *iter)
1764 {
1765         struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, iter->cpu);
1766
1767         iter->idx++;
1768         if (buf_iter)
1769                 ring_buffer_read(buf_iter, NULL);
1770 }
1771
1772 static struct trace_entry *
1773 peek_next_entry(struct trace_iterator *iter, int cpu, u64 *ts,
1774                 unsigned long *lost_events)
1775 {
1776         struct ring_buffer_event *event;
1777         struct ring_buffer_iter *buf_iter = trace_buffer_iter(iter, cpu);
1778
1779         if (buf_iter)
1780                 event = ring_buffer_iter_peek(buf_iter, ts);
1781         else
1782                 event = ring_buffer_peek(iter->tr->buffer, cpu, ts,
1783                                          lost_events);
1784
1785         if (event) {
1786                 iter->ent_size = ring_buffer_event_length(event);
1787                 return ring_buffer_event_data(event);
1788         }
1789         iter->ent_size = 0;
1790         return NULL;
1791 }
1792
1793 static struct trace_entry *
1794 __find_next_entry(struct trace_iterator *iter, int *ent_cpu,
1795                   unsigned long *missing_events, u64 *ent_ts)
1796 {
1797         struct ring_buffer *buffer = iter->tr->buffer;
1798         struct trace_entry *ent, *next = NULL;
1799         unsigned long lost_events = 0, next_lost = 0;
1800         int cpu_file = iter->cpu_file;
1801         u64 next_ts = 0, ts;
1802         int next_cpu = -1;
1803         int next_size = 0;
1804         int cpu;
1805
1806         /*
1807          * If we are in a per_cpu trace file, don't bother iterating over
1808          * all cpus; peek directly at that one cpu.
1809          */
1810         if (cpu_file > TRACE_PIPE_ALL_CPU) {
1811                 if (ring_buffer_empty_cpu(buffer, cpu_file))
1812                         return NULL;
1813                 ent = peek_next_entry(iter, cpu_file, ent_ts, missing_events);
1814                 if (ent_cpu)
1815                         *ent_cpu = cpu_file;
1816
1817                 return ent;
1818         }
1819
1820         for_each_tracing_cpu(cpu) {
1821
1822                 if (ring_buffer_empty_cpu(buffer, cpu))
1823                         continue;
1824
1825                 ent = peek_next_entry(iter, cpu, &ts, &lost_events);
1826
1827                 /*
1828                  * Pick the entry with the smallest timestamp:
1829                  */
1830                 if (ent && (!next || ts < next_ts)) {
1831                         next = ent;
1832                         next_cpu = cpu;
1833                         next_ts = ts;
1834                         next_lost = lost_events;
1835                         next_size = iter->ent_size;
1836                 }
1837         }
1838
1839         iter->ent_size = next_size;
1840
1841         if (ent_cpu)
1842                 *ent_cpu = next_cpu;
1843
1844         if (ent_ts)
1845                 *ent_ts = next_ts;
1846
1847         if (missing_events)
1848                 *missing_events = next_lost;
1849
1850         return next;
1851 }
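
/*
 * Worked example (sketch): __find_next_entry() merges the per-cpu
 * buffers by timestamp, like the merge step of a mergesort. If cpu0's
 * next entry has ts=1000, cpu1's has ts=400 and cpu2's has ts=700, the
 * loop above returns cpu1's entry with *ent_cpu = 1 and *ent_ts = 400;
 * cpu0 and cpu2 are simply peeked again on the next call.
 */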
1852
1853 /* Find the next real entry, without updating the iterator itself */
1854 struct trace_entry *trace_find_next_entry(struct trace_iterator *iter,
1855                                           int *ent_cpu, u64 *ent_ts)
1856 {
1857         return __find_next_entry(iter, ent_cpu, NULL, ent_ts);
1858 }
1859
1860 /* Find the next real entry, and increment the iterator to the next entry */
1861 void *trace_find_next_entry_inc(struct trace_iterator *iter)
1862 {
1863         iter->ent = __find_next_entry(iter, &iter->cpu,
1864                                       &iter->lost_events, &iter->ts);
1865
1866         if (iter->ent)
1867                 trace_iterator_increment(iter);
1868
1869         return iter->ent ? iter : NULL;
1870 }
1871
1872 static void trace_consume(struct trace_iterator *iter)
1873 {
1874         ring_buffer_consume(iter->tr->buffer, iter->cpu, &iter->ts,
1875                             &iter->lost_events);
1876 }
1877
1878 static void *s_next(struct seq_file *m, void *v, loff_t *pos)
1879 {
1880         struct trace_iterator *iter = m->private;
1881         int i = (int)*pos;
1882         void *ent;
1883
1884         WARN_ON_ONCE(iter->leftover);
1885
1886         (*pos)++;
1887
1888         /* can't go backwards */
1889         if (iter->idx > i)
1890                 return NULL;
1891
1892         if (iter->idx < 0)
1893                 ent = trace_find_next_entry_inc(iter);
1894         else
1895                 ent = iter;
1896
1897         while (ent && iter->idx < i)
1898                 ent = trace_find_next_entry_inc(iter);
1899
1900         iter->pos = *pos;
1901
1902         return ent;
1903 }
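
/*
 * Note: s_next() keeps to the seq_file contract that positions only
 * move forward; iter->idx remembers how far the trace has been
 * consumed, and trace_find_next_entry_inc() is called repeatedly until
 * the iterator catches up with the requested *pos.
 */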
1904
1905 void tracing_iter_reset(struct trace_iterator *iter, int cpu)
1906 {
1907         struct trace_array *tr = iter->tr;
1908         struct ring_buffer_event *event;
1909         struct ring_buffer_iter *buf_iter;
1910         unsigned long entries = 0;
1911         u64 ts;
1912
1913         tr->data[cpu]->skipped_entries = 0;
1914
1915         buf_iter = trace_buffer_iter(iter, cpu);
1916         if (!buf_iter)
1917                 return;
1918
1919         ring_buffer_iter_reset(buf_iter);
1920
1921         /*
1922          * With the max latency tracers, we could have the case that
1923          * a reset never took place on a cpu. This is evident when a
1924          * timestamp falls before the start of the buffer.
1925          */
1926         while ((event = ring_buffer_iter_peek(buf_iter, &ts))) {
1927                 if (ts >= iter->tr->time_start)
1928                         break;
1929                 entries++;
1930                 ring_buffer_read(buf_iter, NULL);
1931         }
1932
1933         tr->data[cpu]->skipped_entries = entries;
1934 }
1935
1936 /*
1937  * The current tracer is copied to avoid taking a global lock
1938  * all around.
1939  */
1940 static void *s_start(struct seq_file *m, loff_t *pos)
1941 {
1942         struct trace_iterator *iter = m->private;
1943         static struct tracer *old_tracer;
1944         int cpu_file = iter->cpu_file;
1945         void *p = NULL;
1946         loff_t l = 0;
1947         int cpu;
1948
1949         /* copy the tracer to avoid using a global lock all around */
1950         mutex_lock(&trace_types_lock);
1951         if (unlikely(old_tracer != current_trace && current_trace)) {
1952                 old_tracer = current_trace;
1953                 *iter->trace = *current_trace;
1954         }
1955         mutex_unlock(&trace_types_lock);
1956
1957         atomic_inc(&trace_record_cmdline_disabled);
1958
1959         if (*pos != iter->pos) {
1960                 iter->ent = NULL;
1961                 iter->cpu = 0;
1962                 iter->idx = -1;
1963
1964                 if (cpu_file == TRACE_PIPE_ALL_CPU) {
1965                         for_each_tracing_cpu(cpu)
1966                                 tracing_iter_reset(iter, cpu);
1967                 } else
1968                         tracing_iter_reset(iter, cpu_file);
1969
1970                 iter->leftover = 0;
1971                 for (p = iter; p && l < *pos; p = s_next(m, p, &l))
1972                         ;
1973
1974         } else {
1975                 /*
1976                  * If we overflowed the seq_file before, then we want
1977                  * to just reuse the trace_seq buffer again.
1978                  */
1979                 if (iter->leftover)
1980                         p = iter;
1981                 else {
1982                         l = *pos - 1;
1983                         p = s_next(m, p, &l);
1984                 }
1985         }
1986
1987         trace_event_read_lock();
1988         trace_access_lock(cpu_file);
1989         return p;
1990 }
1991
1992 static void s_stop(struct seq_file *m, void *p)
1993 {
1994         struct trace_iterator *iter = m->private;
1995
1996         atomic_dec(&trace_record_cmdline_disabled);
1997         trace_access_unlock(iter->cpu_file);
1998         trace_event_read_unlock();
1999 }
2000
2001 static void
2002 get_total_entries(struct trace_array *tr, unsigned long *total, unsigned long *entries)
2003 {
2004         unsigned long count;
2005         int cpu;
2006
2007         *total = 0;
2008         *entries = 0;
2009
2010         for_each_tracing_cpu(cpu) {
2011                 count = ring_buffer_entries_cpu(tr->buffer, cpu);
2012                 /*
2013                  * If this buffer has skipped entries, then we hold all
2014                  * entries for the trace and we need to ignore the
2015                  * ones before the start timestamp.
2016                  */
2017                 if (tr->data[cpu]->skipped_entries) {
2018                         count -= tr->data[cpu]->skipped_entries;
2019                         /* total is the same as the entries */
2020                         *total += count;
2021                 } else
2022                         *total += count +
2023                                 ring_buffer_overrun_cpu(tr->buffer, cpu);
2024                 *entries += count;
2025         }
2026 }
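
/*
 * Example (sketch): if a cpu's buffer holds 1000 entries and another
 * 250 were overwritten, get_total_entries() yields *entries = 1000 and
 * *total = 1250 for that cpu; these are exactly the two numbers shown
 * as "entries-in-buffer/entries-written" by print_event_info() below.
 */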
2027
2028 static void print_lat_help_header(struct seq_file *m)
2029 {
2030         seq_puts(m, "#                  _------=> CPU#            \n");
2031         seq_puts(m, "#                 / _-----=> irqs-off        \n");
2032         seq_puts(m, "#                | / _----=> need-resched    \n");
2033         seq_puts(m, "#                || / _---=> hardirq/softirq \n");
2034         seq_puts(m, "#                ||| / _--=> preempt-depth   \n");
2035         seq_puts(m, "#                |||| /     delay             \n");
2036         seq_puts(m, "#  cmd     pid   ||||| time  |   caller      \n");
2037         seq_puts(m, "#     \\   /      |||||  \\    |   /           \n");
2038 }
2039
2040 static void print_event_info(struct trace_array *tr, struct seq_file *m)
2041 {
2042         unsigned long total;
2043         unsigned long entries;
2044
2045         get_total_entries(tr, &total, &entries);
2046         seq_printf(m, "# entries-in-buffer/entries-written: %lu/%lu   #P:%d\n",
2047                    entries, total, num_online_cpus());
2048         seq_puts(m, "#\n");
2049 }
2050
2051 static void print_func_help_header(struct trace_array *tr, struct seq_file *m)
2052 {
2053         print_event_info(tr, m);
2054         seq_puts(m, "#           TASK-PID   CPU#      TIMESTAMP  FUNCTION\n");
2055         seq_puts(m, "#              | |       |          |         |\n");
2056 }
2057
2058 static void print_func_help_header_irq(struct trace_array *tr, struct seq_file *m)
2059 {
2060         print_event_info(tr, m);
2061         seq_puts(m, "#                              _-----=> irqs-off\n");
2062         seq_puts(m, "#                             / _----=> need-resched\n");
2063         seq_puts(m, "#                            | / _---=> hardirq/softirq\n");
2064         seq_puts(m, "#                            || / _--=> preempt-depth\n");
2065         seq_puts(m, "#                            ||| /     delay\n");
2066         seq_puts(m, "#           TASK-PID   CPU#  ||||    TIMESTAMP  FUNCTION\n");
2067         seq_puts(m, "#              | |       |   ||||       |         |\n");
2068 }
2069
2070 void
2071 print_trace_header(struct seq_file *m, struct trace_iterator *iter)
2072 {
2073         unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2074         struct trace_array *tr = iter->tr;
2075         struct trace_array_cpu *data = tr->data[tr->cpu];
2076         struct tracer *type = current_trace;
2077         unsigned long entries;
2078         unsigned long total;
2079         const char *name = "preemption";
2080
2081         if (type)
2082                 name = type->name;
2083
2084         get_total_entries(tr, &total, &entries);
2085
2086         seq_printf(m, "# %s latency trace v1.1.5 on %s\n",
2087                    name, UTS_RELEASE);
2088         seq_puts(m, "# -----------------------------------"
2089                  "---------------------------------\n");
2090         seq_printf(m, "# latency: %lu us, #%lu/%lu, CPU#%d |"
2091                    " (M:%s VP:%d, KP:%d, SP:%d HP:%d",
2092                    nsecs_to_usecs(data->saved_latency),
2093                    entries,
2094                    total,
2095                    tr->cpu,
2096 #if defined(CONFIG_PREEMPT_NONE)
2097                    "server",
2098 #elif defined(CONFIG_PREEMPT_VOLUNTARY)
2099                    "desktop",
2100 #elif defined(CONFIG_PREEMPT)
2101                    "preempt",
2102 #else
2103                    "unknown",
2104 #endif
2105                    /* These are reserved for later use */
2106                    0, 0, 0, 0);
2107 #ifdef CONFIG_SMP
2108         seq_printf(m, " #P:%d)\n", num_online_cpus());
2109 #else
2110         seq_puts(m, ")\n");
2111 #endif
2112         seq_puts(m, "#    -----------------\n");
2113         seq_printf(m, "#    | task: %.16s-%d "
2114                    "(uid:%d nice:%ld policy:%ld rt_prio:%ld)\n",
2115                    data->comm, data->pid,
2116                    from_kuid_munged(seq_user_ns(m), data->uid), data->nice,
2117                    data->policy, data->rt_priority);
2118         seq_puts(m, "#    -----------------\n");
2119
2120         if (data->critical_start) {
2121                 seq_puts(m, "#  => started at: ");
2122                 seq_print_ip_sym(&iter->seq, data->critical_start, sym_flags);
2123                 trace_print_seq(m, &iter->seq);
2124                 seq_puts(m, "\n#  => ended at:   ");
2125                 seq_print_ip_sym(&iter->seq, data->critical_end, sym_flags);
2126                 trace_print_seq(m, &iter->seq);
2127                 seq_puts(m, "\n#\n");
2128         }
2129
2130         seq_puts(m, "#\n");
2131 }
2132
2133 static void test_cpu_buff_start(struct trace_iterator *iter)
2134 {
2135         struct trace_seq *s = &iter->seq;
2136
2137         if (!(trace_flags & TRACE_ITER_ANNOTATE))
2138                 return;
2139
2140         if (!(iter->iter_flags & TRACE_FILE_ANNOTATE))
2141                 return;
2142
2143         if (cpumask_test_cpu(iter->cpu, iter->started))
2144                 return;
2145
2146         if (iter->tr->data[iter->cpu]->skipped_entries)
2147                 return;
2148
2149         cpumask_set_cpu(iter->cpu, iter->started);
2150
2151         /* Don't print started cpu buffer for the first entry of the trace */
2152         if (iter->idx > 1)
2153                 trace_seq_printf(s, "##### CPU %u buffer started ####\n",
2154                                 iter->cpu);
2155 }
2156
2157 static enum print_line_t print_trace_fmt(struct trace_iterator *iter)
2158 {
2159         struct trace_seq *s = &iter->seq;
2160         unsigned long sym_flags = (trace_flags & TRACE_ITER_SYM_MASK);
2161         struct trace_entry *entry;
2162         struct trace_event *event;
2163
2164         entry = iter->ent;
2165
2166         test_cpu_buff_start(iter);
2167
2168         event = ftrace_find_event(entry->type);
2169
2170         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2171                 if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2172                         if (!trace_print_lat_context(iter))
2173                                 goto partial;
2174                 } else {
2175                         if (!trace_print_context(iter))
2176                                 goto partial;
2177                 }
2178         }
2179
2180         if (event)
2181                 return event->funcs->trace(iter, sym_flags, event);
2182
2183         if (!trace_seq_printf(s, "Unknown type %d\n", entry->type))
2184                 goto partial;
2185
2186         return TRACE_TYPE_HANDLED;
2187 partial:
2188         return TRACE_TYPE_PARTIAL_LINE;
2189 }
2190
2191 static enum print_line_t print_raw_fmt(struct trace_iterator *iter)
2192 {
2193         struct trace_seq *s = &iter->seq;
2194         struct trace_entry *entry;
2195         struct trace_event *event;
2196
2197         entry = iter->ent;
2198
2199         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2200                 if (!trace_seq_printf(s, "%d %d %llu ",
2201                                       entry->pid, iter->cpu, iter->ts))
2202                         goto partial;
2203         }
2204
2205         event = ftrace_find_event(entry->type);
2206         if (event)
2207                 return event->funcs->raw(iter, 0, event);
2208
2209         if (!trace_seq_printf(s, "%d ?\n", entry->type))
2210                 goto partial;
2211
2212         return TRACE_TYPE_HANDLED;
2213 partial:
2214         return TRACE_TYPE_PARTIAL_LINE;
2215 }
2216
2217 static enum print_line_t print_hex_fmt(struct trace_iterator *iter)
2218 {
2219         struct trace_seq *s = &iter->seq;
2220         unsigned char newline = '\n';
2221         struct trace_entry *entry;
2222         struct trace_event *event;
2223
2224         entry = iter->ent;
2225
2226         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2227                 SEQ_PUT_HEX_FIELD_RET(s, entry->pid);
2228                 SEQ_PUT_HEX_FIELD_RET(s, iter->cpu);
2229                 SEQ_PUT_HEX_FIELD_RET(s, iter->ts);
2230         }
2231
2232         event = ftrace_find_event(entry->type);
2233         if (event) {
2234                 enum print_line_t ret = event->funcs->hex(iter, 0, event);
2235                 if (ret != TRACE_TYPE_HANDLED)
2236                         return ret;
2237         }
2238
2239         SEQ_PUT_FIELD_RET(s, newline);
2240
2241         return TRACE_TYPE_HANDLED;
2242 }
2243
2244 static enum print_line_t print_bin_fmt(struct trace_iterator *iter)
2245 {
2246         struct trace_seq *s = &iter->seq;
2247         struct trace_entry *entry;
2248         struct trace_event *event;
2249
2250         entry = iter->ent;
2251
2252         if (trace_flags & TRACE_ITER_CONTEXT_INFO) {
2253                 SEQ_PUT_FIELD_RET(s, entry->pid);
2254                 SEQ_PUT_FIELD_RET(s, iter->cpu);
2255                 SEQ_PUT_FIELD_RET(s, iter->ts);
2256         }
2257
2258         event = ftrace_find_event(entry->type);
2259         return event ? event->funcs->binary(iter, 0, event) :
2260                 TRACE_TYPE_HANDLED;
2261 }
2262
2263 int trace_empty(struct trace_iterator *iter)
2264 {
2265         struct ring_buffer_iter *buf_iter;
2266         int cpu;
2267
2268         /* If we are looking at one CPU buffer, only check that one */
2269         if (iter->cpu_file != TRACE_PIPE_ALL_CPU) {
2270                 cpu = iter->cpu_file;
2271                 buf_iter = trace_buffer_iter(iter, cpu);
2272                 if (buf_iter) {
2273                         if (!ring_buffer_iter_empty(buf_iter))
2274                                 return 0;
2275                 } else {
2276                         if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
2277                                 return 0;
2278                 }
2279                 return 1;
2280         }
2281
2282         for_each_tracing_cpu(cpu) {
2283                 buf_iter = trace_buffer_iter(iter, cpu);
2284                 if (buf_iter) {
2285                         if (!ring_buffer_iter_empty(buf_iter))
2286                                 return 0;
2287                 } else {
2288                         if (!ring_buffer_empty_cpu(iter->tr->buffer, cpu))
2289                                 return 0;
2290                 }
2291         }
2292
2293         return 1;
2294 }
2295
2296 /* Called with trace_event_read_lock() held. */
2297 enum print_line_t print_trace_line(struct trace_iterator *iter)
2298 {
2299         enum print_line_t ret;
2300
2301         if (iter->lost_events &&
2302             !trace_seq_printf(&iter->seq, "CPU:%d [LOST %lu EVENTS]\n",
2303                                  iter->cpu, iter->lost_events))
2304                 return TRACE_TYPE_PARTIAL_LINE;
2305
2306         if (iter->trace && iter->trace->print_line) {
2307                 ret = iter->trace->print_line(iter);
2308                 if (ret != TRACE_TYPE_UNHANDLED)
2309                         return ret;
2310         }
2311
2312         if (iter->ent->type == TRACE_BPRINT &&
2313                         trace_flags & TRACE_ITER_PRINTK &&
2314                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2315                 return trace_print_bprintk_msg_only(iter);
2316
2317         if (iter->ent->type == TRACE_PRINT &&
2318                         trace_flags & TRACE_ITER_PRINTK &&
2319                         trace_flags & TRACE_ITER_PRINTK_MSGONLY)
2320                 return trace_print_printk_msg_only(iter);
2321
2322         if (trace_flags & TRACE_ITER_BIN)
2323                 return print_bin_fmt(iter);
2324
2325         if (trace_flags & TRACE_ITER_HEX)
2326                 return print_hex_fmt(iter);
2327
2328         if (trace_flags & TRACE_ITER_RAW)
2329                 return print_raw_fmt(iter);
2330
2331         return print_trace_fmt(iter);
2332 }
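
/*
 * The precedence encoded above: lost-event annotation first, then a
 * tracer's own ->print_line() hook, then the printk msg-only shortcuts,
 * and finally the bin/hex/raw formats before the default formatter.
 */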
2333
2334 void trace_latency_header(struct seq_file *m)
2335 {
2336         struct trace_iterator *iter = m->private;
2337
2338         /* print nothing if the buffers are empty */
2339         if (trace_empty(iter))
2340                 return;
2341
2342         if (iter->iter_flags & TRACE_FILE_LAT_FMT)
2343                 print_trace_header(m, iter);
2344
2345         if (!(trace_flags & TRACE_ITER_VERBOSE))
2346                 print_lat_help_header(m);
2347 }
2348
2349 void trace_default_header(struct seq_file *m)
2350 {
2351         struct trace_iterator *iter = m->private;
2352
2353         if (!(trace_flags & TRACE_ITER_CONTEXT_INFO))
2354                 return;
2355
2356         if (iter->iter_flags & TRACE_FILE_LAT_FMT) {
2357                 /* print nothing if the buffers are empty */
2358                 if (trace_empty(iter))
2359                         return;
2360                 print_trace_header(m, iter);
2361                 if (!(trace_flags & TRACE_ITER_VERBOSE))
2362                         print_lat_help_header(m);
2363         } else {
2364                 if (!(trace_flags & TRACE_ITER_VERBOSE)) {
2365                         if (trace_flags & TRACE_ITER_IRQ_INFO)
2366                                 print_func_help_header_irq(iter->tr, m);
2367                         else
2368                                 print_func_help_header(iter->tr, m);
2369                 }
2370         }
2371 }
2372
2373 static void test_ftrace_alive(struct seq_file *m)
2374 {
2375         if (!ftrace_is_dead())
2376                 return;
2377         seq_puts(m, "# WARNING: FUNCTION TRACING IS CORRUPTED\n");
2378         seq_puts(m, "#          MAY BE MISSING FUNCTION EVENTS\n");
2379 }
2380
2381 static int s_show(struct seq_file *m, void *v)
2382 {
2383         struct trace_iterator *iter = v;
2384         int ret;
2385
2386         if (iter->ent == NULL) {
2387                 if (iter->tr) {
2388                         seq_printf(m, "# tracer: %s\n", iter->trace->name);
2389                         seq_puts(m, "#\n");
2390                         test_ftrace_alive(m);
2391                 }
2392                 if (iter->trace && iter->trace->print_header)
2393                         iter->trace->print_header(m);
2394                 else
2395                         trace_default_header(m);
2396
2397         } else if (iter->leftover) {
2398                 /*
2399                  * If we filled the seq_file buffer earlier, we
2400                  * want to just show it now.
2401                  */
2402                 ret = trace_print_seq(m, &iter->seq);
2403
2404                 /* ret should this time be zero, but you never know */
2405                 iter->leftover = ret;
2406
2407         } else {
2408                 print_trace_line(iter);
2409                 ret = trace_print_seq(m, &iter->seq);
2410                 /*
2411                  * If we overflow the seq_file buffer, then it will
2412                  * ask us for this data again at start up.
2413                  * Use that instead.
2414                  *  ret is 0 if seq_file write succeeded.
2415                  *        -1 otherwise.
2416                  */
2417                 iter->leftover = ret;
2418         }
2419
2420         return 0;
2421 }
2422
2423 static const struct seq_operations tracer_seq_ops = {
2424         .start          = s_start,
2425         .next           = s_next,
2426         .stop           = s_stop,
2427         .show           = s_show,
2428 };
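
/*
 * These seq_operations back the "trace" file: __tracing_open() below
 * attaches them via __seq_open_private(), and tracing_fops uses
 * seq_read(), so the seq_file core invokes s_start()/s_next()/s_show()/
 * s_stop() for every chunk a reader pulls out.
 */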
2429
2430 static struct trace_iterator *
2431 __tracing_open(struct inode *inode, struct file *file)
2432 {
2433         long cpu_file = (long) inode->i_private;
2434         struct trace_iterator *iter;
2435         int cpu;
2436
2437         if (tracing_disabled)
2438                 return ERR_PTR(-ENODEV);
2439
2440         iter = __seq_open_private(file, &tracer_seq_ops, sizeof(*iter));
2441         if (!iter)
2442                 return ERR_PTR(-ENOMEM);
2443
2444         iter->buffer_iter = kzalloc(sizeof(*iter->buffer_iter) * num_possible_cpus(),
2445                                     GFP_KERNEL);
2446         if (!iter->buffer_iter)
2447                 goto release;
2448
2449         /*
2450          * We make a copy of the current tracer to avoid concurrent
2451          * changes on it while we are reading.
2452          */
2453         mutex_lock(&trace_types_lock);
2454         iter->trace = kzalloc(sizeof(*iter->trace), GFP_KERNEL);
2455         if (!iter->trace)
2456                 goto fail;
2457
2458         if (current_trace)
2459                 *iter->trace = *current_trace;
2460
2461         if (!zalloc_cpumask_var(&iter->started, GFP_KERNEL))
2462                 goto fail;
2463
2464         if (current_trace && current_trace->print_max)
2465                 iter->tr = &max_tr;
2466         else
2467                 iter->tr = &global_trace;
2468         iter->pos = -1;
2469         mutex_init(&iter->mutex);
2470         iter->cpu_file = cpu_file;
2471
2472         /* Notify the tracer early; before we stop tracing. */
2473         if (iter->trace && iter->trace->open)
2474                 iter->trace->open(iter);
2475
2476         /* Annotate start of buffers if we had overruns */
2477         if (ring_buffer_overruns(iter->tr->buffer))
2478                 iter->iter_flags |= TRACE_FILE_ANNOTATE;
2479
2480         /* stop the trace while dumping */
2481         tracing_stop();
2482
2483         if (iter->cpu_file == TRACE_PIPE_ALL_CPU) {
2484                 for_each_tracing_cpu(cpu) {
2485                         iter->buffer_iter[cpu] =
2486                                 ring_buffer_read_prepare(iter->tr->buffer, cpu);
2487                 }
2488                 ring_buffer_read_prepare_sync();
2489                 for_each_tracing_cpu(cpu) {
2490                         ring_buffer_read_start(iter->buffer_iter[cpu]);
2491                         tracing_iter_reset(iter, cpu);
2492                 }
2493         } else {
2494                 cpu = iter->cpu_file;
2495                 iter->buffer_iter[cpu] =
2496                         ring_buffer_read_prepare(iter->tr->buffer, cpu);
2497                 ring_buffer_read_prepare_sync();
2498                 ring_buffer_read_start(iter->buffer_iter[cpu]);
2499                 tracing_iter_reset(iter, cpu);
2500         }
2501
2502         mutex_unlock(&trace_types_lock);
2503
2504         return iter;
2505
2506  fail:
2507         mutex_unlock(&trace_types_lock);
2508         kfree(iter->trace);
2509         kfree(iter->buffer_iter);
2510 release:
2511         seq_release_private(inode, file);
2512         return ERR_PTR(-ENOMEM);
2513 }
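
/*
 * Note: __tracing_open() stopped tracing above ("stop the trace while
 * dumping"); the matching tracing_start() lives in tracing_release(),
 * so tracing resumes only when the reader closes the file.
 */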
2514
2515 int tracing_open_generic(struct inode *inode, struct file *filp)
2516 {
2517         if (tracing_disabled)
2518                 return -ENODEV;
2519
2520         filp->private_data = inode->i_private;
2521         return 0;
2522 }
2523
2524 static int tracing_release(struct inode *inode, struct file *file)
2525 {
2526         struct seq_file *m = file->private_data;
2527         struct trace_iterator *iter;
2528         int cpu;
2529
2530         if (!(file->f_mode & FMODE_READ))
2531                 return 0;
2532
2533         iter = m->private;
2534
2535         mutex_lock(&trace_types_lock);
2536         for_each_tracing_cpu(cpu) {
2537                 if (iter->buffer_iter[cpu])
2538                         ring_buffer_read_finish(iter->buffer_iter[cpu]);
2539         }
2540
2541         if (iter->trace && iter->trace->close)
2542                 iter->trace->close(iter);
2543
2544         /* reenable tracing if it was previously enabled */
2545         tracing_start();
2546         mutex_unlock(&trace_types_lock);
2547
2548         mutex_destroy(&iter->mutex);
2549         free_cpumask_var(iter->started);
2550         kfree(iter->trace);
2551         kfree(iter->buffer_iter);
2552         seq_release_private(inode, file);
2553         return 0;
2554 }
2555
2556 static int tracing_open(struct inode *inode, struct file *file)
2557 {
2558         struct trace_iterator *iter;
2559         int ret = 0;
2560
2561         /* If this file was open for write, then erase contents */
2562         if ((file->f_mode & FMODE_WRITE) &&
2563             (file->f_flags & O_TRUNC)) {
2564                 long cpu = (long) inode->i_private;
2565
2566                 if (cpu == TRACE_PIPE_ALL_CPU)
2567                         tracing_reset_online_cpus(&global_trace);
2568                 else
2569                         tracing_reset(&global_trace, cpu);
2570         }
2571
2572         if (file->f_mode & FMODE_READ) {
2573                 iter = __tracing_open(inode, file);
2574                 if (IS_ERR(iter))
2575                         ret = PTR_ERR(iter);
2576                 else if (trace_flags & TRACE_ITER_LATENCY_FMT)
2577                         iter->iter_flags |= TRACE_FILE_LAT_FMT;
2578         }
2579         return ret;
2580 }
2581
2582 static void *
2583 t_next(struct seq_file *m, void *v, loff_t *pos)
2584 {
2585         struct tracer *t = v;
2586
2587         (*pos)++;
2588
2589         if (t)
2590                 t = t->next;
2591
2592         return t;
2593 }
2594
2595 static void *t_start(struct seq_file *m, loff_t *pos)
2596 {
2597         struct tracer *t;
2598         loff_t l = 0;
2599
2600         mutex_lock(&trace_types_lock);
2601         for (t = trace_types; t && l < *pos; t = t_next(m, t, &l))
2602                 ;
2603
2604         return t;
2605 }
2606
2607 static void t_stop(struct seq_file *m, void *p)
2608 {
2609         mutex_unlock(&trace_types_lock);
2610 }
2611
2612 static int t_show(struct seq_file *m, void *v)
2613 {
2614         struct tracer *t = v;
2615
2616         if (!t)
2617                 return 0;
2618
2619         seq_printf(m, "%s", t->name);
2620         if (t->next)
2621                 seq_putc(m, ' ');
2622         else
2623                 seq_putc(m, '\n');
2624
2625         return 0;
2626 }
2627
2628 static const struct seq_operations show_traces_seq_ops = {
2629         .start          = t_start,
2630         .next           = t_next,
2631         .stop           = t_stop,
2632         .show           = t_show,
2633 };
2634
2635 static int show_traces_open(struct inode *inode, struct file *file)
2636 {
2637         if (tracing_disabled)
2638                 return -ENODEV;
2639
2640         return seq_open(file, &show_traces_seq_ops);
2641 }
2642
2643 static ssize_t
2644 tracing_write_stub(struct file *filp, const char __user *ubuf,
2645                    size_t count, loff_t *ppos)
2646 {
2647         return count;
2648 }
2649
2650 static loff_t tracing_seek(struct file *file, loff_t offset, int origin)
2651 {
2652         if (file->f_mode & FMODE_READ)
2653                 return seq_lseek(file, offset, origin);
2654         else
2655                 return 0;
2656 }
2657
2658 static const struct file_operations tracing_fops = {
2659         .open           = tracing_open,
2660         .read           = seq_read,
2661         .write          = tracing_write_stub,
2662         .llseek         = tracing_seek,
2663         .release        = tracing_release,
2664 };
2665
2666 static const struct file_operations show_traces_fops = {
2667         .open           = show_traces_open,
2668         .read           = seq_read,
2669         .release        = seq_release,
2670         .llseek         = seq_lseek,
2671 };
2672
2673 /*
2674  * Only trace on a CPU if the bitmask is set:
2675  */
2676 static cpumask_var_t tracing_cpumask;
2677
2678 /*
2679  * The tracer itself will not take this lock, but still we want
2680  * to provide a consistent cpumask to user-space:
2681  */
2682 static DEFINE_MUTEX(tracing_cpumask_update_lock);
2683
2684 /*
2685  * Temporary storage for the character representation of the
2686  * CPU bitmask (and one more byte for the newline):
2687  */
2688 static char mask_str[NR_CPUS + 1];
2689
2690 static ssize_t
2691 tracing_cpumask_read(struct file *filp, char __user *ubuf,
2692                      size_t count, loff_t *ppos)
2693 {
2694         int len;
2695
2696         mutex_lock(&tracing_cpumask_update_lock);
2697
2698         len = cpumask_scnprintf(mask_str, count, tracing_cpumask);
2699         if (count - len < 2) {
2700                 count = -EINVAL;
2701                 goto out_err;
2702         }
2703         len += sprintf(mask_str + len, "\n");
2704         count = simple_read_from_buffer(ubuf, count, ppos, mask_str, NR_CPUS+1);
2705
2706 out_err:
2707         mutex_unlock(&tracing_cpumask_update_lock);
2708
2709         return count;
2710 }
2711
2712 static ssize_t
2713 tracing_cpumask_write(struct file *filp, const char __user *ubuf,
2714                       size_t count, loff_t *ppos)
2715 {
2716         int err, cpu;
2717         cpumask_var_t tracing_cpumask_new;
2718
2719         if (!alloc_cpumask_var(&tracing_cpumask_new, GFP_KERNEL))
2720                 return -ENOMEM;
2721
2722         err = cpumask_parse_user(ubuf, count, tracing_cpumask_new);
2723         if (err)
2724                 goto err_unlock;
2725
2726         mutex_lock(&tracing_cpumask_update_lock);
2727
2728         local_irq_disable();
2729         arch_spin_lock(&ftrace_max_lock);
2730         for_each_tracing_cpu(cpu) {
2731                 /*
2732                  * Increase/decrease the disabled counter if we are
2733                  * about to flip a bit in the cpumask:
2734                  */
2735                 if (cpumask_test_cpu(cpu, tracing_cpumask) &&
2736                                 !cpumask_test_cpu(cpu, tracing_cpumask_new)) {
2737                         atomic_inc(&global_trace.data[cpu]->disabled);
2738                         ring_buffer_record_disable_cpu(global_trace.buffer, cpu);
2739                 }
2740                 if (!cpumask_test_cpu(cpu, tracing_cpumask) &&
2741                                 cpumask_test_cpu(cpu, tracing_cpumask_new)) {
2742                         atomic_dec(&global_trace.data[cpu]->disabled);
2743                         ring_buffer_record_enable_cpu(global_trace.buffer, cpu);
2744                 }
2745         }
2746         arch_spin_unlock(&ftrace_max_lock);
2747         local_irq_enable();
2748
2749         cpumask_copy(tracing_cpumask, tracing_cpumask_new);
2750
2751         mutex_unlock(&tracing_cpumask_update_lock);
2752         free_cpumask_var(tracing_cpumask_new);
2753
2754         return count;
2755
2756 err_unlock:
2757         free_cpumask_var(tracing_cpumask_new);
2758
2759         return err;
2760 }
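
/*
 * Example (sketch): "echo 3 > tracing_cpumask" keeps tracing on cpus 0
 * and 1 only. For every bit that flips, the loop above adjusts the
 * per-cpu disabled counter and toggles recording on that cpu's ring
 * buffer, all under ftrace_max_lock with interrupts disabled.
 */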
2761
2762 static const struct file_operations tracing_cpumask_fops = {
2763         .open           = tracing_open_generic,
2764         .read           = tracing_cpumask_read,
2765         .write          = tracing_cpumask_write,
2766         .llseek         = generic_file_llseek,
2767 };
2768
2769 static int tracing_trace_options_show(struct seq_file *m, void *v)
2770 {
2771         struct tracer_opt *trace_opts;
2772         u32 tracer_flags;
2773         int i;
2774
2775         mutex_lock(&trace_types_lock);
2776         tracer_flags = current_trace->flags->val;
2777         trace_opts = current_trace->flags->opts;
2778
2779         for (i = 0; trace_options[i]; i++) {
2780                 if (trace_flags & (1 << i))
2781                         seq_printf(m, "%s\n", trace_options[i]);
2782                 else
2783                         seq_printf(m, "no%s\n", trace_options[i]);
2784         }
2785
2786         for (i = 0; trace_opts[i].name; i++) {
2787                 if (tracer_flags & trace_opts[i].bit)
2788                         seq_printf(m, "%s\n", trace_opts[i].name);
2789                 else
2790                         seq_printf(m, "no%s\n", trace_opts[i].name);
2791         }
2792         mutex_unlock(&trace_types_lock);
2793
2794         return 0;
2795 }
2796
2797 static int __set_tracer_option(struct tracer *trace,
2798                                struct tracer_flags *tracer_flags,
2799                                struct tracer_opt *opts, int neg)
2800 {
2801         int ret;
2802
2803         ret = trace->set_flag(tracer_flags->val, opts->bit, !neg);
2804         if (ret)
2805                 return ret;
2806
2807         if (neg)
2808                 tracer_flags->val &= ~opts->bit;
2809         else
2810                 tracer_flags->val |= opts->bit;
2811         return 0;
2812 }
2813
2814 /* Try to assign a tracer specific option */
2815 static int set_tracer_option(struct tracer *trace, char *cmp, int neg)
2816 {
2817         struct tracer_flags *tracer_flags = trace->flags;
2818         struct tracer_opt *opts = NULL;
2819         int i;
2820
2821         for (i = 0; tracer_flags->opts[i].name; i++) {
2822                 opts = &tracer_flags->opts[i];
2823
2824                 if (strcmp(cmp, opts->name) == 0)
2825                         return __set_tracer_option(trace, trace->flags,
2826                                                    opts, neg);
2827         }
2828
2829         return -EINVAL;
2830 }
2831
2832 static void set_tracer_flags(unsigned int mask, int enabled)
2833 {
2834         /* do nothing if flag is already set */
2835         if (!!(trace_flags & mask) == !!enabled)
2836                 return;
2837
2838         if (enabled)
2839                 trace_flags |= mask;
2840         else
2841                 trace_flags &= ~mask;
2842
2843         if (mask == TRACE_ITER_RECORD_CMD)
2844                 trace_event_enable_cmd_record(enabled);
2845
2846         if (mask == TRACE_ITER_OVERWRITE)
2847                 ring_buffer_change_overwrite(global_trace.buffer, enabled);
2848
2849         if (mask == TRACE_ITER_PRINTK)
2850                 trace_printk_start_stop_comm(enabled);
2851 }
2852
2853 static int trace_set_options(char *option)
2854 {
2855         char *cmp;
2856         int neg = 0;
2857         int ret = 0;
2858         int i;
2859
2860         cmp = strstrip(option);
2861
2862         if (strncmp(cmp, "no", 2) == 0) {
2863                 neg = 1;
2864                 cmp += 2;
2865         }
2866
2867         for (i = 0; trace_options[i]; i++) {
2868                 if (strcmp(cmp, trace_options[i]) == 0) {
2869                         set_tracer_flags(1 << i, !neg);
2870                         break;
2871                 }
2872         }
2873
2874         /* If no option could be set, test the specific tracer options */
2875         if (!trace_options[i]) {
2876                 mutex_lock(&trace_types_lock);
2877                 ret = set_tracer_option(current_trace, cmp, neg);
2878                 mutex_unlock(&trace_types_lock);
2879         }
2880
2881         return ret;
2882 }
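
/*
 * Example (sketch): trace_set_options("print-parent") sets the generic
 * flag, while the "no" prefix clears it ("noprint-parent"). A string
 * that matches no generic option falls through to set_tracer_option()
 * and is tried against the current tracer's private flags.
 */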
2883
2884 static ssize_t
2885 tracing_trace_options_write(struct file *filp, const char __user *ubuf,
2886                         size_t cnt, loff_t *ppos)
2887 {
2888         char buf[64];
2889
2890         if (cnt >= sizeof(buf))
2891                 return -EINVAL;
2892
2893         if (copy_from_user(&buf, ubuf, cnt))
2894                 return -EFAULT;
2895
        buf[cnt] = 0;   /* copy_from_user() does not NUL-terminate */

2896         trace_set_options(buf);
2897
2898         *ppos += cnt;
2899
2900         return cnt;
2901 }
2902
2903 static int tracing_trace_options_open(struct inode *inode, struct file *file)
2904 {
2905         if (tracing_disabled)
2906                 return -ENODEV;
2907         return single_open(file, tracing_trace_options_show, NULL);
2908 }
2909
2910 static const struct file_operations tracing_iter_fops = {
2911         .open           = tracing_trace_options_open,
2912         .read           = seq_read,
2913         .llseek         = seq_lseek,
2914         .release        = single_release,
2915         .write          = tracing_trace_options_write,
2916 };
2917
2918 static const char readme_msg[] =
2919         "tracing mini-HOWTO:\n\n"
2920         "# mount -t debugfs nodev /sys/kernel/debug\n\n"
2921         "# cat /sys/kernel/debug/tracing/available_tracers\n"
2922         "wakeup wakeup_rt preemptirqsoff preemptoff irqsoff function nop\n\n"
2923         "# cat /sys/kernel/debug/tracing/current_tracer\n"
2924         "nop\n"
2925         "# echo wakeup > /sys/kernel/debug/tracing/current_tracer\n"
2926         "# cat /sys/kernel/debug/tracing/current_tracer\n"
2927         "wakeup\n"
2928         "# cat /sys/kernel/debug/tracing/trace_options\n"
2929         "noprint-parent nosym-offset nosym-addr noverbose\n"
2930         "# echo print-parent > /sys/kernel/debug/tracing/trace_options\n"
2931         "# echo 1 > /sys/kernel/debug/tracing/tracing_on\n"
2932         "# cat /sys/kernel/debug/tracing/trace > /tmp/trace.txt\n"
2933         "# echo 0 > /sys/kernel/debug/tracing/tracing_on\n"
2934 ;
2935
2936 static ssize_t
2937 tracing_readme_read(struct file *filp, char __user *ubuf,
2938                        size_t cnt, loff_t *ppos)
2939 {
2940         return simple_read_from_buffer(ubuf, cnt, ppos,
2941                                         readme_msg, strlen(readme_msg));
2942 }
2943
2944 static const struct file_operations tracing_readme_fops = {
2945         .open           = tracing_open_generic,
2946         .read           = tracing_readme_read,
2947         .llseek         = generic_file_llseek,
2948 };
2949
2950 static ssize_t
2951 tracing_saved_cmdlines_read(struct file *file, char __user *ubuf,
2952                                 size_t cnt, loff_t *ppos)
2953 {
2954         char *buf_comm;
2955         char *file_buf;
2956         char *buf;
2957         int len = 0;
2958         int pid;
2959         int i;
2960
2961         file_buf = kmalloc(SAVED_CMDLINES*(16+TASK_COMM_LEN), GFP_KERNEL);
2962         if (!file_buf)
2963                 return -ENOMEM;
2964
2965         buf_comm = kmalloc(TASK_COMM_LEN, GFP_KERNEL);
2966         if (!buf_comm) {
2967                 kfree(file_buf);
2968                 return -ENOMEM;
2969         }
2970
2971         buf = file_buf;
2972
2973         for (i = 0; i < SAVED_CMDLINES; i++) {
2974                 int r;
2975
2976                 pid = map_cmdline_to_pid[i];
2977                 if (pid == -1 || pid == NO_CMDLINE_MAP)
2978                         continue;
2979
2980                 trace_find_cmdline(pid, buf_comm);
2981                 r = sprintf(buf, "%d %s\n", pid, buf_comm);
2982                 buf += r;
2983                 len += r;
2984         }
2985
2986         len = simple_read_from_buffer(ubuf, cnt, ppos,
2987                                       file_buf, len);
2988
2989         kfree(file_buf);
2990         kfree(buf_comm);
2991
2992         return len;
2993 }
2994
2995 static const struct file_operations tracing_saved_cmdlines_fops = {
2996     .open       = tracing_open_generic,
2997     .read       = tracing_saved_cmdlines_read,
2998     .llseek     = generic_file_llseek,
2999 };
3000
3001 static ssize_t
3002 tracing_set_trace_read(struct file *filp, char __user *ubuf,
3003                        size_t cnt, loff_t *ppos)
3004 {
3005         char buf[MAX_TRACER_SIZE+2];
3006         int r;
3007
3008         mutex_lock(&trace_types_lock);
3009         if (current_trace)
3010                 r = sprintf(buf, "%s\n", current_trace->name);
3011         else
3012                 r = sprintf(buf, "\n");
3013         mutex_unlock(&trace_types_lock);
3014
3015         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3016 }
3017
3018 int tracer_init(struct tracer *t, struct trace_array *tr)
3019 {
3020         tracing_reset_online_cpus(tr);
3021         return t->init(tr);
3022 }
3023
3024 static void set_buffer_entries(struct trace_array *tr, unsigned long val)
3025 {
3026         int cpu;
3027         for_each_tracing_cpu(cpu)
3028                 tr->data[cpu]->entries = val;
3029 }
3030
3031 static int __tracing_resize_ring_buffer(unsigned long size, int cpu)
3032 {
3033         int ret;
3034
3035         /*
3036          * If kernel or user changes the size of the ring buffer
3037          * we use the size that was given, and we can forget about
3038          * expanding it later.
3039          */
3040         ring_buffer_expanded = 1;
3041
3042         /* May be called before buffers are initialized */
3043         if (!global_trace.buffer)
3044                 return 0;
3045
3046         ret = ring_buffer_resize(global_trace.buffer, size, cpu);
3047         if (ret < 0)
3048                 return ret;
3049
3050         if (!current_trace->use_max_tr)
3051                 goto out;
3052
3053         ret = ring_buffer_resize(max_tr.buffer, size, cpu);
3054         if (ret < 0) {
3055                 int r = 0;
3056
3057                 if (cpu == RING_BUFFER_ALL_CPUS) {
3058                         int i;
3059                         for_each_tracing_cpu(i) {
3060                                 r = ring_buffer_resize(global_trace.buffer,
3061                                                 global_trace.data[i]->entries,
3062                                                 i);
3063                                 if (r < 0)
3064                                         break;
3065                         }
3066                 } else {
3067                         r = ring_buffer_resize(global_trace.buffer,
3068                                                 global_trace.data[cpu]->entries,
3069                                                 cpu);
3070                 }
3071
3072                 if (r < 0) {
3073                         /*
3074                          * AARGH! We are left with a different-sized
3075                          * max buffer!!!!
3076                          * The max buffer is our "snapshot" buffer.
3077                          * When a tracer needs a snapshot (one of the
3078                          * latency tracers), it swaps the max buffer
3079                          * with the saved snapshot. We succeeded in
3080                          * updating the size of the main buffer, but failed
3081                          * to update the size of the max buffer. Then when we
3082                          * tried to reset the main buffer to its original size,
3083                          * we failed there too. This is very unlikely to
3084                          * happen, but if it does, warn and kill all
3085                          * tracing.
3086                          */
3087                         WARN_ON(1);
3088                         tracing_disabled = 1;
3089                 }
3090                 return ret;
3091         }
3092
3093         if (cpu == RING_BUFFER_ALL_CPUS)
3094                 set_buffer_entries(&max_tr, size);
3095         else
3096                 max_tr.data[cpu]->entries = size;
3097
3098  out:
3099         if (cpu == RING_BUFFER_ALL_CPUS)
3100                 set_buffer_entries(&global_trace, size);
3101         else
3102                 global_trace.data[cpu]->entries = size;
3103
3104         return ret;
3105 }
3106
3107 static ssize_t tracing_resize_ring_buffer(unsigned long size, int cpu_id)
3108 {
3109         int ret = size;
3110
3111         mutex_lock(&trace_types_lock);
3112
3113         if (cpu_id != RING_BUFFER_ALL_CPUS) {
3114                 /* make sure this cpu is enabled in the mask */
3115                 if (!cpumask_test_cpu(cpu_id, tracing_buffer_mask)) {
3116                         ret = -EINVAL;
3117                         goto out;
3118                 }
3119         }
3120
3121         ret = __tracing_resize_ring_buffer(size, cpu_id);
3122         if (ret < 0)
3123                 ret = -ENOMEM;
3124
3125 out:
3126         mutex_unlock(&trace_types_lock);
3127
3128         return ret;
3129 }
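
/*
 * Hypothetical usage (sketch; the buffer_size_kb file handling is
 * defined elsewhere): writing "4096" to a per-cpu size file would
 * arrive here roughly as tracing_resize_ring_buffer(4096 << 10, cpu),
 * while the global file passes RING_BUFFER_ALL_CPUS; only the all-cpus
 * case skips the tracing_buffer_mask check above.
 */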
3130
3131
3132 /**
3133  * tracing_update_buffers - used by tracing facility to expand ring buffers
3134  *
3135  * To save memory on systems where tracing is configured in but never
3136  * used, the ring buffers are set to a minimum size. Once a user
3137  * starts to use the tracing facility, they need to grow to their
3138  * default size.
3139  *
3140  * This function is to be called when a tracer is about to be used.
3141  */
3142 int tracing_update_buffers(void)
3143 {
3144         int ret = 0;
3145
3146         mutex_lock(&trace_types_lock);
3147         if (!ring_buffer_expanded)
3148                 ret = __tracing_resize_ring_buffer(trace_buf_size,
3149                                                 RING_BUFFER_ALL_CPUS);
3150         mutex_unlock(&trace_types_lock);
3151
3152         return ret;
3153 }
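
/*
 * trace_printk_init_buffers() above uses tracing_update_buffers() for
 * exactly this purpose, and tracing_set_tracer() below open-codes the
 * same !ring_buffer_expanded expansion, so the cost of full-sized
 * buffers is only paid once tracing is actually used.
 */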
3154
3155 struct trace_option_dentry;
3156
3157 static struct trace_option_dentry *
3158 create_trace_option_files(struct tracer *tracer);
3159
3160 static void
3161 destroy_trace_option_files(struct trace_option_dentry *topts);
3162
3163 static int tracing_set_tracer(const char *buf)
3164 {
3165         static struct trace_option_dentry *topts;
3166         struct trace_array *tr = &global_trace;
3167         struct tracer *t;
3168         int ret = 0;
3169
3170         mutex_lock(&trace_types_lock);
3171
3172         if (!ring_buffer_expanded) {
3173                 ret = __tracing_resize_ring_buffer(trace_buf_size,
3174                                                 RING_BUFFER_ALL_CPUS);
3175                 if (ret < 0)
3176                         goto out;
3177                 ret = 0;
3178         }
3179
3180         for (t = trace_types; t; t = t->next) {
3181                 if (strcmp(t->name, buf) == 0)
3182                         break;
3183         }
3184         if (!t) {
3185                 ret = -EINVAL;
3186                 goto out;
3187         }
3188         if (t == current_trace)
3189                 goto out;
3190
3191         trace_branch_disable();
3192         if (current_trace && current_trace->reset)
3193                 current_trace->reset(tr);
3194         if (current_trace && current_trace->use_max_tr) {
3195                 /*
3196                  * We don't free the ring buffer; instead, we resize it because
3197                  * the max_tr ring buffer has some state (e.g. ring->clock) and
3198                  * we want to preserve it.
3199                  */
3200                 ring_buffer_resize(max_tr.buffer, 1, RING_BUFFER_ALL_CPUS);
3201                 set_buffer_entries(&max_tr, 1);
3202         }
3203         destroy_trace_option_files(topts);
3204
3205         current_trace = &nop_trace;
3206
3207         topts = create_trace_option_files(t);
3208         if (t->use_max_tr) {
3209                 int cpu;
3210                 /* we need to make per cpu buffer sizes equivalent */
3211                 for_each_tracing_cpu(cpu) {
3212                         ret = ring_buffer_resize(max_tr.buffer,
3213                                                 global_trace.data[cpu]->entries,
3214                                                 cpu);
3215                         if (ret < 0)
3216                                 goto out;
3217                         max_tr.data[cpu]->entries =
3218                                         global_trace.data[cpu]->entries;
3219                 }
3220         }
3221
3222         if (t->init) {
3223                 ret = tracer_init(t, tr);
3224                 if (ret)
3225                         goto out;
3226         }
3227
3228         current_trace = t;
3229         trace_branch_enable(tr);
3230  out:
3231         mutex_unlock(&trace_types_lock);
3232
3233         return ret;
3234 }
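
/*
 * Usage sketch: "echo wakeup > current_tracer" arrives via
 * tracing_set_trace_write() below, which strips trailing whitespace and
 * calls tracing_set_tracer("wakeup"). Note the ordering above: the old
 * tracer's ->reset() runs, current_trace is parked on nop_trace, and
 * only then is the new tracer's ->init() called.
 */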
3235
3236 static ssize_t
3237 tracing_set_trace_write(struct file *filp, const char __user *ubuf,
3238                         size_t cnt, loff_t *ppos)
3239 {
3240         char buf[MAX_TRACER_SIZE+1];
3241         int i;
3242         size_t ret;
3243         int err;
3244
3245         ret = cnt;
3246
3247         if (cnt > MAX_TRACER_SIZE)
3248                 cnt = MAX_TRACER_SIZE;
3249
3250         if (copy_from_user(&buf, ubuf, cnt))
3251                 return -EFAULT;
3252
3253         buf[cnt] = 0;
3254
3255         /* strip trailing whitespace */
3256         for (i = cnt - 1; i > 0 && isspace(buf[i]); i--)
3257                 buf[i] = 0;
3258
3259         err = tracing_set_tracer(buf);
3260         if (err)
3261                 return err;
3262
3263         *ppos += ret;
3264
3265         return ret;
3266 }
3267
3268 static ssize_t
3269 tracing_max_lat_read(struct file *filp, char __user *ubuf,
3270                      size_t cnt, loff_t *ppos)
3271 {
3272         unsigned long *ptr = filp->private_data;
3273         char buf[64];
3274         int r;
3275
3276         r = snprintf(buf, sizeof(buf), "%ld\n",
3277                      *ptr == (unsigned long)-1 ? -1 : nsecs_to_usecs(*ptr));
3278         if (r > sizeof(buf))
3279                 r = sizeof(buf);
3280         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3281 }
3282
3283 static ssize_t
3284 tracing_max_lat_write(struct file *filp, const char __user *ubuf,
3285                       size_t cnt, loff_t *ppos)
3286 {
3287         unsigned long *ptr = filp->private_data;
3288         unsigned long val;
3289         int ret;
3290
3291         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3292         if (ret)
3293                 return ret;
3294
3295         *ptr = val * 1000;
3296
3297         return cnt;
3298 }
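
/*
 * Note the units: the value written is taken in microseconds and stored
 * multiplied by 1000 (nanoseconds), while tracing_max_lat_read() above
 * converts back with nsecs_to_usecs().
 */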
3299
3300 static int tracing_open_pipe(struct inode *inode, struct file *filp)
3301 {
3302         long cpu_file = (long) inode->i_private;
3303         struct trace_iterator *iter;
3304         int ret = 0;
3305
3306         if (tracing_disabled)
3307                 return -ENODEV;
3308
3309         mutex_lock(&trace_types_lock);
3310
3311         /* create a buffer to store the information to pass to userspace */
3312         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
3313         if (!iter) {
3314                 ret = -ENOMEM;
3315                 goto out;
3316         }
3317
3318         /*
3319          * We make a copy of the current tracer to avoid concurrent
3320          * changes to it while we are reading.
3321          */
3322         iter->trace = kmalloc(sizeof(*iter->trace), GFP_KERNEL);
3323         if (!iter->trace) {
3324                 ret = -ENOMEM;
3325                 goto fail;
3326         }
3327         if (current_trace)
3328                 *iter->trace = *current_trace;
3329
3330         if (!alloc_cpumask_var(&iter->started, GFP_KERNEL)) {
3331                 ret = -ENOMEM;
3332                 goto fail;
3333         }
3334
3335         /* trace pipe does not show start of buffer */
3336         cpumask_setall(iter->started);
3337
3338         if (trace_flags & TRACE_ITER_LATENCY_FMT)
3339                 iter->iter_flags |= TRACE_FILE_LAT_FMT;
3340
3341         iter->cpu_file = cpu_file;
3342         iter->tr = &global_trace;
3343         mutex_init(&iter->mutex);
3344         filp->private_data = iter;
3345
3346         if (iter->trace->pipe_open)
3347                 iter->trace->pipe_open(iter);
3348
3349         nonseekable_open(inode, filp);
3350 out:
3351         mutex_unlock(&trace_types_lock);
3352         return ret;
3353
3354 fail:
3355         kfree(iter->trace);
3356         kfree(iter);
3357         mutex_unlock(&trace_types_lock);
3358         return ret;
3359 }
3360
3361 static int tracing_release_pipe(struct inode *inode, struct file *file)
3362 {
3363         struct trace_iterator *iter = file->private_data;
3364
3365         mutex_lock(&trace_types_lock);
3366
3367         if (iter->trace->pipe_close)
3368                 iter->trace->pipe_close(iter);
3369
3370         mutex_unlock(&trace_types_lock);
3371
3372         free_cpumask_var(iter->started);
3373         mutex_destroy(&iter->mutex);
3374         kfree(iter->trace);
3375         kfree(iter);
3376
3377         return 0;
3378 }
3379
3380 static unsigned int
3381 tracing_poll_pipe(struct file *filp, poll_table *poll_table)
3382 {
3383         struct trace_iterator *iter = filp->private_data;
3384
3385         if (trace_flags & TRACE_ITER_BLOCK) {
3386                 /*
3387                  * Always select as readable when in blocking mode
3388                  */
3389                 return POLLIN | POLLRDNORM;
3390         } else {
3391                 if (!trace_empty(iter))
3392                         return POLLIN | POLLRDNORM;
3393                 poll_wait(filp, &trace_wait, poll_table);
3394                 if (!trace_empty(iter))
3395                         return POLLIN | POLLRDNORM;
3396
3397                 return 0;
3398         }
3399 }
3400
3401 /*
3402  * This is a makeshift waitqueue.
3403  * A tracer might use this callback in some rare cases:
3404  *
3405  *  1) the current tracer might hold the runqueue lock when it wakes up
3406  *     a reader, hence a deadlock (sched, function, and function graph tracers)
3407  *  2) the function tracers trace all functions, and we don't want
3408  *     the overhead of calling wake_up and friends
3409  *     (and of tracing them too)
3410  *
3411  * Either way, this really is a very primitive wakeup.
3412  */
3413 void poll_wait_pipe(struct trace_iterator *iter)
3414 {
3415         set_current_state(TASK_INTERRUPTIBLE);
3416         /* sleep for 100 msecs, and try again. */
3417         schedule_timeout(HZ / 10);
3418 }
3419
3420 /* Must be called with trace_types_lock mutex held. */
3421 static int tracing_wait_pipe(struct file *filp)
3422 {
3423         struct trace_iterator *iter = filp->private_data;
3424
3425         while (trace_empty(iter)) {
3426
3427                 if (filp->f_flags & O_NONBLOCK) {
3428                         return -EAGAIN;
3429                 }
3430
3431                 mutex_unlock(&iter->mutex);
3432
3433                 iter->trace->wait_pipe(iter);
3434
3435                 mutex_lock(&iter->mutex);
3436
3437                 if (signal_pending(current))
3438                         return -EINTR;
3439
3440                 /*
3441                  * We block until we read something and tracing is enabled.
3442                  * We still block if tracing is disabled, but we have never
3443                  * read anything. This allows a user to cat this file, and
3444                  * then enable tracing. But after we have read something,
3445                  * we give an EOF when tracing is again disabled.
3446                  *
3447                  * iter->pos will be 0 if we haven't read anything.
3448                  */
3449                 if (tracing_is_enabled() && iter->pos)
3450                         break;
3451         }
3452
3453         return 1;
3454 }
3455
3456 /*
3457  * Consumer reader.
3458  */
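/*
 * Note that, unlike the "trace" file, reads from trace_pipe consume
 * the events they return (trace_consume() below removes them from the
 * ring buffer).
 */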
3459 static ssize_t
3460 tracing_read_pipe(struct file *filp, char __user *ubuf,
3461                   size_t cnt, loff_t *ppos)
3462 {
3463         struct trace_iterator *iter = filp->private_data;
3464         static struct tracer *old_tracer;
3465         ssize_t sret;
3466
3467         /* return any leftover data */
3468         sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
3469         if (sret != -EBUSY)
3470                 return sret;
3471
3472         trace_seq_init(&iter->seq);
3473
3474         /* copy the tracer to avoid using a global lock all around */
3475         mutex_lock(&trace_types_lock);
3476         if (unlikely(old_tracer != current_trace && current_trace)) {
3477                 old_tracer = current_trace;
3478                 *iter->trace = *current_trace;
3479         }
3480         mutex_unlock(&trace_types_lock);
3481
3482         /*
3483          * Avoid more than one consumer on a single file descriptor.
3484          * This is just a matter of trace coherency; the ring buffer itself
3485          * is protected.
3486          */
3487         mutex_lock(&iter->mutex);
3488         if (iter->trace->read) {
3489                 sret = iter->trace->read(iter, filp, ubuf, cnt, ppos);
3490                 if (sret)
3491                         goto out;
3492         }
3493
3494 waitagain:
3495         sret = tracing_wait_pipe(filp);
3496         if (sret <= 0)
3497                 goto out;
3498
3499         /* stop when tracing is finished */
3500         if (trace_empty(iter)) {
3501                 sret = 0;
3502                 goto out;
3503         }
3504
3505         if (cnt >= PAGE_SIZE)
3506                 cnt = PAGE_SIZE - 1;
3507
3508         /* reset all but tr, trace, and overruns */
3509         memset(&iter->seq, 0,
3510                sizeof(struct trace_iterator) -
3511                offsetof(struct trace_iterator, seq));
3512         iter->pos = -1;
3513
3514         trace_event_read_lock();
3515         trace_access_lock(iter->cpu_file);
3516         while (trace_find_next_entry_inc(iter) != NULL) {
3517                 enum print_line_t ret;
3518                 int len = iter->seq.len;
3519
3520                 ret = print_trace_line(iter);
3521                 if (ret == TRACE_TYPE_PARTIAL_LINE) {
3522                         /* don't print partial lines */
3523                         iter->seq.len = len;
3524                         break;
3525                 }
3526                 if (ret != TRACE_TYPE_NO_CONSUME)
3527                         trace_consume(iter);
3528
3529                 if (iter->seq.len >= cnt)
3530                         break;
3531
3532                 /*
3533                  * Setting the full flag means we reached the trace_seq buffer
3534                  * size, so we should have left via the partial-line condition
3535                  * above; if not, one of the trace_seq_* functions was misused.
3536                  */
3537                 WARN_ONCE(iter->seq.full, "full flag set for trace type %d",
3538                           iter->ent->type);
3539         }
3540         trace_access_unlock(iter->cpu_file);
3541         trace_event_read_unlock();
3542
3543         /* Now copy what we have to the user */
3544         sret = trace_seq_to_user(&iter->seq, ubuf, cnt);
3545         if (iter->seq.readpos >= iter->seq.len)
3546                 trace_seq_init(&iter->seq);
3547
3548         /*
3549          * If there was nothing to send to user, in spite of consuming trace
3550          * entries, go back to wait for more entries.
3551          */
3552         if (sret == -EBUSY)
3553                 goto waitagain;
3554
3555 out:
3556         mutex_unlock(&iter->mutex);
3557
3558         return sret;
3559 }
3560
3561 static void tracing_pipe_buf_release(struct pipe_inode_info *pipe,
3562                                      struct pipe_buffer *buf)
3563 {
3564         __free_page(buf->page);
3565 }
3566
3567 static void tracing_spd_release_pipe(struct splice_pipe_desc *spd,
3568                                      unsigned int idx)
3569 {
3570         __free_page(spd->pages[idx]);
3571 }
3572
3573 static const struct pipe_buf_operations tracing_pipe_buf_ops = {
3574         .can_merge              = 0,
3575         .map                    = generic_pipe_buf_map,
3576         .unmap                  = generic_pipe_buf_unmap,
3577         .confirm                = generic_pipe_buf_confirm,
3578         .release                = tracing_pipe_buf_release,
3579         .steal                  = generic_pipe_buf_steal,
3580         .get                    = generic_pipe_buf_get,
3581 };
3582
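/*
 * Fill at most one page worth of formatted trace data into iter->seq,
 * consuming entries as we go, and return how much of the caller's
 * byte budget ("rem") is left over.
 */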
3583 static size_t
3584 tracing_fill_pipe_page(size_t rem, struct trace_iterator *iter)
3585 {
3586         size_t count;
3587         int ret;
3588
3589         /* Seq buffer is page-sized, exactly what we need. */
3590         for (;;) {
3591                 count = iter->seq.len;
3592                 ret = print_trace_line(iter);
3593                 count = iter->seq.len - count;
3594                 if (rem < count) {
3595                         rem = 0;
3596                         iter->seq.len -= count;
3597                         break;
3598                 }
3599                 if (ret == TRACE_TYPE_PARTIAL_LINE) {
3600                         iter->seq.len -= count;
3601                         break;
3602                 }
3603
3604                 if (ret != TRACE_TYPE_NO_CONSUME)
3605                         trace_consume(iter);
3606                 rem -= count;
3607                 if (!trace_find_next_entry_inc(iter)) {
3608                         rem = 0;
3609                         iter->ent = NULL;
3610                         break;
3611                 }
3612         }
3613
3614         return rem;
3615 }
3616
3617 static ssize_t tracing_splice_read_pipe(struct file *filp,
3618                                         loff_t *ppos,
3619                                         struct pipe_inode_info *pipe,
3620                                         size_t len,
3621                                         unsigned int flags)
3622 {
3623         struct page *pages_def[PIPE_DEF_BUFFERS];
3624         struct partial_page partial_def[PIPE_DEF_BUFFERS];
3625         struct trace_iterator *iter = filp->private_data;
3626         struct splice_pipe_desc spd = {
3627                 .pages          = pages_def,
3628                 .partial        = partial_def,
3629                 .nr_pages       = 0, /* This gets updated below. */
3630                 .nr_pages_max   = PIPE_DEF_BUFFERS,
3631                 .flags          = flags,
3632                 .ops            = &tracing_pipe_buf_ops,
3633                 .spd_release    = tracing_spd_release_pipe,
3634         };
3635         static struct tracer *old_tracer;
3636         ssize_t ret;
3637         size_t rem;
3638         unsigned int i;
3639
3640         if (splice_grow_spd(pipe, &spd))
3641                 return -ENOMEM;
3642
3643         /* copy the tracer to avoid using a global lock all around */
3644         mutex_lock(&trace_types_lock);
3645         if (unlikely(old_tracer != current_trace && current_trace)) {
3646                 old_tracer = current_trace;
3647                 *iter->trace = *current_trace;
3648         }
3649         mutex_unlock(&trace_types_lock);
3650
3651         mutex_lock(&iter->mutex);
3652
3653         if (iter->trace->splice_read) {
3654                 ret = iter->trace->splice_read(iter, filp,
3655                                                ppos, pipe, len, flags);
3656                 if (ret)
3657                         goto out_err;
3658         }
3659
3660         ret = tracing_wait_pipe(filp);
3661         if (ret <= 0)
3662                 goto out_err;
3663
3664         if (!iter->ent && !trace_find_next_entry_inc(iter)) {
3665                 ret = -EFAULT;
3666                 goto out_err;
3667         }
3668
3669         trace_event_read_lock();
3670         trace_access_lock(iter->cpu_file);
3671
3672         /* Fill as many pages as possible. */
3673         for (i = 0, rem = len; i < pipe->buffers && rem; i++) {
3674                 spd.pages[i] = alloc_page(GFP_KERNEL);
3675                 if (!spd.pages[i])
3676                         break;
3677
3678                 rem = tracing_fill_pipe_page(rem, iter);
3679
3680                 /* Copy the data into the page, so we can start over. */
3681                 ret = trace_seq_to_buffer(&iter->seq,
3682                                           page_address(spd.pages[i]),
3683                                           iter->seq.len);
3684                 if (ret < 0) {
3685                         __free_page(spd.pages[i]);
3686                         break;
3687                 }
3688                 spd.partial[i].offset = 0;
3689                 spd.partial[i].len = iter->seq.len;
3690
3691                 trace_seq_init(&iter->seq);
3692         }
3693
3694         trace_access_unlock(iter->cpu_file);
3695         trace_event_read_unlock();
3696         mutex_unlock(&iter->mutex);
3697
3698         spd.nr_pages = i;
3699
3700         ret = splice_to_pipe(pipe, &spd);
3701 out:
3702         splice_shrink_spd(&spd);
3703         return ret;
3704
3705 out_err:
3706         mutex_unlock(&iter->mutex);
3707         goto out;
3708 }
3709
3710 struct ftrace_entries_info {
3711         struct trace_array      *tr;
3712         int                     cpu;
3713 };
3714
3715 static int tracing_entries_open(struct inode *inode, struct file *filp)
3716 {
3717         struct ftrace_entries_info *info;
3718
3719         if (tracing_disabled)
3720                 return -ENODEV;
3721
3722         info = kzalloc(sizeof(*info), GFP_KERNEL);
3723         if (!info)
3724                 return -ENOMEM;
3725
3726         info->tr = &global_trace;
3727         info->cpu = (unsigned long)inode->i_private;
3728
3729         filp->private_data = info;
3730
3731         return 0;
3732 }
3733
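/*
 * buffer_size_kb read side: for the all-cpus file we print a single
 * size when every per-cpu buffer is the same size and "X" otherwise;
 * before the ring buffer has been expanded, the pending default is
 * shown as "current (expanded: target)", both in KB.
 */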
3734 static ssize_t
3735 tracing_entries_read(struct file *filp, char __user *ubuf,
3736                      size_t cnt, loff_t *ppos)
3737 {
3738         struct ftrace_entries_info *info = filp->private_data;
3739         struct trace_array *tr = info->tr;
3740         char buf[64];
3741         int r = 0;
3742         ssize_t ret;
3743
3744         mutex_lock(&trace_types_lock);
3745
3746         if (info->cpu == RING_BUFFER_ALL_CPUS) {
3747                 int cpu, buf_size_same;
3748                 unsigned long size;
3749
3750                 size = 0;
3751                 buf_size_same = 1;
3752                 /* check if all cpu sizes are the same */
3753                 for_each_tracing_cpu(cpu) {
3754                         /* fill in the size from the first enabled cpu */
3755                         if (size == 0)
3756                                 size = tr->data[cpu]->entries;
3757                         if (size != tr->data[cpu]->entries) {
3758                                 buf_size_same = 0;
3759                                 break;
3760                         }
3761                 }
3762
3763                 if (buf_size_same) {
3764                         if (!ring_buffer_expanded)
3765                                 r = sprintf(buf, "%lu (expanded: %lu)\n",
3766                                             size >> 10,
3767                                             trace_buf_size >> 10);
3768                         else
3769                                 r = sprintf(buf, "%lu\n", size >> 10);
3770                 } else
3771                         r = sprintf(buf, "X\n");
3772         } else
3773                 r = sprintf(buf, "%lu\n", tr->data[info->cpu]->entries >> 10);
3774
3775         mutex_unlock(&trace_types_lock);
3776
3777         ret = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3778         return ret;
3779 }
3780
3781 static ssize_t
3782 tracing_entries_write(struct file *filp, const char __user *ubuf,
3783                       size_t cnt, loff_t *ppos)
3784 {
3785         struct ftrace_entries_info *info = filp->private_data;
3786         unsigned long val;
3787         int ret;
3788
3789         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
3790         if (ret)
3791                 return ret;
3792
3793         /* must have at least 1 entry */
3794         if (!val)
3795                 return -EINVAL;
3796
3797         /* value is in KB */
3798         val <<= 10;
3799
3800         ret = tracing_resize_ring_buffer(val, info->cpu);
3801         if (ret < 0)
3802                 return ret;
3803
3804         *ppos += cnt;
3805
3806         return cnt;
3807 }
3808
3809 static int
3810 tracing_entries_release(struct inode *inode, struct file *filp)
3811 {
3812         struct ftrace_entries_info *info = filp->private_data;
3813
3814         kfree(info);
3815
3816         return 0;
3817 }
3818
3819 static ssize_t
3820 tracing_total_entries_read(struct file *filp, char __user *ubuf,
3821                                 size_t cnt, loff_t *ppos)
3822 {
3823         struct trace_array *tr = filp->private_data;
3824         char buf[64];
3825         int r, cpu;
3826         unsigned long size = 0, expanded_size = 0;
3827
3828         mutex_lock(&trace_types_lock);
3829         for_each_tracing_cpu(cpu) {
3830                 size += tr->data[cpu]->entries >> 10;
3831                 if (!ring_buffer_expanded)
3832                         expanded_size += trace_buf_size >> 10;
3833         }
3834         if (ring_buffer_expanded)
3835                 r = sprintf(buf, "%lu\n", size);
3836         else
3837                 r = sprintf(buf, "%lu (expanded: %lu)\n", size, expanded_size);
3838         mutex_unlock(&trace_types_lock);
3839
3840         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
3841 }
3842
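/*
 * "free_buffer" semantics: the write itself is a no-op (so plain
 * "echo > free_buffer" succeeds); the real work happens on release,
 * where tracing is optionally switched off and the ring buffer is
 * resized to zero.
 */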
3843 static ssize_t
3844 tracing_free_buffer_write(struct file *filp, const char __user *ubuf,
3845                           size_t cnt, loff_t *ppos)
3846 {
3847         /*
3848          * There is no need to read what the user has written; this function
3849          * exists just to make sure that there is no error when "echo" is used.
3850          */
3851
3852         *ppos += cnt;
3853
3854         return cnt;
3855 }
3856
3857 static int
3858 tracing_free_buffer_release(struct inode *inode, struct file *filp)
3859 {
3860         /* disable tracing? */
3861         if (trace_flags & TRACE_ITER_STOP_ON_FREE)
3862                 tracing_off();
3863         /* resize the ring buffer to 0 */
3864         tracing_resize_ring_buffer(0, RING_BUFFER_ALL_CPUS);
3865
3866         return 0;
3867 }
3868
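/*
 * trace_marker: strings written here by userspace are injected into
 * the ring buffer as TRACE_PRINT events.  An illustrative use,
 * assuming debugfs is mounted at /sys/kernel/debug:
 *
 *   echo "hello from userspace" > /sys/kernel/debug/tracing/trace_marker
 */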
3869 static ssize_t
3870 tracing_mark_write(struct file *filp, const char __user *ubuf,
3871                                         size_t cnt, loff_t *fpos)
3872 {
3873         unsigned long addr = (unsigned long)ubuf;
3874         struct ring_buffer_event *event;
3875         struct ring_buffer *buffer;
3876         struct print_entry *entry;
3877         unsigned long irq_flags;
3878         struct page *pages[2];
3879         void *map_page[2];
3880         int nr_pages = 1;
3881         ssize_t written;
3882         int offset;
3883         int size;
3884         int len;
3885         int ret;
3886         int i;
3887
3888         if (tracing_disabled)
3889                 return -EINVAL;
3890
3891         if (!(trace_flags & TRACE_ITER_MARKERS))
3892                 return -EINVAL;
3893
3894         if (cnt > TRACE_BUF_SIZE)
3895                 cnt = TRACE_BUF_SIZE;
3896
3897         /*
3898          * Userspace is injecting traces into the kernel trace buffer.
3899          * We want to be as non-intrusive as possible.
3900          * To do so, we do not want to allocate any special buffers
3901          * or take any locks, but instead write the userspace data
3902          * straight into the ring buffer.
3903          *
3904          * First we need to pin the userspace buffer into memory.
3905          * It is most likely resident already, since userspace just
3906          * referenced it, but there is no guarantee. Using get_user_pages_fast()
3907          * and kmap_atomic/kunmap_atomic() we can get access to the
3908          * pages directly. We then write the data directly into the
3909          * ring buffer.
3910          */
3911         BUILD_BUG_ON(TRACE_BUF_SIZE >= PAGE_SIZE);
3912
3913         /* check if we cross pages */
3914         if ((addr & PAGE_MASK) != ((addr + cnt) & PAGE_MASK))
3915                 nr_pages = 2;
3916
3917         offset = addr & (PAGE_SIZE - 1);
3918         addr &= PAGE_MASK;
3919
3920         ret = get_user_pages_fast(addr, nr_pages, 0, pages);
3921         if (ret < nr_pages) {
3922                 while (--ret >= 0)
3923                         put_page(pages[ret]);
3924                 written = -EFAULT;
3925                 goto out;
3926         }
3927
3928         for (i = 0; i < nr_pages; i++)
3929                 map_page[i] = kmap_atomic(pages[i]);
3930
3931         local_save_flags(irq_flags);
3932         size = sizeof(*entry) + cnt + 2; /* possible '\n' plus '\0' */
3933         buffer = global_trace.buffer;
3934         event = trace_buffer_lock_reserve(buffer, TRACE_PRINT, size,
3935                                           irq_flags, preempt_count());
3936         if (!event) {
3937                 /* Ring buffer disabled, return as if not open for write */
3938                 written = -EBADF;
3939                 goto out_unlock;
3940         }
3941
3942         entry = ring_buffer_event_data(event);
3943         entry->ip = _THIS_IP_;
3944
3945         if (nr_pages == 2) {
3946                 len = PAGE_SIZE - offset;
3947                 memcpy(&entry->buf, map_page[0] + offset, len);
3948                 memcpy(&entry->buf[len], map_page[1], cnt - len);
3949         } else
3950                 memcpy(&entry->buf, map_page[0] + offset, cnt);
3951
3952         if (entry->buf[cnt - 1] != '\n') {
3953                 entry->buf[cnt] = '\n';
3954                 entry->buf[cnt + 1] = '\0';
3955         } else
3956                 entry->buf[cnt] = '\0';
3957
3958         __buffer_unlock_commit(buffer, event);
3959
3960         written = cnt;
3961
3962         *fpos += written;
3963
3964  out_unlock:
3965         for (i = 0; i < nr_pages; i++) {
3966                 kunmap_atomic(map_page[i]);
3967                 put_page(pages[i]);
3968         }
3969  out:
3970         return written;
3971 }
3972
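/*
 * trace_clock: reading lists the available clocks with the current one
 * in brackets; writing a clock name switches to it and resets the
 * buffers (see tracing_clock_write()).  Illustrative session, clock
 * names vary with the trace_clocks[] table:
 *
 *   # cat trace_clock
 *   [local] global counter
 *   # echo global > trace_clock
 */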
3973 static int tracing_clock_show(struct seq_file *m, void *v)
3974 {
3975         int i;
3976
3977         for (i = 0; i < ARRAY_SIZE(trace_clocks); i++)
3978                 seq_printf(m,
3979                         "%s%s%s%s", i ? " " : "",
3980                         i == trace_clock_id ? "[" : "", trace_clocks[i].name,
3981                         i == trace_clock_id ? "]" : "");
3982         seq_putc(m, '\n');
3983
3984         return 0;
3985 }
3986
3987 static ssize_t tracing_clock_write(struct file *filp, const char __user *ubuf,
3988                                    size_t cnt, loff_t *fpos)
3989 {
3990         char buf[64];
3991         const char *clockstr;
3992         int i;
3993
3994         if (cnt >= sizeof(buf))
3995                 return -EINVAL;
3996
3997         if (copy_from_user(&buf, ubuf, cnt))
3998                 return -EFAULT;
3999
4000         buf[cnt] = 0;
4001
4002         clockstr = strstrip(buf);
4003
4004         for (i = 0; i < ARRAY_SIZE(trace_clocks); i++) {
4005                 if (strcmp(trace_clocks[i].name, clockstr) == 0)
4006                         break;
4007         }
4008         if (i == ARRAY_SIZE(trace_clocks))
4009                 return -EINVAL;
4010
4011         trace_clock_id = i;
4012
4013         mutex_lock(&trace_types_lock);
4014
4015         ring_buffer_set_clock(global_trace.buffer, trace_clocks[i].func);
4016         if (max_tr.buffer)
4017                 ring_buffer_set_clock(max_tr.buffer, trace_clocks[i].func);
4018
4019         /*
4020          * New clock may not be consistent with the previous clock.
4021          * Reset the buffer so that it doesn't have incomparable timestamps.
4022          */
4023         tracing_reset_online_cpus(&global_trace);
4024         if (max_tr.buffer)
4025                 tracing_reset_online_cpus(&max_tr);
4026
4027         mutex_unlock(&trace_types_lock);
4028
4029         *fpos += cnt;
4030
4031         return cnt;
4032 }
4033
4034 static int tracing_clock_open(struct inode *inode, struct file *file)
4035 {
4036         if (tracing_disabled)
4037                 return -ENODEV;
4038         return single_open(file, tracing_clock_show, NULL);
4039 }
4040
4041 static const struct file_operations tracing_max_lat_fops = {
4042         .open           = tracing_open_generic,
4043         .read           = tracing_max_lat_read,
4044         .write          = tracing_max_lat_write,
4045         .llseek         = generic_file_llseek,
4046 };
4047
4048 static const struct file_operations set_tracer_fops = {
4049         .open           = tracing_open_generic,
4050         .read           = tracing_set_trace_read,
4051         .write          = tracing_set_trace_write,
4052         .llseek         = generic_file_llseek,
4053 };
4054
4055 static const struct file_operations tracing_pipe_fops = {
4056         .open           = tracing_open_pipe,
4057         .poll           = tracing_poll_pipe,
4058         .read           = tracing_read_pipe,
4059         .splice_read    = tracing_splice_read_pipe,
4060         .release        = tracing_release_pipe,
4061         .llseek         = no_llseek,
4062 };
4063
4064 static const struct file_operations tracing_entries_fops = {
4065         .open           = tracing_entries_open,
4066         .read           = tracing_entries_read,
4067         .write          = tracing_entries_write,
4068         .release        = tracing_entries_release,
4069         .llseek         = generic_file_llseek,
4070 };
4071
4072 static const struct file_operations tracing_total_entries_fops = {
4073         .open           = tracing_open_generic,
4074         .read           = tracing_total_entries_read,
4075         .llseek         = generic_file_llseek,
4076 };
4077
4078 static const struct file_operations tracing_free_buffer_fops = {
4079         .write          = tracing_free_buffer_write,
4080         .release        = tracing_free_buffer_release,
4081 };
4082
4083 static const struct file_operations tracing_mark_fops = {
4084         .open           = tracing_open_generic,
4085         .write          = tracing_mark_write,
4086         .llseek         = generic_file_llseek,
4087 };
4088
4089 static const struct file_operations trace_clock_fops = {
4090         .open           = tracing_clock_open,
4091         .read           = seq_read,
4092         .llseek         = seq_lseek,
4093         .release        = single_release,
4094         .write          = tracing_clock_write,
4095 };
4096
4097 struct ftrace_buffer_info {
4098         struct trace_array      *tr;
4099         void                    *spare;
4100         int                     cpu;
4101         unsigned int            read;
4102 };
4103
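/*
 * per_cpu/cpuN/trace_pipe_raw support: reads return raw, page-sized
 * chunks of the ring buffer.  info->spare caches one ring buffer page
 * between reads so a short read can continue where the last one
 * stopped (tracked by info->read).
 */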
4104 static int tracing_buffers_open(struct inode *inode, struct file *filp)
4105 {
4106         int cpu = (int)(long)inode->i_private;
4107         struct ftrace_buffer_info *info;
4108
4109         if (tracing_disabled)
4110                 return -ENODEV;
4111
4112         info = kzalloc(sizeof(*info), GFP_KERNEL);
4113         if (!info)
4114                 return -ENOMEM;
4115
4116         info->tr        = &global_trace;
4117         info->cpu       = cpu;
4118         info->spare     = NULL;
4119         /* Force reading ring buffer for first read */
4120         info->read      = (unsigned int)-1;
4121
4122         filp->private_data = info;
4123
4124         return nonseekable_open(inode, filp);
4125 }
4126
4127 static ssize_t
4128 tracing_buffers_read(struct file *filp, char __user *ubuf,
4129                      size_t count, loff_t *ppos)
4130 {
4131         struct ftrace_buffer_info *info = filp->private_data;
4132         ssize_t ret;
4133         size_t size;
4134
4135         if (!count)
4136                 return 0;
4137
4138         if (!info->spare)
4139                 info->spare = ring_buffer_alloc_read_page(info->tr->buffer, info->cpu);
4140         if (!info->spare)
4141                 return -ENOMEM;
4142
4143         /* Do we have previous read data to read? */
4144         if (info->read < PAGE_SIZE)
4145                 goto read;
4146
4147         trace_access_lock(info->cpu);
4148         ret = ring_buffer_read_page(info->tr->buffer,
4149                                     &info->spare,
4150                                     count,
4151                                     info->cpu, 0);
4152         trace_access_unlock(info->cpu);
4153         if (ret < 0)
4154                 return 0;
4155
4156         info->read = 0;
4157
4158 read:
4159         size = PAGE_SIZE - info->read;
4160         if (size > count)
4161                 size = count;
4162
4163         ret = copy_to_user(ubuf, info->spare + info->read, size);
4164         if (ret == size)
4165                 return -EFAULT;
4166         size -= ret;
4167
4168         *ppos += size;
4169         info->read += size;
4170
4171         return size;
4172 }
4173
4174 static int tracing_buffers_release(struct inode *inode, struct file *file)
4175 {
4176         struct ftrace_buffer_info *info = file->private_data;
4177
4178         if (info->spare)
4179                 ring_buffer_free_read_page(info->tr->buffer, info->spare);
4180         kfree(info);
4181
4182         return 0;
4183 }
4184
4185 struct buffer_ref {
4186         struct ring_buffer      *buffer;
4187         void                    *page;
4188         int                     ref;
4189 };
4190
4191 static void buffer_pipe_buf_release(struct pipe_inode_info *pipe,
4192                                     struct pipe_buffer *buf)
4193 {
4194         struct buffer_ref *ref = (struct buffer_ref *)buf->private;
4195
4196         if (--ref->ref)
4197                 return;
4198
4199         ring_buffer_free_read_page(ref->buffer, ref->page);
4200         kfree(ref);
4201         buf->private = 0;
4202 }
4203
4204 static void buffer_pipe_buf_get(struct pipe_inode_info *pipe,
4205                                 struct pipe_buffer *buf)
4206 {
4207         struct buffer_ref *ref = (struct buffer_ref *)buf->private;
4208
4209         ref->ref++;
4210 }
4211
4212 /* Pipe buffer operations for a buffer. */
4213 static const struct pipe_buf_operations buffer_pipe_buf_ops = {
4214         .can_merge              = 0,
4215         .map                    = generic_pipe_buf_map,
4216         .unmap                  = generic_pipe_buf_unmap,
4217         .confirm                = generic_pipe_buf_confirm,
4218         .release                = buffer_pipe_buf_release,
4219         .steal                  = generic_pipe_buf_steal,
4220         .get                    = buffer_pipe_buf_get,
4221 };
4222
4223 /*
4224  * Callback from splice_to_pipe(): release the pages left at the end
4225  * of the spd in case we errored out while filling the pipe.
4226  */
4227 static void buffer_spd_release(struct splice_pipe_desc *spd, unsigned int i)
4228 {
4229         struct buffer_ref *ref =
4230                 (struct buffer_ref *)spd->partial[i].private;
4231
4232         if (--ref->ref)
4233                 return;
4234
4235         ring_buffer_free_read_page(ref->buffer, ref->page);
4236         kfree(ref);
4237         spd->partial[i].private = 0;
4238 }
4239
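/*
 * Splice side of trace_pipe_raw: ring buffer pages are handed to the
 * pipe zero-copy, refcounted through struct buffer_ref, which is why
 * both *ppos and len must stay page-aligned (see the WARN_ONCEs below).
 */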
4240 static ssize_t
4241 tracing_buffers_splice_read(struct file *file, loff_t *ppos,
4242                             struct pipe_inode_info *pipe, size_t len,
4243                             unsigned int flags)
4244 {
4245         struct ftrace_buffer_info *info = file->private_data;
4246         struct partial_page partial_def[PIPE_DEF_BUFFERS];
4247         struct page *pages_def[PIPE_DEF_BUFFERS];
4248         struct splice_pipe_desc spd = {
4249                 .pages          = pages_def,
4250                 .partial        = partial_def,
4251                 .nr_pages_max   = PIPE_DEF_BUFFERS,
4252                 .flags          = flags,
4253                 .ops            = &buffer_pipe_buf_ops,
4254                 .spd_release    = buffer_spd_release,
4255         };
4256         struct buffer_ref *ref;
4257         int entries, size, i;
4258         size_t ret;
4259
4260         if (splice_grow_spd(pipe, &spd))
4261                 return -ENOMEM;
4262
4263         if (*ppos & (PAGE_SIZE - 1)) {
4264                 WARN_ONCE(1, "Ftrace: previous read must page-align\n");
4265                 ret = -EINVAL;
4266                 goto out;
4267         }
4268
4269         if (len & (PAGE_SIZE - 1)) {
4270                 WARN_ONCE(1, "Ftrace: splice_read should page-align\n");
4271                 if (len < PAGE_SIZE) {
4272                         ret = -EINVAL;
4273                         goto out;
4274                 }
4275                 len &= PAGE_MASK;
4276         }
4277
4278         trace_access_lock(info->cpu);
4279         entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
4280
4281         for (i = 0; i < pipe->buffers && len && entries; i++, len -= PAGE_SIZE) {
4282                 struct page *page;
4283                 int r;
4284
4285                 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
4286                 if (!ref)
4287                         break;
4288
4289                 ref->ref = 1;
4290                 ref->buffer = info->tr->buffer;
4291                 ref->page = ring_buffer_alloc_read_page(ref->buffer, info->cpu);
4292                 if (!ref->page) {
4293                         kfree(ref);
4294                         break;
4295                 }
4296
4297                 r = ring_buffer_read_page(ref->buffer, &ref->page,
4298                                           len, info->cpu, 1);
4299                 if (r < 0) {
4300                         ring_buffer_free_read_page(ref->buffer, ref->page);
4301                         kfree(ref);
4302                         break;
4303                 }
4304
4305                 /*
4306                  * Zero out any leftover data; this is going to
4307                  * user land.
4308                  */
4309                 size = ring_buffer_page_len(ref->page);
4310                 if (size < PAGE_SIZE)
4311                         memset(ref->page + size, 0, PAGE_SIZE - size);
4312
4313                 page = virt_to_page(ref->page);
4314
4315                 spd.pages[i] = page;
4316                 spd.partial[i].len = PAGE_SIZE;
4317                 spd.partial[i].offset = 0;
4318                 spd.partial[i].private = (unsigned long)ref;
4319                 spd.nr_pages++;
4320                 *ppos += PAGE_SIZE;
4321
4322                 entries = ring_buffer_entries_cpu(info->tr->buffer, info->cpu);
4323         }
4324
4325         trace_access_unlock(info->cpu);
4326         spd.nr_pages = i;
4327
4328         /* did we read anything? */
4329         if (!spd.nr_pages) {
4330                 if (flags & SPLICE_F_NONBLOCK)
4331                         ret = -EAGAIN;
4332                 else
4333                         ret = 0;
4334                 /* TODO: block */
4335                 goto out;
4336         }
4337
4338         ret = splice_to_pipe(pipe, &spd);
4339         splice_shrink_spd(&spd);
4340 out:
4341         return ret;
4342 }
4343
4344 static const struct file_operations tracing_buffers_fops = {
4345         .open           = tracing_buffers_open,
4346         .read           = tracing_buffers_read,
4347         .release        = tracing_buffers_release,
4348         .splice_read    = tracing_buffers_splice_read,
4349         .llseek         = no_llseek,
4350 };
4351
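/*
 * per_cpu/cpuN/stats: emits one "name: value" line per counter.
 * Illustrative output (fields match the trace_seq_printf() calls
 * below; <n> stands in for the actual values):
 *
 *   entries: <n>
 *   overrun: <n>
 *   commit overrun: <n>
 *   bytes: <n>
 *   oldest event ts: <secs>.<usecs>
 *   now ts: <secs>.<usecs>
 *   dropped events: <n>
 */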
4352 static ssize_t
4353 tracing_stats_read(struct file *filp, char __user *ubuf,
4354                    size_t count, loff_t *ppos)
4355 {
4356         unsigned long cpu = (unsigned long)filp->private_data;
4357         struct trace_array *tr = &global_trace;
4358         struct trace_seq *s;
4359         unsigned long cnt;
4360         unsigned long long t;
4361         unsigned long usec_rem;
4362
4363         s = kmalloc(sizeof(*s), GFP_KERNEL);
4364         if (!s)
4365                 return -ENOMEM;
4366
4367         trace_seq_init(s);
4368
4369         cnt = ring_buffer_entries_cpu(tr->buffer, cpu);
4370         trace_seq_printf(s, "entries: %ld\n", cnt);
4371
4372         cnt = ring_buffer_overrun_cpu(tr->buffer, cpu);
4373         trace_seq_printf(s, "overrun: %ld\n", cnt);
4374
4375         cnt = ring_buffer_commit_overrun_cpu(tr->buffer, cpu);
4376         trace_seq_printf(s, "commit overrun: %ld\n", cnt);
4377
4378         cnt = ring_buffer_bytes_cpu(tr->buffer, cpu);
4379         trace_seq_printf(s, "bytes: %ld\n", cnt);
4380
4381         t = ns2usecs(ring_buffer_oldest_event_ts(tr->buffer, cpu));
4382         usec_rem = do_div(t, USEC_PER_SEC);
4383         trace_seq_printf(s, "oldest event ts: %5llu.%06lu\n", t, usec_rem);
4384
4385         t = ns2usecs(ring_buffer_time_stamp(tr->buffer, cpu));
4386         usec_rem = do_div(t, USEC_PER_SEC);
4387         trace_seq_printf(s, "now ts: %5llu.%06lu\n", t, usec_rem);
4388
4389         cnt = ring_buffer_dropped_events_cpu(tr->buffer, cpu);
4390         trace_seq_printf(s, "dropped events: %ld\n", cnt);
4391
4392         count = simple_read_from_buffer(ubuf, count, ppos, s->buffer, s->len);
4393
4394         kfree(s);
4395
4396         return count;
4397 }
4398
4399 static const struct file_operations tracing_stats_fops = {
4400         .open           = tracing_open_generic,
4401         .read           = tracing_stats_read,
4402         .llseek         = generic_file_llseek,
4403 };
4404
4405 #ifdef CONFIG_DYNAMIC_FTRACE
4406
4407 int __weak ftrace_arch_read_dyn_info(char *buf, int size)
4408 {
4409         return 0;
4410 }
4411
4412 static ssize_t
4413 tracing_read_dyn_info(struct file *filp, char __user *ubuf,
4414                   size_t cnt, loff_t *ppos)
4415 {
4416         static char ftrace_dyn_info_buffer[1024];
4417         static DEFINE_MUTEX(dyn_info_mutex);
4418         unsigned long *p = filp->private_data;
4419         char *buf = ftrace_dyn_info_buffer;
4420         int size = ARRAY_SIZE(ftrace_dyn_info_buffer);
4421         int r;
4422
4423         mutex_lock(&dyn_info_mutex);
4424         r = sprintf(buf, "%ld ", *p);
4425
4426         r += ftrace_arch_read_dyn_info(buf+r, (size-1)-r);
4427         buf[r++] = '\n';
4428
4429         r = simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4430
4431         mutex_unlock(&dyn_info_mutex);
4432
4433         return r;
4434 }
4435
4436 static const struct file_operations tracing_dyn_info_fops = {
4437         .open           = tracing_open_generic,
4438         .read           = tracing_read_dyn_info,
4439         .llseek         = generic_file_llseek,
4440 };
4441 #endif
4442
4443 static struct dentry *d_tracer;
4444
4445 struct dentry *tracing_init_dentry(void)
4446 {
4447         static int once;
4448
4449         if (d_tracer)
4450                 return d_tracer;
4451
4452         if (!debugfs_initialized())
4453                 return NULL;
4454
4455         d_tracer = debugfs_create_dir("tracing", NULL);
4456
4457         if (!d_tracer && !once) {
4458                 once = 1;
4459                 pr_warning("Could not create debugfs directory 'tracing'\n");
4460                 return NULL;
4461         }
4462
4463         return d_tracer;
4464 }
4465
4466 static struct dentry *d_percpu;
4467
4468 struct dentry *tracing_dentry_percpu(void)
4469 {
4470         static int once;
4471         struct dentry *d_tracer;
4472
4473         if (d_percpu)
4474                 return d_percpu;
4475
4476         d_tracer = tracing_init_dentry();
4477
4478         if (!d_tracer)
4479                 return NULL;
4480
4481         d_percpu = debugfs_create_dir("per_cpu", d_tracer);
4482
4483         if (!d_percpu && !once) {
4484                 once = 1;
4485                 pr_warning("Could not create debugfs directory 'per_cpu'\n");
4486                 return NULL;
4487         }
4488
4489         return d_percpu;
4490 }
4491
4492 static void tracing_init_debugfs_percpu(long cpu)
4493 {
4494         struct dentry *d_percpu = tracing_dentry_percpu();
4495         struct dentry *d_cpu;
4496         char cpu_dir[30]; /* 30 characters should be more than enough */
4497
4498         if (!d_percpu)
4499                 return;
4500
4501         snprintf(cpu_dir, 30, "cpu%ld", cpu);
4502         d_cpu = debugfs_create_dir(cpu_dir, d_percpu);
4503         if (!d_cpu) {
4504                 pr_warning("Could not create debugfs '%s' entry\n", cpu_dir);
4505                 return;
4506         }
4507
4508         /* per cpu trace_pipe */
4509         trace_create_file("trace_pipe", 0444, d_cpu,
4510                         (void *) cpu, &tracing_pipe_fops);
4511
4512         /* per cpu trace */
4513         trace_create_file("trace", 0644, d_cpu,
4514                         (void *) cpu, &tracing_fops);
4515
4516         trace_create_file("trace_pipe_raw", 0444, d_cpu,
4517                         (void *) cpu, &tracing_buffers_fops);
4518
4519         trace_create_file("stats", 0444, d_cpu,
4520                         (void *) cpu, &tracing_stats_fops);
4521
4522         trace_create_file("buffer_size_kb", 0444, d_cpu,
4523                         (void *) cpu, &tracing_entries_fops);
4524 }
4525
4526 #ifdef CONFIG_FTRACE_SELFTEST
4527 /* Let selftest have access to static functions in this file */
4528 #include "trace_selftest.c"
4529 #endif
4530
4531 struct trace_option_dentry {
4532         struct tracer_opt               *opt;
4533         struct tracer_flags             *flags;
4534         struct dentry                   *entry;
4535 };
4536
4537 static ssize_t
4538 trace_options_read(struct file *filp, char __user *ubuf, size_t cnt,
4539                         loff_t *ppos)
4540 {
4541         struct trace_option_dentry *topt = filp->private_data;
4542         char *buf;
4543
4544         if (topt->flags->val & topt->opt->bit)
4545                 buf = "1\n";
4546         else
4547                 buf = "0\n";
4548
4549         return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
4550 }
4551
4552 static ssize_t
4553 trace_options_write(struct file *filp, const char __user *ubuf, size_t cnt,
4554                          loff_t *ppos)
4555 {
4556         struct trace_option_dentry *topt = filp->private_data;
4557         unsigned long val;
4558         int ret;
4559
4560         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4561         if (ret)
4562                 return ret;
4563
4564         if (val != 0 && val != 1)
4565                 return -EINVAL;
4566
4567         if (!!(topt->flags->val & topt->opt->bit) != val) {
4568                 mutex_lock(&trace_types_lock);
4569                 ret = __set_tracer_option(current_trace, topt->flags,
4570                                           topt->opt, !val);
4571                 mutex_unlock(&trace_types_lock);
4572                 if (ret)
4573                         return ret;
4574         }
4575
4576         *ppos += cnt;
4577
4578         return cnt;
4579 }
4580
4581
4582 static const struct file_operations trace_options_fops = {
4583         .open = tracing_open_generic,
4584         .read = trace_options_read,
4585         .write = trace_options_write,
4586         .llseek = generic_file_llseek,
4587 };
4588
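/*
 * options/<flag>: one 0/1 file per core trace option.  For example
 * (illustrative, assuming the standard option names):
 *
 *   echo 1 > options/print-parent
 *
 * is equivalent to "echo print-parent > trace_options".
 */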
4589 static ssize_t
4590 trace_options_core_read(struct file *filp, char __user *ubuf, size_t cnt,
4591                         loff_t *ppos)
4592 {
4593         long index = (long)filp->private_data;
4594         char *buf;
4595
4596         if (trace_flags & (1 << index))
4597                 buf = "1\n";
4598         else
4599                 buf = "0\n";
4600
4601         return simple_read_from_buffer(ubuf, cnt, ppos, buf, 2);
4602 }
4603
4604 static ssize_t
4605 trace_options_core_write(struct file *filp, const char __user *ubuf, size_t cnt,
4606                          loff_t *ppos)
4607 {
4608         long index = (long)filp->private_data;
4609         unsigned long val;
4610         int ret;
4611
4612         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4613         if (ret)
4614                 return ret;
4615
4616         if (val != 0 && val != 1)
4617                 return -EINVAL;
4618         set_tracer_flags(1 << index, val);
4619
4620         *ppos += cnt;
4621
4622         return cnt;
4623 }
4624
4625 static const struct file_operations trace_options_core_fops = {
4626         .open = tracing_open_generic,
4627         .read = trace_options_core_read,
4628         .write = trace_options_core_write,
4629         .llseek = generic_file_llseek,
4630 };
4631
4632 struct dentry *trace_create_file(const char *name,
4633                                  umode_t mode,
4634                                  struct dentry *parent,
4635                                  void *data,
4636                                  const struct file_operations *fops)
4637 {
4638         struct dentry *ret;
4639
4640         ret = debugfs_create_file(name, mode, parent, data, fops);
4641         if (!ret)
4642                 pr_warning("Could not create debugfs '%s' entry\n", name);
4643
4644         return ret;
4645 }
4646
4647
4648 static struct dentry *trace_options_init_dentry(void)
4649 {
4650         struct dentry *d_tracer;
4651         static struct dentry *t_options;
4652
4653         if (t_options)
4654                 return t_options;
4655
4656         d_tracer = tracing_init_dentry();
4657         if (!d_tracer)
4658                 return NULL;
4659
4660         t_options = debugfs_create_dir("options", d_tracer);
4661         if (!t_options) {
4662                 pr_warning("Could not create debugfs directory 'options'\n");
4663                 return NULL;
4664         }
4665
4666         return t_options;
4667 }
4668
4669 static void
4670 create_trace_option_file(struct trace_option_dentry *topt,
4671                          struct tracer_flags *flags,
4672                          struct tracer_opt *opt)
4673 {
4674         struct dentry *t_options;
4675
4676         t_options = trace_options_init_dentry();
4677         if (!t_options)
4678                 return;
4679
4680         topt->flags = flags;
4681         topt->opt = opt;
4682
4683         topt->entry = trace_create_file(opt->name, 0644, t_options, topt,
4684                                     &trace_options_fops);
4685
4686 }
4687
4688 static struct trace_option_dentry *
4689 create_trace_option_files(struct tracer *tracer)
4690 {
4691         struct trace_option_dentry *topts;
4692         struct tracer_flags *flags;
4693         struct tracer_opt *opts;
4694         int cnt;
4695
4696         if (!tracer)
4697                 return NULL;
4698
4699         flags = tracer->flags;
4700
4701         if (!flags || !flags->opts)
4702                 return NULL;
4703
4704         opts = flags->opts;
4705
4706         for (cnt = 0; opts[cnt].name; cnt++)
4707                 ;
4708
4709         topts = kcalloc(cnt + 1, sizeof(*topts), GFP_KERNEL);
4710         if (!topts)
4711                 return NULL;
4712
4713         for (cnt = 0; opts[cnt].name; cnt++)
4714                 create_trace_option_file(&topts[cnt], flags,
4715                                          &opts[cnt]);
4716
4717         return topts;
4718 }
4719
4720 static void
4721 destroy_trace_option_files(struct trace_option_dentry *topts)
4722 {
4723         int cnt;
4724
4725         if (!topts)
4726                 return;
4727
4728         for (cnt = 0; topts[cnt].opt; cnt++) {
4729                 if (topts[cnt].entry)
4730                         debugfs_remove(topts[cnt].entry);
4731         }
4732
4733         kfree(topts);
4734 }
4735
4736 static struct dentry *
4737 create_trace_option_core_file(const char *option, long index)
4738 {
4739         struct dentry *t_options;
4740
4741         t_options = trace_options_init_dentry();
4742         if (!t_options)
4743                 return NULL;
4744
4745         return trace_create_file(option, 0644, t_options, (void *)index,
4746                                     &trace_options_core_fops);
4747 }
4748
4749 static __init void create_trace_options_dir(void)
4750 {
4751         struct dentry *t_options;
4752         int i;
4753
4754         t_options = trace_options_init_dentry();
4755         if (!t_options)
4756                 return;
4757
4758         for (i = 0; trace_options[i]; i++)
4759                 create_trace_option_core_file(trace_options[i], i);
4760 }
4761
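/*
 * "tracing_on": a lightweight switch for recording into the ring
 * buffer; unlike changing current_tracer, nothing is torn down.
 * Illustrative use:
 *
 *   echo 0 > tracing_on    # stop recording, tracer stays loaded
 *   echo 1 > tracing_on    # resume recording
 */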
4762 static ssize_t
4763 rb_simple_read(struct file *filp, char __user *ubuf,
4764                size_t cnt, loff_t *ppos)
4765 {
4766         struct trace_array *tr = filp->private_data;
4767         struct ring_buffer *buffer = tr->buffer;
4768         char buf[64];
4769         int r;
4770
4771         if (buffer)
4772                 r = ring_buffer_record_is_on(buffer);
4773         else
4774                 r = 0;
4775
4776         r = sprintf(buf, "%d\n", r);
4777
4778         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
4779 }
4780
4781 static ssize_t
4782 rb_simple_write(struct file *filp, const char __user *ubuf,
4783                 size_t cnt, loff_t *ppos)
4784 {
4785         struct trace_array *tr = filp->private_data;
4786         struct ring_buffer *buffer = tr->buffer;
4787         unsigned long val;
4788         int ret;
4789
4790         ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
4791         if (ret)
4792                 return ret;
4793
4794         if (buffer) {
4795                 if (val)
4796                         ring_buffer_record_on(buffer);
4797                 else
4798                         ring_buffer_record_off(buffer);
4799         }
4800
4801         (*ppos)++;
4802
4803         return cnt;
4804 }
4805
4806 static const struct file_operations rb_simple_fops = {
4807         .open           = tracing_open_generic,
4808         .read           = rb_simple_read,
4809         .write          = rb_simple_write,
4810         .llseek         = default_llseek,
4811 };
4812
4813 static __init int tracer_init_debugfs(void)
4814 {
4815         struct dentry *d_tracer;
4816         int cpu;
4817
4818         trace_access_lock_init();
4819
4820         d_tracer = tracing_init_dentry();
4821
4822         trace_create_file("trace_options", 0644, d_tracer,
4823                         NULL, &tracing_iter_fops);
4824
4825         trace_create_file("tracing_cpumask", 0644, d_tracer,
4826                         NULL, &tracing_cpumask_fops);
4827
4828         trace_create_file("trace", 0644, d_tracer,
4829                         (void *) TRACE_PIPE_ALL_CPU, &tracing_fops);
4830
4831         trace_create_file("available_tracers", 0444, d_tracer,
4832                         &global_trace, &show_traces_fops);
4833
4834         trace_create_file("current_tracer", 0644, d_tracer,
4835                         &global_trace, &set_tracer_fops);
4836
4837 #ifdef CONFIG_TRACER_MAX_TRACE
4838         trace_create_file("tracing_max_latency", 0644, d_tracer,
4839                         &tracing_max_latency, &tracing_max_lat_fops);
4840 #endif
4841
4842         trace_create_file("tracing_thresh", 0644, d_tracer,
4843                         &tracing_thresh, &tracing_max_lat_fops);
4844
4845         trace_create_file("README", 0444, d_tracer,
4846                         NULL, &tracing_readme_fops);
4847
4848         trace_create_file("trace_pipe", 0444, d_tracer,
4849                         (void *) TRACE_PIPE_ALL_CPU, &tracing_pipe_fops);
4850
4851         trace_create_file("buffer_size_kb", 0644, d_tracer,
4852                         (void *) RING_BUFFER_ALL_CPUS, &tracing_entries_fops);
4853
4854         trace_create_file("buffer_total_size_kb", 0444, d_tracer,
4855                         &global_trace, &tracing_total_entries_fops);
4856
4857         trace_create_file("free_buffer", 0644, d_tracer,
4858                         &global_trace, &tracing_free_buffer_fops);
4859
4860         trace_create_file("trace_marker", 0220, d_tracer,
4861                         NULL, &tracing_mark_fops);
4862
4863         trace_create_file("saved_cmdlines", 0444, d_tracer,
4864                         NULL, &tracing_saved_cmdlines_fops);
4865
4866         trace_create_file("trace_clock", 0644, d_tracer, NULL,
4867                           &trace_clock_fops);
4868
4869         trace_create_file("tracing_on", 0644, d_tracer,
4870                             &global_trace, &rb_simple_fops);
4871
4872 #ifdef CONFIG_DYNAMIC_FTRACE
4873         trace_create_file("dyn_ftrace_total_info", 0444, d_tracer,
4874                         &ftrace_update_tot_cnt, &tracing_dyn_info_fops);
4875 #endif
4876
4877         create_trace_options_dir();
4878
4879         for_each_tracing_cpu(cpu)
4880                 tracing_init_debugfs_percpu(cpu);
4881
4882         return 0;
4883 }
4884
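/*
 * Panic/die notifiers: when ftrace_dump_on_oops is set (e.g. via the
 * ftrace_dump_on_oops kernel command line parameter), dump the trace
 * buffers to the console on oops or panic.
 */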
4885 static int trace_panic_handler(struct notifier_block *this,
4886                                unsigned long event, void *unused)
4887 {
4888         if (ftrace_dump_on_oops)
4889                 ftrace_dump(ftrace_dump_on_oops);
4890         return NOTIFY_OK;
4891 }
4892
4893 static struct notifier_block trace_panic_notifier = {
4894         .notifier_call  = trace_panic_handler,
4895         .next           = NULL,
4896         .priority       = 150   /* priority: INT_MAX >= x >= 0 */
4897 };
4898
4899 static int trace_die_handler(struct notifier_block *self,
4900                              unsigned long val,
4901                              void *data)
4902 {
4903         switch (val) {
4904         case DIE_OOPS:
4905                 if (ftrace_dump_on_oops)
4906                         ftrace_dump(ftrace_dump_on_oops);
4907                 break;
4908         default:
4909                 break;
4910         }
4911         return NOTIFY_OK;
4912 }
4913
4914 static struct notifier_block trace_die_notifier = {
4915         .notifier_call = trace_die_handler,
4916         .priority = 200
4917 };
4918
4919 /*
4920  * printk is limited to a max of 1024 bytes; we really don't need it that big.
4921  * Nothing should be printing 1000 characters anyway.
4922  */
4923 #define TRACE_MAX_PRINT         1000
4924
4925 /*
4926  * Define here KERN_TRACE so that we have one place to modify
4927  * it if we decide to change what log level the ftrace dump
4928  * should be at.
4929  */
4930 #define KERN_TRACE              KERN_EMERG
4931
4932 void
4933 trace_printk_seq(struct trace_seq *s)
4934 {
4935         /* Probably should print a warning here. */
4936         if (s->len >= TRACE_MAX_PRINT)
4937                 s->len = TRACE_MAX_PRINT;
4938
4939         /* should be NUL-terminated, but we are paranoid. */
4940         s->buffer[s->len] = 0;
4941
4942         printk(KERN_TRACE "%s", s->buffer);
4943
4944         trace_seq_init(s);
4945 }

void trace_init_global_iter(struct trace_iterator *iter)
{
        iter->tr = &global_trace;
        iter->trace = current_trace;
        iter->cpu_file = TRACE_PIPE_ALL_CPU;
}

static void
__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode)
{
        static arch_spinlock_t ftrace_dump_lock =
                (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
        /* use static because iter can be a bit big for the stack */
        static struct trace_iterator iter;
        unsigned int old_userobj;
        static int dump_ran;
        unsigned long flags;
        int cnt = 0, cpu;

        /* only one dump */
        local_irq_save(flags);
        arch_spin_lock(&ftrace_dump_lock);
        if (dump_ran)
                goto out;

        dump_ran = 1;

        tracing_off();

        /* Did function tracer already get disabled? */
        if (ftrace_is_dead()) {
                printk("# WARNING: FUNCTION TRACING IS CORRUPTED\n");
                printk("#          MAY BE MISSING FUNCTION EVENTS\n");
        }

        if (disable_tracing)
                ftrace_kill();

        trace_init_global_iter(&iter);

        for_each_tracing_cpu(cpu) {
                atomic_inc(&iter.tr->data[cpu]->disabled);
        }

        old_userobj = trace_flags & TRACE_ITER_SYM_USEROBJ;

        /* don't look at user memory in panic mode */
        trace_flags &= ~TRACE_ITER_SYM_USEROBJ;

        /* Simulate the iterator */
        iter.tr = &global_trace;
        iter.trace = current_trace;

        switch (oops_dump_mode) {
        case DUMP_ALL:
                iter.cpu_file = TRACE_PIPE_ALL_CPU;
                break;
        case DUMP_ORIG:
                iter.cpu_file = raw_smp_processor_id();
                break;
        case DUMP_NONE:
                goto out_enable;
        default:
                printk(KERN_TRACE "Bad dumping mode, switching to all CPUs dump\n");
                iter.cpu_file = TRACE_PIPE_ALL_CPU;
        }

        printk(KERN_TRACE "Dumping ftrace buffer:\n");

        /*
         * We need to stop all tracing on all CPUs to read
         * the next buffer. This is a bit expensive, but is
         * not done often. We read everything we can,
         * and then release the locks again.
         */

        while (!trace_empty(&iter)) {

                if (!cnt)
                        printk(KERN_TRACE "---------------------------------\n");

                cnt++;

                /* reset all but tr, trace, and overruns */
                memset(&iter.seq, 0,
                       sizeof(struct trace_iterator) -
                       offsetof(struct trace_iterator, seq));
                iter.iter_flags |= TRACE_FILE_LAT_FMT;
                iter.pos = -1;

                if (trace_find_next_entry_inc(&iter) != NULL) {
                        int ret;

                        ret = print_trace_line(&iter);
                        if (ret != TRACE_TYPE_NO_CONSUME)
                                trace_consume(&iter);
                }
                touch_nmi_watchdog();

                trace_printk_seq(&iter.seq);
        }

        if (!cnt)
                printk(KERN_TRACE "   (ftrace buffer empty)\n");
        else
                printk(KERN_TRACE "---------------------------------\n");

 out_enable:
        /* Re-enable tracing if requested */
        if (!disable_tracing) {
                trace_flags |= old_userobj;

                for_each_tracing_cpu(cpu) {
                        atomic_dec(&iter.tr->data[cpu]->disabled);
                }
                tracing_on();
        }

 out:
        arch_spin_unlock(&ftrace_dump_lock);
        local_irq_restore(flags);
}

/* By default: disable tracing after the dump */
void ftrace_dump(enum ftrace_dump_mode oops_dump_mode)
{
        __ftrace_dump(true, oops_dump_mode);
}
EXPORT_SYMBOL_GPL(ftrace_dump);
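
/*
 * Example (illustrative): ftrace_dump() is exported so other kernel
 * code can flush the trace buffers to the console when it hits an
 * unrecoverable state, e.g. (fatal_condition is a hypothetical flag):
 *
 *   if (fatal_condition)
 *           ftrace_dump(DUMP_ALL);
 *
 * As the comment above notes, tracing stays disabled after the dump.
 */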

__init static int tracer_alloc_buffers(void)
{
        int ring_buf_size;
        enum ring_buffer_flags rb_flags;
        int i;
        int ret = -ENOMEM;

        if (!alloc_cpumask_var(&tracing_buffer_mask, GFP_KERNEL))
                goto out;

        if (!alloc_cpumask_var(&tracing_cpumask, GFP_KERNEL))
                goto out_free_buffer_mask;

        /* Only allocate trace_printk buffers if a trace_printk exists */
        if (__stop___trace_bprintk_fmt != __start___trace_bprintk_fmt)
                /* Must be called before global_trace.buffer is allocated */
                trace_printk_init_buffers();

        /* To save memory, keep the ring buffer at its minimum size */
        if (ring_buffer_expanded)
                ring_buf_size = trace_buf_size;
        else
                ring_buf_size = 1;

        rb_flags = trace_flags & TRACE_ITER_OVERWRITE ? RB_FL_OVERWRITE : 0;

        cpumask_copy(tracing_buffer_mask, cpu_possible_mask);
        cpumask_copy(tracing_cpumask, cpu_all_mask);

        /* TODO: make the number of buffers hot pluggable with CPUs */
        global_trace.buffer = ring_buffer_alloc(ring_buf_size, rb_flags);
        if (!global_trace.buffer) {
                printk(KERN_ERR "tracer: failed to allocate ring buffer!\n");
                WARN_ON(1);
                goto out_free_cpumask;
        }
        if (global_trace.buffer_disabled)
                tracing_off();

#ifdef CONFIG_TRACER_MAX_TRACE
        max_tr.buffer = ring_buffer_alloc(1, rb_flags);
        if (!max_tr.buffer) {
                printk(KERN_ERR "tracer: failed to allocate max ring buffer!\n");
                WARN_ON(1);
                ring_buffer_free(global_trace.buffer);
                goto out_free_cpumask;
        }
#endif

        /* Point each trace array at its per-CPU trace data */
        for_each_tracing_cpu(i) {
                global_trace.data[i] = &per_cpu(global_trace_cpu, i);
                max_tr.data[i] = &per_cpu(max_tr_data, i);
        }

        set_buffer_entries(&global_trace,
                           ring_buffer_size(global_trace.buffer, 0));
#ifdef CONFIG_TRACER_MAX_TRACE
        set_buffer_entries(&max_tr, 1);
#endif

        trace_init_cmdlines();
        init_irq_work(&trace_work_wakeup, trace_wake_up);

        register_tracer(&nop_trace);
        current_trace = &nop_trace;
        /* All seems OK, enable tracing */
        tracing_disabled = 0;

        atomic_notifier_chain_register(&panic_notifier_list,
                                       &trace_panic_notifier);

        register_die_notifier(&trace_die_notifier);

        /* Apply any comma-separated options handed over from the command line */
        while (trace_boot_options) {
                char *option;

                option = strsep(&trace_boot_options, ",");
                trace_set_options(option);
        }

        return 0;

out_free_cpumask:
        free_cpumask_var(tracing_cpumask);
out_free_buffer_mask:
        free_cpumask_var(tracing_buffer_mask);
out:
        return ret;
}
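
/*
 * Example (illustrative): the loop at the end of the function above
 * consumes the trace_options= kernel command line parameter, so a
 * kernel can boot with, say:
 *
 *   trace_options=stacktrace,sym-addr
 *
 * which applies each comma-separated option through
 * trace_set_options() before any tracer produces output.
 */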

__init static int clear_boot_tracer(void)
{
        /*
         * The default boot-up tracer string lives in an init section
         * and will soon be freed. This function runs as a late
         * initcall: if the boot tracer was never found and registered
         * by now, print a notice and clear the pointer so that a
         * later tracer registration cannot dereference the freed
         * string.
         */
        if (!default_bootup_tracer)
                return 0;

        printk(KERN_INFO "ftrace bootup tracer '%s' not registered.\n",
               default_bootup_tracer);
        default_bootup_tracer = NULL;

        return 0;
}
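
/*
 * Example (illustrative): default_bootup_tracer is set when the
 * kernel boots with the ftrace= parameter, e.g.:
 *
 *   ftrace=function
 *
 * If the named tracer never registers itself (say, it was not
 * compiled in), the late initcall above clears the stale pointer.
 */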

early_initcall(tracer_alloc_buffers);
fs_initcall(tracer_init_debugfs);
late_initcall(clear_boot_tracer);
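
/*
 * Ordering note: early_initcall() runs before fs_initcall(), which
 * runs before late_initcall(). So the ring buffers are allocated
 * first, the debugfs files appear once filesystem infrastructure is
 * up, and the boot tracer check happens last, shortly before init
 * memory is freed.
 */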