/*
 *
 * Function graph tracer.
 * Copyright (c) 2008-2009 Frederic Weisbecker <fweisbec@gmail.com>
 * Mostly borrowed from function tracer which
 * is Copyright (c) Steven Rostedt <srostedt@redhat.com>
 *
 */
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/fs.h>

#include "trace.h"
#include "trace_output.h"

static bool kill_ftrace_graph;

/**
 * ftrace_graph_is_dead - returns true if ftrace_graph_stop() was called
 *
 * ftrace_graph_stop() is called when a severe error is detected in
 * the function graph tracing. This function is called by the critical
 * paths of function graph to keep those paths from doing any more harm.
 */
bool ftrace_graph_is_dead(void)
{
        return kill_ftrace_graph;
}

/**
 * ftrace_graph_stop - set to permanently disable function graph tracing
 *
 * In case of an error in function graph tracing, this is called
 * to try to keep function graph tracing from causing any more harm.
 * Usually this is pretty severe and this is called to try to at least
 * get a warning out to the user.
 */
void ftrace_graph_stop(void)
{
        kill_ftrace_graph = true;
}

/* When set, irq functions will be ignored */
static int ftrace_graph_skip_irqs;

struct fgraph_cpu_data {
        pid_t           last_pid;
        int             depth;
        int             depth_irq;
        int             ignore;
        unsigned long   enter_funcs[FTRACE_RETFUNC_DEPTH];
};

struct fgraph_data {
        struct fgraph_cpu_data __percpu *cpu_data;

        /* Place to preserve last processed entry. */
        struct ftrace_graph_ent_entry   ent;
        struct ftrace_graph_ret_entry   ret;
        int                             failed;
        int                             cpu;
};

#define TRACE_GRAPH_INDENT      2

static unsigned int max_depth;

static struct tracer_opt trace_opts[] = {
        /* Display overruns? (for self-debugging purposes) */
        { TRACER_OPT(funcgraph-overrun, TRACE_GRAPH_PRINT_OVERRUN) },
        /* Display CPU? */
        { TRACER_OPT(funcgraph-cpu, TRACE_GRAPH_PRINT_CPU) },
        /* Display overhead? */
        { TRACER_OPT(funcgraph-overhead, TRACE_GRAPH_PRINT_OVERHEAD) },
        /* Display proc name/pid */
        { TRACER_OPT(funcgraph-proc, TRACE_GRAPH_PRINT_PROC) },
        /* Display duration of execution */
        { TRACER_OPT(funcgraph-duration, TRACE_GRAPH_PRINT_DURATION) },
        /* Display absolute time of an entry */
        { TRACER_OPT(funcgraph-abstime, TRACE_GRAPH_PRINT_ABS_TIME) },
        /* Display interrupts */
        { TRACER_OPT(funcgraph-irqs, TRACE_GRAPH_PRINT_IRQS) },
        /* Display function name after trailing } */
        { TRACER_OPT(funcgraph-tail, TRACE_GRAPH_PRINT_TAIL) },
        /* Include sleep time (scheduled out) between entry and return */
        { TRACER_OPT(sleep-time, TRACE_GRAPH_SLEEP_TIME) },
        /* Include time within nested functions */
        { TRACER_OPT(graph-time, TRACE_GRAPH_GRAPH_TIME) },
        { } /* Empty entry */
};

static struct tracer_flags tracer_flags = {
        /* Don't display overruns, proc, or tail by default */
        .val = TRACE_GRAPH_PRINT_CPU | TRACE_GRAPH_PRINT_OVERHEAD |
               TRACE_GRAPH_PRINT_DURATION | TRACE_GRAPH_PRINT_IRQS |
               TRACE_GRAPH_SLEEP_TIME | TRACE_GRAPH_GRAPH_TIME,
        .opts = trace_opts
};
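
/*
 * Usage note (illustrative; the tracefs mount point may vary by setup):
 * while this tracer is active, each option above shows up as a boolean
 * file under the tracefs options directory and can be toggled at
 * runtime, e.g.:
 *
 *   echo function_graph > /sys/kernel/tracing/current_tracer
 *   echo 1 > /sys/kernel/tracing/options/funcgraph-proc
 *
 * A toggle lands in func_graph_set_flag() near the bottom of this file.
 */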

/*
 * The DURATION column is also used to display IRQ signs;
 * the following values are used by print_graph_irq and others
 * to fill in space in the DURATION column.
 */
enum {
        FLAGS_FILL_FULL  = 1 << TRACE_GRAPH_PRINT_FILL_SHIFT,
        FLAGS_FILL_START = 2 << TRACE_GRAPH_PRINT_FILL_SHIFT,
        FLAGS_FILL_END   = 3 << TRACE_GRAPH_PRINT_FILL_SHIFT,
};

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
                     struct trace_seq *s, u32 flags);

/* Add a function return address to the trace stack on thread info. */
int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
                         unsigned long frame_pointer)
{
        unsigned long long calltime;
        int index;

        if (unlikely(ftrace_graph_is_dead()))
                return -EBUSY;

        if (!current->ret_stack)
                return -EBUSY;

        /*
         * We must make sure the ret_stack is tested before we read
         * anything else.
         */
        smp_rmb();

        /* The return trace stack is full */
        if (current->curr_ret_stack == FTRACE_RETFUNC_DEPTH - 1) {
                atomic_inc(&current->trace_overrun);
                return -EBUSY;
        }

        /*
         * The curr_ret_stack is an index into the ftrace return stack of
         * the current task.  Its value should be in [0, FTRACE_RETFUNC_DEPTH)
         * when the function graph tracer is used.  To support filtering out
         * specific functions, the index is made negative by subtracting a
         * huge value (FTRACE_NOTRACE_DEPTH), so that when ftrace sees a
         * negative index it will ignore the record.  The index is recovered
         * when returning from the filtered function by adding
         * FTRACE_NOTRACE_DEPTH back, after which recording continues
         * normally.
         *
         * The curr_ret_stack is initialized to -1 and gets increased
         * in this function.  So it can be less than -1 only if it was
         * filtered out via ftrace_graph_notrace_addr(), which can be
         * set from the set_graph_notrace file in tracefs by the user.
         */
        if (current->curr_ret_stack < -1)
                return -EBUSY;

        calltime = trace_clock_local();

        index = ++current->curr_ret_stack;
        if (ftrace_graph_notrace_addr(func))
                current->curr_ret_stack -= FTRACE_NOTRACE_DEPTH;
        barrier();
        current->ret_stack[index].ret = ret;
        current->ret_stack[index].func = func;
        current->ret_stack[index].calltime = calltime;
        current->ret_stack[index].subtime = 0;
        current->ret_stack[index].fp = frame_pointer;
        *depth = current->curr_ret_stack;

        return 0;
}
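
/*
 * Worked example of the index trick above (assuming the usual values of
 * FTRACE_RETFUNC_DEPTH == 50 and FTRACE_NOTRACE_DEPTH == 65536 from
 * linux/ftrace.h): entering a notrace'd function at index 3 leaves
 * curr_ret_stack at 3 - 65536 = -65533.  Any value below -1 therefore
 * means "inside a filtered function", and adding FTRACE_NOTRACE_DEPTH
 * back on return recovers the original index 3.
 */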

/* Retrieve a function return address from the trace stack on thread info. */
static void
ftrace_pop_return_trace(struct ftrace_graph_ret *trace, unsigned long *ret,
                        unsigned long frame_pointer)
{
        int index;

        index = current->curr_ret_stack;

        /*
         * A negative index here means that it's just returned from a
         * notrace'd function.  Recover the index to get the original
         * return address.  See ftrace_push_return_trace().
         *
         * TODO: Need to check whether the stack gets corrupted.
         */
        if (index < 0)
                index += FTRACE_NOTRACE_DEPTH;

        if (unlikely(index < 0 || index >= FTRACE_RETFUNC_DEPTH)) {
                ftrace_graph_stop();
                WARN_ON(1);
                /* Might as well panic, otherwise we have nowhere to go */
                *ret = (unsigned long)panic;
                return;
        }

#if defined(CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST) && !defined(CC_USING_FENTRY)
        /*
         * The arch may choose to record the frame pointer used
         * and check it here to make sure that it is what we expect it
         * to be. If gcc does not set the place holder of the return
         * address in the frame pointer, and does a copy instead, then
         * the function graph trace will fail. This test detects this
         * case.
         *
         * Currently, x86_32 built with -Os (optimize for size) makes the
         * latest gcc do the above.
         *
         * Note, -mfentry does not use frame pointers, and this test
         * is not needed if CC_USING_FENTRY is set.
         */
        if (unlikely(current->ret_stack[index].fp != frame_pointer)) {
                ftrace_graph_stop();
                WARN(1, "Bad frame pointer: expected %lx, received %lx\n"
                     "  from func %ps return to %lx\n",
                     current->ret_stack[index].fp,
                     frame_pointer,
                     (void *)current->ret_stack[index].func,
                     current->ret_stack[index].ret);
                *ret = (unsigned long)panic;
                return;
        }
#endif

        *ret = current->ret_stack[index].ret;
        trace->func = current->ret_stack[index].func;
        trace->calltime = current->ret_stack[index].calltime;
        trace->overrun = atomic_read(&current->trace_overrun);
        trace->depth = index;
}

/*
 * Send the trace to the ring-buffer.
 * @return the original return address.
 */
unsigned long ftrace_return_to_handler(unsigned long frame_pointer)
{
        struct ftrace_graph_ret trace;
        unsigned long ret;

        ftrace_pop_return_trace(&trace, &ret, frame_pointer);
        trace.rettime = trace_clock_local();
        barrier();
        current->curr_ret_stack--;
        /*
         * The curr_ret_stack can be less than -1 only if it was
         * filtered out and it's about to return from the function.
         * Recover the index and continue to trace normal functions.
         */
        if (current->curr_ret_stack < -1) {
                current->curr_ret_stack += FTRACE_NOTRACE_DEPTH;
                return ret;
        }

        /*
         * The trace should run after decrementing the ret counter
         * in case an interrupt were to come in. We don't want to
         * lose the interrupt if max_depth is set.
         */
        ftrace_graph_return(&trace);

        if (unlikely(!ret)) {
                ftrace_graph_stop();
                WARN_ON(1);
                /* Might as well panic. What else to do? */
                ret = (unsigned long)panic;
        }

        return ret;
}

int __trace_graph_entry(struct trace_array *tr,
                                struct ftrace_graph_ent *trace,
                                unsigned long flags,
                                int pc)
{
        struct trace_event_call *call = &event_funcgraph_entry;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer = tr->trace_buffer.buffer;
        struct ftrace_graph_ent_entry *entry;

        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_ENT,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return 0;
        entry   = ring_buffer_event_data(event);
        entry->graph_ent                        = *trace;
        if (!call_filter_check_discard(call, entry, buffer, event))
                __buffer_unlock_commit(buffer, event);

        return 1;
}

static inline int ftrace_graph_ignore_irqs(void)
{
        if (!ftrace_graph_skip_irqs || trace_recursion_test(TRACE_IRQ_BIT))
                return 0;

        return in_irq();
}

int trace_graph_entry(struct ftrace_graph_ent *trace)
{
        struct trace_array *tr = graph_array;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int ret;
        int cpu;
        int pc;

        if (!ftrace_trace_task(tr))
                return 0;

        /*
         * Trace the function only when it is nested in an already-traced
         * function or is itself an enabled function.
         */
        if ((!(trace->depth || ftrace_graph_addr(trace->func)) ||
             ftrace_graph_ignore_irqs()) || (trace->depth < 0) ||
            (max_depth && trace->depth >= max_depth))
                return 0;

        /*
         * Do not trace a function if it's filtered by set_graph_notrace.
         * Make the index of ret stack negative to indicate that it should
         * ignore further functions.  But it needs its own ret stack entry
         * to recover the original index in order to continue tracing after
         * returning from the function.
         */
        if (ftrace_graph_notrace_addr(trace->func))
                return 1;

        /*
         * Stop here if tracing_thresh is set. We only write function return
         * events to the ring buffer.
         */
        if (tracing_thresh)
                return 1;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
                ret = __trace_graph_entry(tr, trace, flags, pc);
        } else {
                ret = 0;
        }

        atomic_dec(&data->disabled);
        local_irq_restore(flags);

        return ret;
}

static void
__trace_graph_function(struct trace_array *tr,
                unsigned long ip, unsigned long flags, int pc)
{
        u64 time = trace_clock_local();
        struct ftrace_graph_ent ent = {
                .func  = ip,
                .depth = 0,
        };
        struct ftrace_graph_ret ret = {
                .func     = ip,
                .depth    = 0,
                .calltime = time,
                .rettime  = time,
        };

        __trace_graph_entry(tr, &ent, flags, pc);
        __trace_graph_return(tr, &ret, flags, pc);
}

void
trace_graph_function(struct trace_array *tr,
                unsigned long ip, unsigned long parent_ip,
                unsigned long flags, int pc)
{
        __trace_graph_function(tr, ip, flags, pc);
}

void __trace_graph_return(struct trace_array *tr,
                                struct ftrace_graph_ret *trace,
                                unsigned long flags,
                                int pc)
{
        struct trace_event_call *call = &event_funcgraph_exit;
        struct ring_buffer_event *event;
        struct ring_buffer *buffer = tr->trace_buffer.buffer;
        struct ftrace_graph_ret_entry *entry;

        event = trace_buffer_lock_reserve(buffer, TRACE_GRAPH_RET,
                                          sizeof(*entry), flags, pc);
        if (!event)
                return;
        entry   = ring_buffer_event_data(event);
        entry->ret                              = *trace;
        if (!call_filter_check_discard(call, entry, buffer, event))
                __buffer_unlock_commit(buffer, event);
}

void trace_graph_return(struct ftrace_graph_ret *trace)
{
        struct trace_array *tr = graph_array;
        struct trace_array_cpu *data;
        unsigned long flags;
        long disabled;
        int cpu;
        int pc;

        local_irq_save(flags);
        cpu = raw_smp_processor_id();
        data = per_cpu_ptr(tr->trace_buffer.data, cpu);
        disabled = atomic_inc_return(&data->disabled);
        if (likely(disabled == 1)) {
                pc = preempt_count();
                __trace_graph_return(tr, trace, flags, pc);
        }
        atomic_dec(&data->disabled);
        local_irq_restore(flags);
}

void set_graph_array(struct trace_array *tr)
{
        graph_array = tr;

        /* Make graph_array visible before we start tracing */
        smp_mb();
}

static void trace_graph_thresh_return(struct ftrace_graph_ret *trace)
{
        if (tracing_thresh &&
            (trace->rettime - trace->calltime < tracing_thresh))
                return;

        trace_graph_return(trace);
}
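
/*
 * Usage note (illustrative; the tracefs path may vary): tracing_thresh
 * takes a value in microseconds, and when set the graph tracer records
 * only the return events of functions that ran longer than that, e.g.:
 *
 *   echo 100 > /sys/kernel/tracing/tracing_thresh
 *
 * Entry events are suppressed in trace_graph_entry() above, and
 * trace_graph_thresh_return() filters the returns.
 */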

static int graph_trace_init(struct trace_array *tr)
{
        int ret;

        set_graph_array(tr);
        if (tracing_thresh)
                ret = register_ftrace_graph(&trace_graph_thresh_return,
                                            &trace_graph_entry);
        else
                ret = register_ftrace_graph(&trace_graph_return,
                                            &trace_graph_entry);
        if (ret)
                return ret;
        tracing_start_cmdline_record();

        return 0;
}

static void graph_trace_reset(struct trace_array *tr)
{
        tracing_stop_cmdline_record();
        unregister_ftrace_graph();
}

static int graph_trace_update_thresh(struct trace_array *tr)
{
        graph_trace_reset(tr);
        return graph_trace_init(tr);
}

static int max_bytes_for_cpu;

static void print_graph_cpu(struct trace_seq *s, int cpu)
{
        /*
         * Start with a space character - to make it stand out
         * to the right a bit when trace output is pasted into
         * email:
         */
        trace_seq_printf(s, " %*d) ", max_bytes_for_cpu, cpu);
}

#define TRACE_GRAPH_PROCINFO_LENGTH     14

static void print_graph_proc(struct trace_seq *s, pid_t pid)
{
        char comm[TASK_COMM_LEN];
        /* sign + log10(MAX_INT) + '\0' */
        char pid_str[11];
        int spaces = 0;
        int len;
        int i;

        trace_find_cmdline(pid, comm);
        /* Truncate the comm so "comm-pid" fits the column */
        comm[7] = '\0';
        sprintf(pid_str, "%d", pid);

        /* 1 stands for the "-" character */
        len = strlen(comm) + strlen(pid_str) + 1;

        if (len < TRACE_GRAPH_PROCINFO_LENGTH)
                spaces = TRACE_GRAPH_PROCINFO_LENGTH - len;

        /* First spaces to align center */
        for (i = 0; i < spaces / 2; i++)
                trace_seq_putc(s, ' ');

        trace_seq_printf(s, "%s-%s", comm, pid_str);

        /* Last spaces to align center */
        for (i = 0; i < spaces - (spaces / 2); i++)
                trace_seq_putc(s, ' ');
}
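
/*
 * Example (illustrative): with TRACE_GRAPH_PROCINFO_LENGTH of 14, pid
 * 1755 of an "sshd" task prints centered as "  sshd-1755   " (two
 * leading and three trailing spaces); comms are truncated to 7
 * characters above so the column keeps its width.
 */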


static void print_graph_lat_fmt(struct trace_seq *s, struct trace_entry *entry)
{
        trace_seq_putc(s, ' ');
        trace_print_lat_fmt(s, entry);
}

/* If the pid changed since the last trace, output this event */
static void
verif_pid(struct trace_seq *s, pid_t pid, int cpu, struct fgraph_data *data)
{
        pid_t prev_pid;
        pid_t *last_pid;

        if (!data)
                return;

        last_pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);

        if (*last_pid == pid)
                return;

        prev_pid = *last_pid;
        *last_pid = pid;

        if (prev_pid == -1)
                return;
        /*
         * Context-switch trace line:
         *
         *  ------------------------------------------
         *  | 1)  migration/0--1  =>  sshd-1755
         *  ------------------------------------------
         */
        trace_seq_puts(s, " ------------------------------------------\n");
        print_graph_cpu(s, cpu);
        print_graph_proc(s, prev_pid);
        trace_seq_puts(s, " => ");
        print_graph_proc(s, pid);
        trace_seq_puts(s, "\n ------------------------------------------\n\n");
}

static struct ftrace_graph_ret_entry *
get_return_for_leaf(struct trace_iterator *iter,
                struct ftrace_graph_ent_entry *curr)
{
        struct fgraph_data *data = iter->private;
        struct ring_buffer_iter *ring_iter = NULL;
        struct ring_buffer_event *event;
        struct ftrace_graph_ret_entry *next;

        /*
         * If the previous output failed to write to the seq buffer,
         * then we just reuse the data from before.
         */
        if (data && data->failed) {
                curr = &data->ent;
                next = &data->ret;
        } else {

                ring_iter = trace_buffer_iter(iter, iter->cpu);

                /* First peek to compare current entry and the next one */
                if (ring_iter)
                        event = ring_buffer_iter_peek(ring_iter, NULL);
                else {
                        /*
                         * We need to consume the current entry to see
                         * the next one.
                         */
                        ring_buffer_consume(iter->trace_buffer->buffer, iter->cpu,
                                            NULL, NULL);
                        event = ring_buffer_peek(iter->trace_buffer->buffer, iter->cpu,
                                                 NULL, NULL);
                }

                if (!event)
                        return NULL;

                next = ring_buffer_event_data(event);

                if (data) {
                        /*
                         * Save current and next entries for later reference
                         * if the output fails.
                         */
                        data->ent = *curr;
                        /*
                         * If the next event is not a return type, we only
                         * need to record its type so the check below can
                         * reject it. Otherwise we can safely copy the
                         * entire event.
                         */
                        if (next->ent.type == TRACE_GRAPH_RET)
                                data->ret = *next;
                        else
                                data->ret.ent.type = next->ent.type;
                }
        }

        if (next->ent.type != TRACE_GRAPH_RET)
                return NULL;

        if (curr->ent.pid != next->ent.pid ||
                        curr->graph_ent.func != next->ret.func)
                return NULL;

        /* this is a leaf, now advance the iterator */
        if (ring_iter)
                ring_buffer_read(ring_iter, NULL);

        return next;
}
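
/*
 * Illustration (sketch of typical output, not taken verbatim from a
 * trace): when an entry event is immediately followed by its own return,
 * the pair is collapsed into one line; otherwise the entry opens a
 * bracket that a later return event closes:
 *
 *   leaf:       1)   0.631 us    |    kfree();
 *   non-leaf:   1)               |    do_fork() {
 */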

static void print_graph_abs_time(u64 t, struct trace_seq *s)
{
        unsigned long usecs_rem;

        usecs_rem = do_div(t, NSEC_PER_SEC);
        usecs_rem /= 1000;

        trace_seq_printf(s, "%5lu.%06lu |  ",
                         (unsigned long)t, usecs_rem);
}

static void
print_graph_irq(struct trace_iterator *iter, unsigned long addr,
                enum trace_type type, int cpu, pid_t pid, u32 flags)
{
        struct trace_array *tr = iter->tr;
        struct trace_seq *s = &iter->seq;
        struct trace_entry *ent = iter->ent;

        if (addr < (unsigned long)__irqentry_text_start ||
                addr >= (unsigned long)__irqentry_text_end)
                return;

        if (tr->trace_flags & TRACE_ITER_CONTEXT_INFO) {
                /* Absolute time */
                if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                        print_graph_abs_time(iter->ts, s);

                /* Cpu */
                if (flags & TRACE_GRAPH_PRINT_CPU)
                        print_graph_cpu(s, cpu);

                /* Proc */
                if (flags & TRACE_GRAPH_PRINT_PROC) {
                        print_graph_proc(s, pid);
                        trace_seq_puts(s, " | ");
                }

                /* Latency format */
                if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
                        print_graph_lat_fmt(s, ent);
        }

        /* No overhead */
        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_START);

        if (type == TRACE_GRAPH_ENT)
                trace_seq_puts(s, "==========>");
        else
                trace_seq_puts(s, "<==========");

        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_END);
        trace_seq_putc(s, '\n');
}
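
/*
 * Example output (illustrative): an interrupt entering and leaving irq
 * code is marked in the DURATION column rather than given a time:
 *
 *   1)   ==========> |
 *   1)               |  smp_apic_timer_interrupt() {
 *   ...
 *   1)   <========== |
 */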

void
trace_print_graph_duration(unsigned long long duration, struct trace_seq *s)
{
        unsigned long nsecs_rem = do_div(duration, 1000);
        /* log10(ULONG_MAX) + '\0' */
        char usecs_str[21];
        char nsecs_str[5];
        int len;
        int i;

        sprintf(usecs_str, "%lu", (unsigned long) duration);

        /* Print usecs */
        trace_seq_printf(s, "%s", usecs_str);

        len = strlen(usecs_str);

        /* Print nsecs (we don't want to exceed 7 digits) */
        if (len < 7) {
                size_t slen = min_t(size_t, sizeof(nsecs_str), 8UL - len);

                snprintf(nsecs_str, slen, "%03lu", nsecs_rem);
                trace_seq_printf(s, ".%s", nsecs_str);
                len += strlen(nsecs_str) + 1;
        }

        trace_seq_puts(s, " us ");

        /* Print remaining spaces to fit the row's width */
        for (i = len; i < 8; i++)
                trace_seq_putc(s, ' ');
}
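
/*
 * Worked example: a duration of 3141592 ns splits into usecs_str "3141"
 * and nsecs_rem 592, so the column reads "3141.592 us " (len 8, no
 * further padding needed).
 */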

static void
print_graph_duration(struct trace_array *tr, unsigned long long duration,
                     struct trace_seq *s, u32 flags)
{
        if (!(flags & TRACE_GRAPH_PRINT_DURATION) ||
            !(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
                return;

        /* No real data, just filling the column with spaces */
        switch (flags & TRACE_GRAPH_PRINT_FILL_MASK) {
        case FLAGS_FILL_FULL:
                trace_seq_puts(s, "              |  ");
                return;
        case FLAGS_FILL_START:
                trace_seq_puts(s, "  ");
                return;
        case FLAGS_FILL_END:
                trace_seq_puts(s, " |");
                return;
        }

        /* Signal the execution-time overhead to the output */
        if (flags & TRACE_GRAPH_PRINT_OVERHEAD)
                trace_seq_printf(s, "%c ", trace_find_mark(duration));
        else
                trace_seq_puts(s, "  ");

        trace_print_graph_duration(duration, s);
        trace_seq_puts(s, "|  ");
}

/* Case of a leaf function on its call entry */
static enum print_line_t
print_graph_entry_leaf(struct trace_iterator *iter,
                struct ftrace_graph_ent_entry *entry,
                struct ftrace_graph_ret_entry *ret_entry,
                struct trace_seq *s, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct trace_array *tr = iter->tr;
        struct ftrace_graph_ret *graph_ret;
        struct ftrace_graph_ent *call;
        unsigned long long duration;
        int i;

        graph_ret = &ret_entry->ret;
        call = &entry->graph_ent;
        duration = graph_ret->rettime - graph_ret->calltime;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);

                /*
                 * Comments display at + 1 to depth. Since
                 * this is a leaf function, keep the comments
                 * equal to this depth.
                 */
                cpu_data->depth = call->depth - 1;

                /* No need to keep this function around for this depth */
                if (call->depth < FTRACE_RETFUNC_DEPTH)
                        cpu_data->enter_funcs[call->depth] = 0;
        }

        /* Overhead and duration */
        print_graph_duration(tr, duration, s, flags);

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
                trace_seq_putc(s, ' ');

        trace_seq_printf(s, "%ps();\n", (void *)call->func);

        return trace_handle_return(s);
}

static enum print_line_t
print_graph_entry_nested(struct trace_iterator *iter,
                         struct ftrace_graph_ent_entry *entry,
                         struct trace_seq *s, int cpu, u32 flags)
{
        struct ftrace_graph_ent *call = &entry->graph_ent;
        struct fgraph_data *data = iter->private;
        struct trace_array *tr = iter->tr;
        int i;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);
                cpu_data->depth = call->depth;

                /* Save this function pointer to see if the exit matches */
                if (call->depth < FTRACE_RETFUNC_DEPTH)
                        cpu_data->enter_funcs[call->depth] = call->func;
        }

        /* No time */
        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

        /* Function */
        for (i = 0; i < call->depth * TRACE_GRAPH_INDENT; i++)
                trace_seq_putc(s, ' ');

        trace_seq_printf(s, "%ps() {\n", (void *)call->func);

        if (trace_seq_has_overflowed(s))
                return TRACE_TYPE_PARTIAL_LINE;

        /*
         * We already consumed the current entry to check the next one
         * and see if this is a leaf.
         */
        return TRACE_TYPE_NO_CONSUME;
}

static void
print_graph_prologue(struct trace_iterator *iter, struct trace_seq *s,
                     int type, unsigned long addr, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct trace_entry *ent = iter->ent;
        struct trace_array *tr = iter->tr;
        int cpu = iter->cpu;

        /* Pid */
        verif_pid(s, ent->pid, cpu, data);

        if (type)
                /* Interrupt */
                print_graph_irq(iter, addr, type, cpu, ent->pid, flags);

        if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
                return;

        /* Absolute time */
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                print_graph_abs_time(iter->ts, s);

        /* Cpu */
        if (flags & TRACE_GRAPH_PRINT_CPU)
                print_graph_cpu(s, cpu);

        /* Proc */
        if (flags & TRACE_GRAPH_PRINT_PROC) {
                print_graph_proc(s, ent->pid);
                trace_seq_puts(s, " | ");
        }

        /* Latency format */
        if (tr->trace_flags & TRACE_ITER_LATENCY_FMT)
                print_graph_lat_fmt(s, ent);
}

/*
 * Entry check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just entered irq code
 *
 * returns 0 if
 *  - the funcgraph-irqs option is set (interrupts are displayed)
 *  - we are not inside irq code
 */
static int
check_irq_entry(struct trace_iterator *iter, u32 flags,
                unsigned long addr, int depth)
{
        int cpu = iter->cpu;
        int *depth_irq;
        struct fgraph_data *data = iter->private;

        /*
         * If we are either displaying irqs, or we got called as
         * a graph event and private data does not exist,
         * then we bypass the irq check.
         */
        if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
            (!data))
                return 0;

        depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

        /*
         * We are inside the irq code
         */
        if (*depth_irq >= 0)
                return 1;

        if ((addr < (unsigned long)__irqentry_text_start) ||
            (addr >= (unsigned long)__irqentry_text_end))
                return 0;

        /*
         * We are entering irq code.
         */
        *depth_irq = depth;
        return 1;
}

/*
 * Return check for irq code
 *
 * returns 1 if
 *  - we are inside irq code
 *  - we just left irq code
 *
 * returns 0 if
 *  - the funcgraph-irqs option is set (interrupts are displayed)
 *  - we are not inside irq code
 */
static int
check_irq_return(struct trace_iterator *iter, u32 flags, int depth)
{
        int cpu = iter->cpu;
        int *depth_irq;
        struct fgraph_data *data = iter->private;

        /*
         * If we are either displaying irqs, or we got called as
         * a graph event and private data does not exist,
         * then we bypass the irq check.
         */
        if ((flags & TRACE_GRAPH_PRINT_IRQS) ||
            (!data))
                return 0;

        depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

        /*
         * We are not inside the irq code.
         */
        if (*depth_irq == -1)
                return 0;

        /*
         * We are inside the irq code, and this is the returning entry.
         * Let's not trace it and clear the entry depth, since
         * we are out of irq code.
         *
         * This condition ensures that we 'leave the irq code' once
         * we are out of the entry depth. Thus protecting us from
         * the RETURN entry loss.
         */
        if (*depth_irq >= depth) {
                *depth_irq = -1;
                return 1;
        }

        /*
         * We are inside the irq code, and this is not the entry.
         */
        return 1;
}

static enum print_line_t
print_graph_entry(struct ftrace_graph_ent_entry *field, struct trace_seq *s,
                        struct trace_iterator *iter, u32 flags)
{
        struct fgraph_data *data = iter->private;
        struct ftrace_graph_ent *call = &field->graph_ent;
        struct ftrace_graph_ret_entry *leaf_ret;
        static enum print_line_t ret;
        int cpu = iter->cpu;

        if (check_irq_entry(iter, flags, call->func, call->depth))
                return TRACE_TYPE_HANDLED;

        print_graph_prologue(iter, s, TRACE_GRAPH_ENT, call->func, flags);

        leaf_ret = get_return_for_leaf(iter, field);
        if (leaf_ret)
                ret = print_graph_entry_leaf(iter, field, leaf_ret, s, flags);
        else
                ret = print_graph_entry_nested(iter, field, s, cpu, flags);

        if (data) {
                /*
                 * If we failed to write our output, then we need to make
                 * a note of it, because we already consumed our entry.
                 */
                if (s->full) {
                        data->failed = 1;
                        data->cpu = cpu;
                } else
                        data->failed = 0;
        }

        return ret;
}

static enum print_line_t
print_graph_return(struct ftrace_graph_ret *trace, struct trace_seq *s,
                   struct trace_entry *ent, struct trace_iterator *iter,
                   u32 flags)
{
        unsigned long long duration = trace->rettime - trace->calltime;
        struct fgraph_data *data = iter->private;
        struct trace_array *tr = iter->tr;
        pid_t pid = ent->pid;
        int cpu = iter->cpu;
        int func_match = 1;
        int i;

        if (check_irq_return(iter, flags, trace->depth))
                return TRACE_TYPE_HANDLED;

        if (data) {
                struct fgraph_cpu_data *cpu_data;
                int cpu = iter->cpu;

                cpu_data = per_cpu_ptr(data->cpu_data, cpu);

                /*
                 * Comments display at + 1 to depth. This is the
                 * return from a function, we now want the comments
                 * to display at the same level of the bracket.
                 */
                cpu_data->depth = trace->depth - 1;

                if (trace->depth < FTRACE_RETFUNC_DEPTH) {
                        if (cpu_data->enter_funcs[trace->depth] != trace->func)
                                func_match = 0;
                        cpu_data->enter_funcs[trace->depth] = 0;
                }
        }

        print_graph_prologue(iter, s, 0, 0, flags);

        /* Overhead and duration */
        print_graph_duration(tr, duration, s, flags);

        /* Closing brace */
        for (i = 0; i < trace->depth * TRACE_GRAPH_INDENT; i++)
                trace_seq_putc(s, ' ');

        /*
         * If the return function does not have a matching entry,
         * then the entry was lost. Instead of just printing
         * the '}' and letting the user guess what function this
         * belongs to, write out the function name. Always do
         * that if the funcgraph-tail option is enabled.
         */
        if (func_match && !(flags & TRACE_GRAPH_PRINT_TAIL))
                trace_seq_puts(s, "}\n");
        else
                trace_seq_printf(s, "} /* %ps */\n", (void *)trace->func);

        /* Overrun */
        if (flags & TRACE_GRAPH_PRINT_OVERRUN)
                trace_seq_printf(s, " (Overruns: %lu)\n",
                                 trace->overrun);

        print_graph_irq(iter, trace->func, TRACE_GRAPH_RET,
                        cpu, pid, flags);

        return trace_handle_return(s);
}

static enum print_line_t
print_graph_comment(struct trace_seq *s, struct trace_entry *ent,
                    struct trace_iterator *iter, u32 flags)
{
        struct trace_array *tr = iter->tr;
        unsigned long sym_flags = (tr->trace_flags & TRACE_ITER_SYM_MASK);
        struct fgraph_data *data = iter->private;
        struct trace_event *event;
        int depth = 0;
        int ret;
        int i;

        if (data)
                depth = per_cpu_ptr(data->cpu_data, iter->cpu)->depth;

        print_graph_prologue(iter, s, 0, 0, flags);

        /* No time */
        print_graph_duration(tr, 0, s, flags | FLAGS_FILL_FULL);

        /* Indentation */
        if (depth > 0)
                for (i = 0; i < (depth + 1) * TRACE_GRAPH_INDENT; i++)
                        trace_seq_putc(s, ' ');

        /* The comment */
        trace_seq_puts(s, "/* ");

        switch (iter->ent->type) {
        case TRACE_BPRINT:
                ret = trace_print_bprintk_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        case TRACE_PRINT:
                ret = trace_print_printk_msg_only(iter);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
                break;
        default:
                event = ftrace_find_event(ent->type);
                if (!event)
                        return TRACE_TYPE_UNHANDLED;

                ret = event->funcs->trace(iter, sym_flags, event);
                if (ret != TRACE_TYPE_HANDLED)
                        return ret;
        }

        if (trace_seq_has_overflowed(s))
                goto out;

        /* Strip ending newline */
        if (s->buffer[s->seq.len - 1] == '\n') {
                s->buffer[s->seq.len - 1] = '\0';
                s->seq.len--;
        }

        trace_seq_puts(s, " */\n");
 out:
        return trace_handle_return(s);
}


enum print_line_t
print_graph_function_flags(struct trace_iterator *iter, u32 flags)
{
        struct ftrace_graph_ent_entry *field;
        struct fgraph_data *data = iter->private;
        struct trace_entry *entry = iter->ent;
        struct trace_seq *s = &iter->seq;
        int cpu = iter->cpu;
        int ret;

        if (data && per_cpu_ptr(data->cpu_data, cpu)->ignore) {
                per_cpu_ptr(data->cpu_data, cpu)->ignore = 0;
                return TRACE_TYPE_HANDLED;
        }

        /*
         * If the last output failed, there's a possibility we need
         * to print out the missing entry which would never go out.
         */
        if (data && data->failed) {
                field = &data->ent;
                iter->cpu = data->cpu;
                ret = print_graph_entry(field, s, iter, flags);
                if (ret == TRACE_TYPE_HANDLED && iter->cpu != cpu) {
                        per_cpu_ptr(data->cpu_data, iter->cpu)->ignore = 1;
                        ret = TRACE_TYPE_NO_CONSUME;
                }
                iter->cpu = cpu;
                return ret;
        }

        switch (entry->type) {
        case TRACE_GRAPH_ENT: {
                /*
                 * print_graph_entry() may consume the current event
                 * (which would invalidate @field), so we need to save it.
                 * sizeof(struct ftrace_graph_ent_entry) is very small,
                 * so it can be safely saved on the stack.
                 */
                struct ftrace_graph_ent_entry saved;
                trace_assign_type(field, entry);
                saved = *field;
                return print_graph_entry(&saved, s, iter, flags);
        }
        case TRACE_GRAPH_RET: {
                struct ftrace_graph_ret_entry *field;
                trace_assign_type(field, entry);
                return print_graph_return(&field->ret, s, entry, iter, flags);
        }
        case TRACE_STACK:
        case TRACE_FN:
                /* don't trace stack and functions as comments */
                return TRACE_TYPE_UNHANDLED;

        default:
                return print_graph_comment(s, entry, iter, flags);
        }

        return TRACE_TYPE_HANDLED;
}

static enum print_line_t
print_graph_function(struct trace_iterator *iter)
{
        return print_graph_function_flags(iter, tracer_flags.val);
}

static enum print_line_t
print_graph_function_event(struct trace_iterator *iter, int flags,
                           struct trace_event *event)
{
        return print_graph_function(iter);
}

static void print_lat_header(struct seq_file *s, u32 flags)
{
        static const char spaces[] = "                " /* 16 spaces */
                "    "                                  /* 4 spaces */
                "                 ";                    /* 17 spaces */
        int size = 0;

        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                size += 16;
        if (flags & TRACE_GRAPH_PRINT_CPU)
                size += 4;
        if (flags & TRACE_GRAPH_PRINT_PROC)
                size += 17;

        seq_printf(s, "#%.*s  _-----=> irqs-off        \n", size, spaces);
        seq_printf(s, "#%.*s / _----=> need-resched    \n", size, spaces);
        seq_printf(s, "#%.*s| / _---=> hardirq/softirq \n", size, spaces);
        seq_printf(s, "#%.*s|| / _--=> preempt-depth   \n", size, spaces);
        seq_printf(s, "#%.*s||| /                      \n", size, spaces);
}

static void __print_graph_headers_flags(struct trace_array *tr,
                                        struct seq_file *s, u32 flags)
{
        int lat = tr->trace_flags & TRACE_ITER_LATENCY_FMT;

        if (lat)
                print_lat_header(s, flags);

        /* 1st line */
        seq_putc(s, '#');
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                seq_puts(s, "     TIME       ");
        if (flags & TRACE_GRAPH_PRINT_CPU)
                seq_puts(s, " CPU");
        if (flags & TRACE_GRAPH_PRINT_PROC)
                seq_puts(s, "  TASK/PID       ");
        if (lat)
                seq_puts(s, "||||");
        if (flags & TRACE_GRAPH_PRINT_DURATION)
                seq_puts(s, "  DURATION   ");
        seq_puts(s, "               FUNCTION CALLS\n");

        /* 2nd line */
        seq_putc(s, '#');
        if (flags & TRACE_GRAPH_PRINT_ABS_TIME)
                seq_puts(s, "      |         ");
        if (flags & TRACE_GRAPH_PRINT_CPU)
                seq_puts(s, " |  ");
        if (flags & TRACE_GRAPH_PRINT_PROC)
                seq_puts(s, "   |    |        ");
        if (lat)
                seq_puts(s, "||||");
        if (flags & TRACE_GRAPH_PRINT_DURATION)
                seq_puts(s, "   |   |      ");
        seq_puts(s, "               |   |   |   |\n");
}

static void print_graph_headers(struct seq_file *s)
{
        print_graph_headers_flags(s, tracer_flags.val);
}

void print_graph_headers_flags(struct seq_file *s, u32 flags)
{
        struct trace_iterator *iter = s->private;
        struct trace_array *tr = iter->tr;

        if (!(tr->trace_flags & TRACE_ITER_CONTEXT_INFO))
                return;

        if (tr->trace_flags & TRACE_ITER_LATENCY_FMT) {
                /* print nothing if the buffers are empty */
                if (trace_empty(iter))
                        return;

                print_trace_header(s, iter);
        }

        __print_graph_headers_flags(tr, s, flags);
}

void graph_trace_open(struct trace_iterator *iter)
{
        /* pid and depth on the last trace processed */
        struct fgraph_data *data;
        gfp_t gfpflags;
        int cpu;

        iter->private = NULL;

        /* We can be called in atomic context via ftrace_dump() */
        gfpflags = (in_atomic() || irqs_disabled()) ? GFP_ATOMIC : GFP_KERNEL;

        data = kzalloc(sizeof(*data), gfpflags);
        if (!data)
                goto out_err;

        data->cpu_data = alloc_percpu_gfp(struct fgraph_cpu_data, gfpflags);
        if (!data->cpu_data)
                goto out_err_free;

        for_each_possible_cpu(cpu) {
                pid_t *pid = &(per_cpu_ptr(data->cpu_data, cpu)->last_pid);
                int *depth = &(per_cpu_ptr(data->cpu_data, cpu)->depth);
                int *ignore = &(per_cpu_ptr(data->cpu_data, cpu)->ignore);
                int *depth_irq = &(per_cpu_ptr(data->cpu_data, cpu)->depth_irq);

                *pid = -1;
                *depth = 0;
                *ignore = 0;
                *depth_irq = -1;
        }

        iter->private = data;

        return;

 out_err_free:
        kfree(data);
 out_err:
        pr_warn("function graph tracer: not enough memory\n");
}

void graph_trace_close(struct trace_iterator *iter)
{
        struct fgraph_data *data = iter->private;

        if (data) {
                free_percpu(data->cpu_data);
                kfree(data);
        }
}

static int
func_graph_set_flag(struct trace_array *tr, u32 old_flags, u32 bit, int set)
{
        if (bit == TRACE_GRAPH_PRINT_IRQS)
                ftrace_graph_skip_irqs = !set;

        if (bit == TRACE_GRAPH_SLEEP_TIME)
                ftrace_graph_sleep_time_control(set);

        if (bit == TRACE_GRAPH_GRAPH_TIME)
                ftrace_graph_graph_time_control(set);

        return 0;
}

static struct trace_event_functions graph_functions = {
        .trace          = print_graph_function_event,
};

static struct trace_event graph_trace_entry_event = {
        .type           = TRACE_GRAPH_ENT,
        .funcs          = &graph_functions,
};

static struct trace_event graph_trace_ret_event = {
        .type           = TRACE_GRAPH_RET,
        .funcs          = &graph_functions,
};

static struct tracer graph_trace __tracer_data = {
        .name           = "function_graph",
        .update_thresh  = graph_trace_update_thresh,
        .open           = graph_trace_open,
        .pipe_open      = graph_trace_open,
        .close          = graph_trace_close,
        .pipe_close     = graph_trace_close,
        .init           = graph_trace_init,
        .reset          = graph_trace_reset,
        .print_line     = print_graph_function,
        .print_header   = print_graph_headers,
        .flags          = &tracer_flags,
        .set_flag       = func_graph_set_flag,
#ifdef CONFIG_FTRACE_SELFTEST
        .selftest       = trace_selftest_startup_function_graph,
#endif
};
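
/*
 * Usage note (illustrative; the tracefs mount point may vary): the
 * tracer defined above is registered in init_graph_trace() below and
 * selected at runtime with:
 *
 *   echo function_graph > /sys/kernel/tracing/current_tracer
 *   cat /sys/kernel/tracing/trace
 */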


static ssize_t
graph_depth_write(struct file *filp, const char __user *ubuf, size_t cnt,
                  loff_t *ppos)
{
        unsigned long val;
        int ret;

        ret = kstrtoul_from_user(ubuf, cnt, 10, &val);
        if (ret)
                return ret;

        max_depth = val;

        *ppos += cnt;

        return cnt;
}

static ssize_t
graph_depth_read(struct file *filp, char __user *ubuf, size_t cnt,
                 loff_t *ppos)
{
        char buf[15]; /* More than enough to hold UINT_MAX + "\n" */
        int n;

        n = sprintf(buf, "%d\n", max_depth);

        return simple_read_from_buffer(ubuf, cnt, ppos, buf, n);
}

static const struct file_operations graph_depth_fops = {
        .open           = tracing_open_generic,
        .write          = graph_depth_write,
        .read           = graph_depth_read,
        .llseek         = generic_file_llseek,
};
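
/*
 * Usage note (illustrative): the "max_graph_depth" file created below
 * bounds how deep the call graph is recorded; 0 means no limit, e.g.:
 *
 *   echo 3 > /sys/kernel/tracing/max_graph_depth
 *
 * The limit is enforced by the max_depth check in trace_graph_entry().
 */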

static __init int init_graph_tracefs(void)
{
        struct dentry *d_tracer;

        d_tracer = tracing_init_dentry();
        if (IS_ERR(d_tracer))
                return 0;

        trace_create_file("max_graph_depth", 0644, d_tracer,
                          NULL, &graph_depth_fops);

        return 0;
}
fs_initcall(init_graph_tracefs);

static __init int init_graph_trace(void)
{
        max_bytes_for_cpu = snprintf(NULL, 0, "%d", nr_cpu_ids - 1);

        if (!register_trace_event(&graph_trace_entry_event)) {
                pr_warn("Warning: could not register graph trace events\n");
                return 1;
        }

        if (!register_trace_event(&graph_trace_ret_event)) {
                pr_warn("Warning: could not register graph trace events\n");
                return 1;
        }

        return register_tracer(&graph_trace);
}

core_initcall(init_graph_trace);