ftrace: Add ops parameter to ftrace_startup/shutdown functions
[cascardo/linux.git] / kernel / trace / ftrace.c
1 /*
2  * Infrastructure for profiling code inserted by 'gcc -pg'.
3  *
4  * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
5  * Copyright (C) 2004-2008 Ingo Molnar <mingo@redhat.com>
6  *
7  * Originally ported from the -rt patch by:
8  *   Copyright (C) 2007 Arnaldo Carvalho de Melo <acme@redhat.com>
9  *
10  * Based on code in the latency_tracer, that is:
11  *
12  *  Copyright (C) 2004-2006 Ingo Molnar
13  *  Copyright (C) 2004 William Lee Irwin III
14  */
15
16 #include <linux/stop_machine.h>
17 #include <linux/clocksource.h>
18 #include <linux/kallsyms.h>
19 #include <linux/seq_file.h>
20 #include <linux/suspend.h>
21 #include <linux/debugfs.h>
22 #include <linux/hardirq.h>
23 #include <linux/kthread.h>
24 #include <linux/uaccess.h>
25 #include <linux/ftrace.h>
26 #include <linux/sysctl.h>
27 #include <linux/slab.h>
28 #include <linux/ctype.h>
29 #include <linux/list.h>
30 #include <linux/hash.h>
31 #include <linux/rcupdate.h>
32
33 #include <trace/events/sched.h>
34
35 #include <asm/ftrace.h>
36 #include <asm/setup.h>
37
38 #include "trace_output.h"
39 #include "trace_stat.h"
40
41 #define FTRACE_WARN_ON(cond)                    \
42         ({                                      \
43                 int ___r = cond;                \
44                 if (WARN_ON(___r))              \
45                         ftrace_kill();          \
46                 ___r;                           \
47         })
48
49 #define FTRACE_WARN_ON_ONCE(cond)               \
50         ({                                      \
51                 int ___r = cond;                \
52                 if (WARN_ON_ONCE(___r))         \
53                         ftrace_kill();          \
54                 ___r;                           \
55         })
56
57 /* hash bits for specific function selection */
58 #define FTRACE_HASH_BITS 7
59 #define FTRACE_FUNC_HASHSIZE (1 << FTRACE_HASH_BITS)
60 #define FTRACE_HASH_DEFAULT_BITS 10
61 #define FTRACE_HASH_MAX_BITS 12
62
63 /* ftrace_enabled is a method to turn ftrace on or off */
64 int ftrace_enabled __read_mostly;
65 static int last_ftrace_enabled;
66
67 /* Quick disabling of function tracer. */
68 int function_trace_stop;
69
70 /* List for set_ftrace_pid's pids. */
71 LIST_HEAD(ftrace_pids);
72 struct ftrace_pid {
73         struct list_head list;
74         struct pid *pid;
75 };
76
77 /*
78  * ftrace_disabled is set when an anomaly is discovered.
79  * ftrace_disabled is much stronger than ftrace_enabled.
80  */
81 static int ftrace_disabled __read_mostly;
82
83 static DEFINE_MUTEX(ftrace_lock);
84
85 static struct ftrace_ops ftrace_list_end __read_mostly =
86 {
87         .func           = ftrace_stub,
88 };
89
90 static struct ftrace_ops *ftrace_list __read_mostly = &ftrace_list_end;
91 ftrace_func_t ftrace_trace_function __read_mostly = ftrace_stub;
92 ftrace_func_t __ftrace_trace_function __read_mostly = ftrace_stub;
93 ftrace_func_t ftrace_pid_function __read_mostly = ftrace_stub;
94
95 /*
96  * Traverse the ftrace_list, invoking all entries.  The reason that we
97  * can use rcu_dereference_raw() is that elements removed from this list
98  * are simply leaked, so there is no need to interact with a grace-period
99  * mechanism.  The rcu_dereference_raw() calls are needed to handle
100  * concurrent insertions into the ftrace_list.
101  *
102  * Silly Alpha and silly pointer-speculation compiler optimizations!
103  */
104 static void ftrace_list_func(unsigned long ip, unsigned long parent_ip)
105 {
106         struct ftrace_ops *op = rcu_dereference_raw(ftrace_list); /*see above*/
107
108         while (op != &ftrace_list_end) {
109                 op->func(ip, parent_ip);
110                 op = rcu_dereference_raw(op->next); /*see above*/
111         };
112 }
113
114 static void ftrace_pid_func(unsigned long ip, unsigned long parent_ip)
115 {
116         if (!test_tsk_trace_trace(current))
117                 return;
118
119         ftrace_pid_function(ip, parent_ip);
120 }
121
122 static void set_ftrace_pid_function(ftrace_func_t func)
123 {
124         /* do not set ftrace_pid_function to itself! */
125         if (func != ftrace_pid_func)
126                 ftrace_pid_function = func;
127 }
128
129 /**
130  * clear_ftrace_function - reset the ftrace function
131  *
132  * This NULLs the ftrace function and in essence stops
133  * tracing.  There may be a lag before tracing fully stops.
134  */
135 void clear_ftrace_function(void)
136 {
137         ftrace_trace_function = ftrace_stub;
138         __ftrace_trace_function = ftrace_stub;
139         ftrace_pid_function = ftrace_stub;
140 }
141
142 #ifndef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
143 /*
144  * For those archs that do not test function_trace_stop in their
145  * mcount call site, we need to do it from C.
146  */
147 static void ftrace_test_stop_func(unsigned long ip, unsigned long parent_ip)
148 {
149         if (function_trace_stop)
150                 return;
151
152         __ftrace_trace_function(ip, parent_ip);
153 }
154 #endif
155
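/*
 * Recompute which callback the mcount call sites should invoke: a single
 * ops' handler when only one is registered, the list walker otherwise,
 * wrapped by the pid filter when set_ftrace_pid is in use.
 */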
156 static void update_ftrace_function(void)
157 {
158         ftrace_func_t func;
159
160         /*
161          * If there's only one function registered, then call that
162          * function directly. Otherwise, we need to iterate over the
163          * registered callers.
164          */
165         if (ftrace_list == &ftrace_list_end ||
166             ftrace_list->next == &ftrace_list_end)
167                 func = ftrace_list->func;
168         else
169                 func = ftrace_list_func;
170
171         /* If we filter on pids, update to use the pid function */
172         if (!list_empty(&ftrace_pids)) {
173                 set_ftrace_pid_function(func);
174                 func = ftrace_pid_func;
175         }
176 #ifdef CONFIG_HAVE_FUNCTION_TRACE_MCOUNT_TEST
177         ftrace_trace_function = func;
178 #else
179         __ftrace_trace_function = func;
180         ftrace_trace_function = ftrace_test_stop_func;
181 #endif
182 }
183
184 static int __register_ftrace_function(struct ftrace_ops *ops)
185 {
186         ops->next = ftrace_list;
187         /*
188          * We are entering ops into the ftrace_list but another
189          * CPU might be walking that list. We need to make sure
190          * the ops->next pointer is valid before another CPU sees
191          * the ops pointer included into the ftrace_list.
192          */
193         rcu_assign_pointer(ftrace_list, ops);
194
195         if (ftrace_enabled)
196                 update_ftrace_function();
197
198         return 0;
199 }
200
201 static int __unregister_ftrace_function(struct ftrace_ops *ops)
202 {
203         struct ftrace_ops **p;
204
205         /*
206          * If we are removing the last function, then simply point
207          * to the ftrace_stub.
208          */
209         if (ftrace_list == ops && ops->next == &ftrace_list_end) {
210                 ftrace_trace_function = ftrace_stub;
211                 ftrace_list = &ftrace_list_end;
212                 return 0;
213         }
214
215         for (p = &ftrace_list; *p != &ftrace_list_end; p = &(*p)->next)
216                 if (*p == ops)
217                         break;
218
219         if (*p != ops)
220                 return -1;
221
222         *p = (*p)->next;
223
224         if (ftrace_enabled)
225                 update_ftrace_function();
226
227         return 0;
228 }
229
230 static void ftrace_update_pid_func(void)
231 {
232         /* Only do something if we are tracing something */
233         if (ftrace_trace_function == ftrace_stub)
234                 return;
235
236         update_ftrace_function();
237 }
238
239 #ifdef CONFIG_FUNCTION_PROFILER
240 struct ftrace_profile {
241         struct hlist_node               node;
242         unsigned long                   ip;
243         unsigned long                   counter;
244 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
245         unsigned long long              time;
246         unsigned long long              time_squared;
247 #endif
248 };
249
250 struct ftrace_profile_page {
251         struct ftrace_profile_page      *next;
252         unsigned long                   index;
253         struct ftrace_profile           records[];
254 };
255
256 struct ftrace_profile_stat {
257         atomic_t                        disabled;
258         struct hlist_head               *hash;
259         struct ftrace_profile_page      *pages;
260         struct ftrace_profile_page      *start;
261         struct tracer_stat              stat;
262 };
263
264 #define PROFILE_RECORDS_SIZE                                            \
265         (PAGE_SIZE - offsetof(struct ftrace_profile_page, records))
266
267 #define PROFILES_PER_PAGE                                       \
268         (PROFILE_RECORDS_SIZE / sizeof(struct ftrace_profile))
269
270 static int ftrace_profile_bits __read_mostly;
271 static int ftrace_profile_enabled __read_mostly;
272
273 /* ftrace_profile_lock - synchronize the enable and disable of the profiler */
274 static DEFINE_MUTEX(ftrace_profile_lock);
275
276 static DEFINE_PER_CPU(struct ftrace_profile_stat, ftrace_profile_stats);
277
278 #define FTRACE_PROFILE_HASH_SIZE 1024 /* must be power of 2 */
279
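/*
 * Stat iterator: advance to the next profile record, moving on to the
 * next profile page once the current page's records are exhausted.
 */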
280 static void *
281 function_stat_next(void *v, int idx)
282 {
283         struct ftrace_profile *rec = v;
284         struct ftrace_profile_page *pg;
285
286         pg = (struct ftrace_profile_page *)((unsigned long)rec & PAGE_MASK);
287
288  again:
289         if (idx != 0)
290                 rec++;
291
292         if ((void *)rec >= (void *)&pg->records[pg->index]) {
293                 pg = pg->next;
294                 if (!pg)
295                         return NULL;
296                 rec = &pg->records[0];
297                 if (!rec->counter)
298                         goto again;
299         }
300
301         return rec;
302 }
303
304 static void *function_stat_start(struct tracer_stat *trace)
305 {
306         struct ftrace_profile_stat *stat =
307                 container_of(trace, struct ftrace_profile_stat, stat);
308
309         if (!stat || !stat->start)
310                 return NULL;
311
312         return function_stat_next(&stat->start->records[0], 0);
313 }
314
315 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
316 /* function graph compares on total time */
317 static int function_stat_cmp(void *p1, void *p2)
318 {
319         struct ftrace_profile *a = p1;
320         struct ftrace_profile *b = p2;
321
322         if (a->time < b->time)
323                 return -1;
324         if (a->time > b->time)
325                 return 1;
326         else
327                 return 0;
328 }
329 #else
330 /* without function graph, compare against hit counts */
331 static int function_stat_cmp(void *p1, void *p2)
332 {
333         struct ftrace_profile *a = p1;
334         struct ftrace_profile *b = p2;
335
336         if (a->counter < b->counter)
337                 return -1;
338         if (a->counter > b->counter)
339                 return 1;
340         else
341                 return 0;
342 }
343 #endif
344
345 static int function_stat_headers(struct seq_file *m)
346 {
347 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
348         seq_printf(m, "  Function                               "
349                    "Hit    Time            Avg             s^2\n"
350                       "  --------                               "
351                    "---    ----            ---             ---\n");
352 #else
353         seq_printf(m, "  Function                               Hit\n"
354                       "  --------                               ---\n");
355 #endif
356         return 0;
357 }
358
359 static int function_stat_show(struct seq_file *m, void *v)
360 {
361         struct ftrace_profile *rec = v;
362         char str[KSYM_SYMBOL_LEN];
363         int ret = 0;
364 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
365         static struct trace_seq s;
366         unsigned long long avg;
367         unsigned long long stddev;
368 #endif
369         mutex_lock(&ftrace_profile_lock);
370
371         /* we raced with function_profile_reset() */
372         if (unlikely(rec->counter == 0)) {
373                 ret = -EBUSY;
374                 goto out;
375         }
376
377         kallsyms_lookup(rec->ip, NULL, NULL, NULL, str);
378         seq_printf(m, "  %-30.30s  %10lu", str, rec->counter);
379
380 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
381         seq_printf(m, "    ");
382         avg = rec->time;
383         do_div(avg, rec->counter);
384
385         /* Sample variance (s^2) */
386         if (rec->counter <= 1)
387                 stddev = 0;
388         else {
389                 stddev = rec->time_squared - rec->counter * avg * avg;
390                 /*
391                  * Divide only by 1000 for the ns^2 -> us^2 conversion.
392                  * trace_print_graph_duration() will divide by 1000 again.
393                  */
394                 do_div(stddev, (rec->counter - 1) * 1000);
395         }
396
397         trace_seq_init(&s);
398         trace_print_graph_duration(rec->time, &s);
399         trace_seq_puts(&s, "    ");
400         trace_print_graph_duration(avg, &s);
401         trace_seq_puts(&s, "    ");
402         trace_print_graph_duration(stddev, &s);
403         trace_print_seq(m, &s);
404 #endif
405         seq_putc(m, '\n');
406 out:
407         mutex_unlock(&ftrace_profile_lock);
408
409         return ret;
410 }
411
412 static void ftrace_profile_reset(struct ftrace_profile_stat *stat)
413 {
414         struct ftrace_profile_page *pg;
415
416         pg = stat->pages = stat->start;
417
418         while (pg) {
419                 memset(pg->records, 0, PROFILE_RECORDS_SIZE);
420                 pg->index = 0;
421                 pg = pg->next;
422         }
423
424         memset(stat->hash, 0,
425                FTRACE_PROFILE_HASH_SIZE * sizeof(struct hlist_head));
426 }
427
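/*
 * Preallocate the chain of profile pages needed to hold one record per
 * traced function (20000 is used as an estimate without dynamic ftrace).
 */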
428 int ftrace_profile_pages_init(struct ftrace_profile_stat *stat)
429 {
430         struct ftrace_profile_page *pg;
431         int functions;
432         int pages;
433         int i;
434
435         /* If we already allocated, do nothing */
436         if (stat->pages)
437                 return 0;
438
439         stat->pages = (void *)get_zeroed_page(GFP_KERNEL);
440         if (!stat->pages)
441                 return -ENOMEM;
442
443 #ifdef CONFIG_DYNAMIC_FTRACE
444         functions = ftrace_update_tot_cnt;
445 #else
446         /*
447          * We do not know the number of functions that exist because
448          * dynamic tracing is what counts them. From past experience
449          * we have around 20K functions. That should be more than enough.
450          * It is highly unlikely we will execute every function in
451          * the kernel.
452          */
453         functions = 20000;
454 #endif
455
456         pg = stat->start = stat->pages;
457
458         pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE);
459
460         for (i = 0; i < pages; i++) {
461                 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
462                 if (!pg->next)
463                         goto out_free;
464                 pg = pg->next;
465         }
466
467         return 0;
468
469  out_free:
470         pg = stat->start;
471         while (pg) {
472                 unsigned long tmp = (unsigned long)pg;
473
474                 pg = pg->next;
475                 free_page(tmp);
476         }
477
478         free_page((unsigned long)stat->pages);
479         stat->pages = NULL;
480         stat->start = NULL;
481
482         return -ENOMEM;
483 }
484
485 static int ftrace_profile_init_cpu(int cpu)
486 {
487         struct ftrace_profile_stat *stat;
488         int size;
489
490         stat = &per_cpu(ftrace_profile_stats, cpu);
491
492         if (stat->hash) {
493                 /* If the profile is already created, simply reset it */
494                 ftrace_profile_reset(stat);
495                 return 0;
496         }
497
498         /*
499          * We are profiling all functions, but usually only a few thousand
500          * functions are hit. We'll make a hash of 1024 items.
501          */
502         size = FTRACE_PROFILE_HASH_SIZE;
503
504         stat->hash = kzalloc(sizeof(struct hlist_head) * size, GFP_KERNEL);
505
506         if (!stat->hash)
507                 return -ENOMEM;
508
509         if (!ftrace_profile_bits) {
510                 size--;
511
512                 for (; size; size >>= 1)
513                         ftrace_profile_bits++;
514         }
515
516         /* Preallocate the function profiling pages */
517         if (ftrace_profile_pages_init(stat) < 0) {
518                 kfree(stat->hash);
519                 stat->hash = NULL;
520                 return -ENOMEM;
521         }
522
523         return 0;
524 }
525
526 static int ftrace_profile_init(void)
527 {
528         int cpu;
529         int ret = 0;
530
531         for_each_online_cpu(cpu) {
532                 ret = ftrace_profile_init_cpu(cpu);
533                 if (ret)
534                         break;
535         }
536
537         return ret;
538 }
539
540 /* interrupts must be disabled */
541 static struct ftrace_profile *
542 ftrace_find_profiled_func(struct ftrace_profile_stat *stat, unsigned long ip)
543 {
544         struct ftrace_profile *rec;
545         struct hlist_head *hhd;
546         struct hlist_node *n;
547         unsigned long key;
548
549         key = hash_long(ip, ftrace_profile_bits);
550         hhd = &stat->hash[key];
551
552         if (hlist_empty(hhd))
553                 return NULL;
554
555         hlist_for_each_entry_rcu(rec, n, hhd, node) {
556                 if (rec->ip == ip)
557                         return rec;
558         }
559
560         return NULL;
561 }
562
563 static void ftrace_add_profile(struct ftrace_profile_stat *stat,
564                                struct ftrace_profile *rec)
565 {
566         unsigned long key;
567
568         key = hash_long(rec->ip, ftrace_profile_bits);
569         hlist_add_head_rcu(&rec->node, &stat->hash[key]);
570 }
571
572 /*
573  * The memory is already allocated; this simply finds a new record to use.
574  */
575 static struct ftrace_profile *
576 ftrace_profile_alloc(struct ftrace_profile_stat *stat, unsigned long ip)
577 {
578         struct ftrace_profile *rec = NULL;
579
580         /* prevent recursion (from NMIs) */
581         if (atomic_inc_return(&stat->disabled) != 1)
582                 goto out;
583
584         /*
585          * Try to find the function again since an NMI
586          * could have added it
587          */
588         rec = ftrace_find_profiled_func(stat, ip);
589         if (rec)
590                 goto out;
591
592         if (stat->pages->index == PROFILES_PER_PAGE) {
593                 if (!stat->pages->next)
594                         goto out;
595                 stat->pages = stat->pages->next;
596         }
597
598         rec = &stat->pages->records[stat->pages->index++];
599         rec->ip = ip;
600         ftrace_add_profile(stat, rec);
601
602  out:
603         atomic_dec(&stat->disabled);
604
605         return rec;
606 }
607
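/*
 * Profiler callback: with interrupts disabled, look up (or allocate) the
 * per-cpu profile record for @ip and bump its hit counter.
 */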
608 static void
609 function_profile_call(unsigned long ip, unsigned long parent_ip)
610 {
611         struct ftrace_profile_stat *stat;
612         struct ftrace_profile *rec;
613         unsigned long flags;
614
615         if (!ftrace_profile_enabled)
616                 return;
617
618         local_irq_save(flags);
619
620         stat = &__get_cpu_var(ftrace_profile_stats);
621         if (!stat->hash || !ftrace_profile_enabled)
622                 goto out;
623
624         rec = ftrace_find_profiled_func(stat, ip);
625         if (!rec) {
626                 rec = ftrace_profile_alloc(stat, ip);
627                 if (!rec)
628                         goto out;
629         }
630
631         rec->counter++;
632  out:
633         local_irq_restore(flags);
634 }
635
636 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
637 static int profile_graph_entry(struct ftrace_graph_ent *trace)
638 {
639         function_profile_call(trace->func, 0);
640         return 1;
641 }
642
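/*
 * Graph return handler for the profiler: compute how long the function ran
 * (excluding time spent in children unless the graph-time option is set)
 * and accumulate it into the matching profile record.
 */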
643 static void profile_graph_return(struct ftrace_graph_ret *trace)
644 {
645         struct ftrace_profile_stat *stat;
646         unsigned long long calltime;
647         struct ftrace_profile *rec;
648         unsigned long flags;
649
650         local_irq_save(flags);
651         stat = &__get_cpu_var(ftrace_profile_stats);
652         if (!stat->hash || !ftrace_profile_enabled)
653                 goto out;
654
655         /* If the calltime was zeroed, ignore it */
656         if (!trace->calltime)
657                 goto out;
658
659         calltime = trace->rettime - trace->calltime;
660
661         if (!(trace_flags & TRACE_ITER_GRAPH_TIME)) {
662                 int index;
663
664                 index = trace->depth;
665
666                 /* Append this call time to the parent time to subtract */
667                 if (index)
668                         current->ret_stack[index - 1].subtime += calltime;
669
670                 if (current->ret_stack[index].subtime < calltime)
671                         calltime -= current->ret_stack[index].subtime;
672                 else
673                         calltime = 0;
674         }
675
676         rec = ftrace_find_profiled_func(stat, trace->func);
677         if (rec) {
678                 rec->time += calltime;
679                 rec->time_squared += calltime * calltime;
680         }
681
682  out:
683         local_irq_restore(flags);
684 }
685
686 static int register_ftrace_profiler(void)
687 {
688         return register_ftrace_graph(&profile_graph_return,
689                                      &profile_graph_entry);
690 }
691
692 static void unregister_ftrace_profiler(void)
693 {
694         unregister_ftrace_graph();
695 }
696 #else
697 static struct ftrace_ops ftrace_profile_ops __read_mostly =
698 {
699         .func           = function_profile_call,
700 };
701
702 static int register_ftrace_profiler(void)
703 {
704         return register_ftrace_function(&ftrace_profile_ops);
705 }
706
707 static void unregister_ftrace_profiler(void)
708 {
709         unregister_ftrace_function(&ftrace_profile_ops);
710 }
711 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
712
713 static ssize_t
714 ftrace_profile_write(struct file *filp, const char __user *ubuf,
715                      size_t cnt, loff_t *ppos)
716 {
717         unsigned long val;
718         char buf[64];           /* big enough to hold a number */
719         int ret;
720
721         if (cnt >= sizeof(buf))
722                 return -EINVAL;
723
724         if (copy_from_user(&buf, ubuf, cnt))
725                 return -EFAULT;
726
727         buf[cnt] = 0;
728
729         ret = strict_strtoul(buf, 10, &val);
730         if (ret < 0)
731                 return ret;
732
733         val = !!val;
734
735         mutex_lock(&ftrace_profile_lock);
736         if (ftrace_profile_enabled ^ val) {
737                 if (val) {
738                         ret = ftrace_profile_init();
739                         if (ret < 0) {
740                                 cnt = ret;
741                                 goto out;
742                         }
743
744                         ret = register_ftrace_profiler();
745                         if (ret < 0) {
746                                 cnt = ret;
747                                 goto out;
748                         }
749                         ftrace_profile_enabled = 1;
750                 } else {
751                         ftrace_profile_enabled = 0;
752                         /*
753                          * unregister_ftrace_profiler() calls stop_machine(),
754                          * so this acts like a synchronize_sched().
755                          */
756                         unregister_ftrace_profiler();
757                 }
758         }
759  out:
760         mutex_unlock(&ftrace_profile_lock);
761
762         *ppos += cnt;
763
764         return cnt;
765 }
766
767 static ssize_t
768 ftrace_profile_read(struct file *filp, char __user *ubuf,
769                      size_t cnt, loff_t *ppos)
770 {
771         char buf[64];           /* big enough to hold a number */
772         int r;
773
774         r = sprintf(buf, "%u\n", ftrace_profile_enabled);
775         return simple_read_from_buffer(ubuf, cnt, ppos, buf, r);
776 }
777
778 static const struct file_operations ftrace_profile_fops = {
779         .open           = tracing_open_generic,
780         .read           = ftrace_profile_read,
781         .write          = ftrace_profile_write,
782         .llseek         = default_llseek,
783 };
784
785 /* used to initialize the real stat files */
786 static struct tracer_stat function_stats __initdata = {
787         .name           = "functions",
788         .stat_start     = function_stat_start,
789         .stat_next      = function_stat_next,
790         .stat_cmp       = function_stat_cmp,
791         .stat_headers   = function_stat_headers,
792         .stat_show      = function_stat_show
793 };
794
795 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
796 {
797         struct ftrace_profile_stat *stat;
798         struct dentry *entry;
799         char *name;
800         int ret;
801         int cpu;
802
803         for_each_possible_cpu(cpu) {
804                 stat = &per_cpu(ftrace_profile_stats, cpu);
805
806                 /* allocate enough for function name + cpu number */
807                 name = kmalloc(32, GFP_KERNEL);
808                 if (!name) {
809                         /*
810                          * The files already created are permanent; even if
811                          * something fails here we do not free that memory.
812                          */
813                         WARN(1,
814                              "Could not allocate stat file for cpu %d\n",
815                              cpu);
816                         return;
817                 }
818                 stat->stat = function_stats;
819                 snprintf(name, 32, "function%d", cpu);
820                 stat->stat.name = name;
821                 ret = register_stat_tracer(&stat->stat);
822                 if (ret) {
823                         WARN(1,
824                              "Could not register function stat for cpu %d\n",
825                              cpu);
826                         kfree(name);
827                         return;
828                 }
829         }
830
831         entry = debugfs_create_file("function_profile_enabled", 0644,
832                                     d_tracer, NULL, &ftrace_profile_fops);
833         if (!entry)
834                 pr_warning("Could not create debugfs "
835                            "'function_profile_enabled' entry\n");
836 }
837
838 #else /* CONFIG_FUNCTION_PROFILER */
839 static __init void ftrace_profile_debugfs(struct dentry *d_tracer)
840 {
841 }
842 #endif /* CONFIG_FUNCTION_PROFILER */
843
844 static struct pid * const ftrace_swapper_pid = &init_struct_pid;
845
846 #ifdef CONFIG_DYNAMIC_FTRACE
847
848 #ifndef CONFIG_FTRACE_MCOUNT_RECORD
849 # error Dynamic ftrace depends on MCOUNT_RECORD
850 #endif
851
852 static struct hlist_head ftrace_func_hash[FTRACE_FUNC_HASHSIZE] __read_mostly;
853
854 struct ftrace_func_probe {
855         struct hlist_node       node;
856         struct ftrace_probe_ops *ops;
857         unsigned long           flags;
858         unsigned long           ip;
859         void                    *data;
860         struct rcu_head         rcu;
861 };
862
863 enum {
864         FTRACE_ENABLE_CALLS             = (1 << 0),
865         FTRACE_DISABLE_CALLS            = (1 << 1),
866         FTRACE_UPDATE_TRACE_FUNC        = (1 << 2),
867         FTRACE_START_FUNC_RET           = (1 << 3),
868         FTRACE_STOP_FUNC_RET            = (1 << 4),
869 };
870 struct ftrace_func_entry {
871         struct hlist_node hlist;
872         unsigned long ip;
873 };
874
875 struct ftrace_hash {
876         unsigned long           size_bits;
877         struct hlist_head       *buckets;
878         unsigned long           count;
879 };
880
881 /*
882  * We make these constant because no one should touch them,
883  * but they are used as the default "empty hash", to avoid allocating
884  * it all the time. These are in a read only section such that if
885  * anyone does try to modify it, it will cause an exception.
886  */
887 static const struct hlist_head empty_buckets[1];
888 static const struct ftrace_hash empty_hash = {
889         .buckets = (struct hlist_head *)empty_buckets,
890 };
891 #define EMPTY_HASH      ((struct ftrace_hash *)&empty_hash)
892
893 enum {
894         FTRACE_OPS_FL_ENABLED           = 1,
895 };
896
897 struct ftrace_ops global_ops = {
898         .func                   = ftrace_stub,
899         .notrace_hash           = EMPTY_HASH,
900         .filter_hash            = EMPTY_HASH,
901 };
902
903 static struct dyn_ftrace *ftrace_new_addrs;
904
905 static DEFINE_MUTEX(ftrace_regex_lock);
906
907 struct ftrace_page {
908         struct ftrace_page      *next;
909         int                     index;
910         struct dyn_ftrace       records[];
911 };
912
913 #define ENTRIES_PER_PAGE \
914   ((PAGE_SIZE - sizeof(struct ftrace_page)) / sizeof(struct dyn_ftrace))
915
916 /* estimate from running different kernels */
917 #define NR_TO_INIT              10000
918
919 static struct ftrace_page       *ftrace_pages_start;
920 static struct ftrace_page       *ftrace_pages;
921
922 static struct dyn_ftrace *ftrace_free_records;
923
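/*
 * Look up @ip in @hash. Returns the matching entry, or NULL if the hash
 * is empty or @ip is not present.
 */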
924 static struct ftrace_func_entry *
925 ftrace_lookup_ip(struct ftrace_hash *hash, unsigned long ip)
926 {
927         unsigned long key;
928         struct ftrace_func_entry *entry;
929         struct hlist_head *hhd;
930         struct hlist_node *n;
931
932         if (!hash->count)
933                 return NULL;
934
935         if (hash->size_bits > 0)
936                 key = hash_long(ip, hash->size_bits);
937         else
938                 key = 0;
939
940         hhd = &hash->buckets[key];
941
942         hlist_for_each_entry_rcu(entry, n, hhd, hlist) {
943                 if (entry->ip == ip)
944                         return entry;
945         }
946         return NULL;
947 }
948
949 static void __add_hash_entry(struct ftrace_hash *hash,
950                              struct ftrace_func_entry *entry)
951 {
952         struct hlist_head *hhd;
953         unsigned long key;
954
955         if (hash->size_bits)
956                 key = hash_long(entry->ip, hash->size_bits);
957         else
958                 key = 0;
959
960         hhd = &hash->buckets[key];
961         hlist_add_head(&entry->hlist, hhd);
962         hash->count++;
963 }
964
965 static int add_hash_entry(struct ftrace_hash *hash, unsigned long ip)
966 {
967         struct ftrace_func_entry *entry;
968
969         entry = kmalloc(sizeof(*entry), GFP_KERNEL);
970         if (!entry)
971                 return -ENOMEM;
972
973         entry->ip = ip;
974         __add_hash_entry(hash, entry);
975
976         return 0;
977 }
978
979 static void
980 free_hash_entry(struct ftrace_hash *hash,
981                   struct ftrace_func_entry *entry)
982 {
983         hlist_del(&entry->hlist);
984         kfree(entry);
985         hash->count--;
986 }
987
988 static void
989 remove_hash_entry(struct ftrace_hash *hash,
990                   struct ftrace_func_entry *entry)
991 {
992         hlist_del(&entry->hlist);
993         hash->count--;
994 }
995
996 static void ftrace_hash_clear(struct ftrace_hash *hash)
997 {
998         struct hlist_head *hhd;
999         struct hlist_node *tp, *tn;
1000         struct ftrace_func_entry *entry;
1001         int size = 1 << hash->size_bits;
1002         int i;
1003
1004         if (!hash->count)
1005                 return;
1006
1007         for (i = 0; i < size; i++) {
1008                 hhd = &hash->buckets[i];
1009                 hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist)
1010                         free_hash_entry(hash, entry);
1011         }
1012         FTRACE_WARN_ON(hash->count);
1013 }
1014
1015 static void free_ftrace_hash(struct ftrace_hash *hash)
1016 {
1017         if (!hash || hash == EMPTY_HASH)
1018                 return;
1019         ftrace_hash_clear(hash);
1020         kfree(hash->buckets);
1021         kfree(hash);
1022 }
1023
1024 static struct ftrace_hash *alloc_ftrace_hash(int size_bits)
1025 {
1026         struct ftrace_hash *hash;
1027         int size;
1028
1029         hash = kzalloc(sizeof(*hash), GFP_KERNEL);
1030         if (!hash)
1031                 return NULL;
1032
1033         size = 1 << size_bits;
1034         hash->buckets = kzalloc(sizeof(*hash->buckets) * size, GFP_KERNEL);
1035
1036         if (!hash->buckets) {
1037                 kfree(hash);
1038                 return NULL;
1039         }
1040
1041         hash->size_bits = size_bits;
1042
1043         return hash;
1044 }
1045
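/*
 * Allocate a new hash with 1 << size_bits buckets and copy every entry
 * of @hash into it. Returns NULL if any allocation fails.
 */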
1046 static struct ftrace_hash *
1047 alloc_and_copy_ftrace_hash(int size_bits, struct ftrace_hash *hash)
1048 {
1049         struct ftrace_func_entry *entry;
1050         struct ftrace_hash *new_hash;
1051         struct hlist_node *tp;
1052         int size;
1053         int ret;
1054         int i;
1055
1056         new_hash = alloc_ftrace_hash(size_bits);
1057         if (!new_hash)
1058                 return NULL;
1059
1060         /* Empty hash? */
1061         if (!hash || !hash->count)
1062                 return new_hash;
1063
1064         size = 1 << hash->size_bits;
1065         for (i = 0; i < size; i++) {
1066                 hlist_for_each_entry(entry, tp, &hash->buckets[i], hlist) {
1067                         ret = add_hash_entry(new_hash, entry->ip);
1068                         if (ret < 0)
1069                                 goto free_hash;
1070                 }
1071         }
1072
1073         FTRACE_WARN_ON(new_hash->count != hash->count);
1074
1075         return new_hash;
1076
1077  free_hash:
1078         free_ftrace_hash(new_hash);
1079         return NULL;
1080 }
1081
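/*
 * Move all entries of @src into *@dst, sizing the destination buckets to
 * roughly half the number of entries (capped at FTRACE_HASH_MAX_BITS).
 * An empty @src simply makes *@dst the shared empty hash.
 */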
1082 static int
1083 ftrace_hash_move(struct ftrace_hash **dst, struct ftrace_hash *src)
1084 {
1085         struct ftrace_func_entry *entry;
1086         struct hlist_node *tp, *tn;
1087         struct hlist_head *hhd;
1088         struct ftrace_hash *hash = *dst;
1089         unsigned long key;
1090         int size = src->count;
1091         int bits = 0;
1092         int i;
1093
1094         /*
1095          * If the new source is empty, just free dst and assign it
1096          * the empty_hash.
1097          */
1098         if (!src->count) {
1099                 free_ftrace_hash(*dst);
1100                 *dst = EMPTY_HASH;
1101                 return 0;
1102         }
1103
1104         ftrace_hash_clear(hash);
1105
1106         /*
1107          * Make the hash size about 1/2 the # found
1108          */
1109         for (size /= 2; size; size >>= 1)
1110                 bits++;
1111
1112         /* Don't allocate too much */
1113         if (bits > FTRACE_HASH_MAX_BITS)
1114                 bits = FTRACE_HASH_MAX_BITS;
1115
1116         /* We can't modify the empty_hash */
1117         if (hash == EMPTY_HASH) {
1118                 /* Create a new hash */
1119                 *dst = alloc_ftrace_hash(bits);
1120                 if (!*dst) {
1121                         *dst = EMPTY_HASH;
1122                         return -ENOMEM;
1123                 }
1124                 hash = *dst;
1125         } else {
1126                 size = 1 << bits;
1127
1128                 /* Use the old hash, but create new buckets */
1129                 hhd = kzalloc(sizeof(*hhd) * size, GFP_KERNEL);
1130                 if (!hhd)
1131                         return -ENOMEM;
1132
1133                 kfree(hash->buckets);
1134                 hash->buckets = hhd;
1135                 hash->size_bits = bits;
1136         }
1137
1138         size = 1 << src->size_bits;
1139         for (i = 0; i < size; i++) {
1140                 hhd = &src->buckets[i];
1141                 hlist_for_each_entry_safe(entry, tp, tn, hhd, hlist) {
1142                         if (bits > 0)
1143                                 key = hash_long(entry->ip, bits);
1144                         else
1145                                 key = 0;
1146                         remove_hash_entry(src, entry);
1147                         __add_hash_entry(hash, entry);
1148                 }
1149         }
1150
1151         return 0;
1152 }
1153
1154 /*
1155  * This is a nested (double) for loop. Do not use 'break' to break out of
1156  * the loop; you must use a goto.
1157  */
1158 #define do_for_each_ftrace_rec(pg, rec)                                 \
1159         for (pg = ftrace_pages_start; pg; pg = pg->next) {              \
1160                 int _____i;                                             \
1161                 for (_____i = 0; _____i < pg->index; _____i++) {        \
1162                         rec = &pg->records[_____i];
1163
1164 #define while_for_each_ftrace_rec()             \
1165                 }                               \
1166         }
1167
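/*
 * Walk every dyn_ftrace record and adjust its ref count (in rec->flags)
 * for the functions that @ops traces, as selected by its filter_hash and
 * notrace_hash. @inc chooses between incrementing and decrementing.
 */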
1168 static void __ftrace_hash_rec_update(struct ftrace_ops *ops,
1169                                      int filter_hash,
1170                                      bool inc)
1171 {
1172         struct ftrace_hash *hash;
1173         struct ftrace_hash *other_hash;
1174         struct ftrace_page *pg;
1175         struct dyn_ftrace *rec;
1176         int count = 0;
1177         int all = 0;
1178
1179         /* Only update if the ops has been registered */
1180         if (!(ops->flags & FTRACE_OPS_FL_ENABLED))
1181                 return;
1182
1183         /*
1184          * In the filter_hash case:
1185          *   If the count is zero, we update all records.
1186          *   Otherwise we just update the items in the hash.
1187          *
1188          * In the notrace_hash case:
1189          *   We enable the update in the hash.
1190          *   As disabling notrace means enabling the tracing,
1191          *   and enabling notrace means disabling, the inc variable
1192          *   is inverted.
1193          */
1194         if (filter_hash) {
1195                 hash = ops->filter_hash;
1196                 other_hash = ops->notrace_hash;
1197                 if (!hash->count)
1198                         all = 1;
1199         } else {
1200                 inc = !inc;
1201                 hash = ops->notrace_hash;
1202                 other_hash = ops->filter_hash;
1203                 /*
1204                  * If the notrace hash has no items,
1205                  * then there's nothing to do.
1206                  */
1207                 if (!hash->count)
1208                         return;
1209         }
1210
1211         do_for_each_ftrace_rec(pg, rec) {
1212                 int in_other_hash = 0;
1213                 int in_hash = 0;
1214                 int match = 0;
1215
1216                 if (all) {
1217                         /*
1218                          * Only the filter_hash affects all records.
1219                          * Update if the record is not in the notrace hash.
1220                          */
1221                         if (!ftrace_lookup_ip(other_hash, rec->ip))
1222                                 match = 1;
1223                 } else {
1224                         in_hash = !!ftrace_lookup_ip(hash, rec->ip);
1225                         in_other_hash = !!ftrace_lookup_ip(other_hash, rec->ip);
1226
1227                         /*
1228                          * filter_hash: match records that are in this hash
                         *   but not in the notrace hash.
                         * notrace_hash: match records that are in this hash and
                         *   also in the filter hash (or the filter hash is empty).
1229                          */
1230                         if (filter_hash && in_hash && !in_other_hash)
1231                                 match = 1;
1232                         else if (!filter_hash && in_hash &&
1233                                  (in_other_hash || !other_hash->count))
1234                                 match = 1;
1235                 }
1236                 if (!match)
1237                         continue;
1238
1239                 if (inc) {
1240                         rec->flags++;
1241                         if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == FTRACE_REF_MAX))
1242                                 return;
1243                 } else {
1244                         if (FTRACE_WARN_ON((rec->flags & ~FTRACE_FL_MASK) == 0))
1245                                 return;
1246                         rec->flags--;
1247                 }
1248                 count++;
1249                 /* Shortcut: if we handled all records, we are done. */
1250                 if (!all && count == hash->count)
1251                         return;
1252         } while_for_each_ftrace_rec();
1253 }
1254
1255 static void ftrace_hash_rec_disable(struct ftrace_ops *ops,
1256                                     int filter_hash)
1257 {
1258         __ftrace_hash_rec_update(ops, filter_hash, 0);
1259 }
1260
1261 static void ftrace_hash_rec_enable(struct ftrace_ops *ops,
1262                                    int filter_hash)
1263 {
1264         __ftrace_hash_rec_update(ops, filter_hash, 1);
1265 }
1266
1267 static void ftrace_free_rec(struct dyn_ftrace *rec)
1268 {
1269         rec->freelist = ftrace_free_records;
1270         ftrace_free_records = rec;
1271         rec->flags |= FTRACE_FL_FREE;
1272 }
1273
1274 static struct dyn_ftrace *ftrace_alloc_dyn_node(unsigned long ip)
1275 {
1276         struct dyn_ftrace *rec;
1277
1278         /* First check for freed records */
1279         if (ftrace_free_records) {
1280                 rec = ftrace_free_records;
1281
1282                 if (unlikely(!(rec->flags & FTRACE_FL_FREE))) {
1283                         FTRACE_WARN_ON_ONCE(1);
1284                         ftrace_free_records = NULL;
1285                         return NULL;
1286                 }
1287
1288                 ftrace_free_records = rec->freelist;
1289                 memset(rec, 0, sizeof(*rec));
1290                 return rec;
1291         }
1292
1293         if (ftrace_pages->index == ENTRIES_PER_PAGE) {
1294                 if (!ftrace_pages->next) {
1295                         /* allocate another page */
1296                         ftrace_pages->next =
1297                                 (void *)get_zeroed_page(GFP_KERNEL);
1298                         if (!ftrace_pages->next)
1299                                 return NULL;
1300                 }
1301                 ftrace_pages = ftrace_pages->next;
1302         }
1303
1304         return &ftrace_pages->records[ftrace_pages->index++];
1305 }
1306
1307 static struct dyn_ftrace *
1308 ftrace_record_ip(unsigned long ip)
1309 {
1310         struct dyn_ftrace *rec;
1311
1312         if (ftrace_disabled)
1313                 return NULL;
1314
1315         rec = ftrace_alloc_dyn_node(ip);
1316         if (!rec)
1317                 return NULL;
1318
1319         rec->ip = ip;
1320         rec->newlist = ftrace_new_addrs;
1321         ftrace_new_addrs = rec;
1322
1323         return rec;
1324 }
1325
1326 static void print_ip_ins(const char *fmt, unsigned char *p)
1327 {
1328         int i;
1329
1330         printk(KERN_CONT "%s", fmt);
1331
1332         for (i = 0; i < MCOUNT_INSN_SIZE; i++)
1333                 printk(KERN_CONT "%s%02x", i ? ":" : "", p[i]);
1334 }
1335
1336 static void ftrace_bug(int failed, unsigned long ip)
1337 {
1338         switch (failed) {
1339         case -EFAULT:
1340                 FTRACE_WARN_ON_ONCE(1);
1341                 pr_info("ftrace faulted on modifying ");
1342                 print_ip_sym(ip);
1343                 break;
1344         case -EINVAL:
1345                 FTRACE_WARN_ON_ONCE(1);
1346                 pr_info("ftrace failed to modify ");
1347                 print_ip_sym(ip);
1348                 print_ip_ins(" actual: ", (unsigned char *)ip);
1349                 printk(KERN_CONT "\n");
1350                 break;
1351         case -EPERM:
1352                 FTRACE_WARN_ON_ONCE(1);
1353                 pr_info("ftrace faulted on writing ");
1354                 print_ip_sym(ip);
1355                 break;
1356         default:
1357                 FTRACE_WARN_ON_ONCE(1);
1358                 pr_info("ftrace faulted on unknown error ");
1359                 print_ip_sym(ip);
1360         }
1361 }
1362
1363
1364 /* Return 1 if the address range is reserved for ftrace */
1365 int ftrace_text_reserved(void *start, void *end)
1366 {
1367         struct dyn_ftrace *rec;
1368         struct ftrace_page *pg;
1369
1370         do_for_each_ftrace_rec(pg, rec) {
1371                 if (rec->ip <= (unsigned long)end &&
1372                     rec->ip + MCOUNT_INSN_SIZE > (unsigned long)start)
1373                         return 1;
1374         } while_for_each_ftrace_rec();
1375         return 0;
1376 }
1377
1378
1379 static int
1380 __ftrace_replace_code(struct dyn_ftrace *rec, int enable)
1381 {
1382         unsigned long ftrace_addr;
1383         unsigned long flag = 0UL;
1384
1385         ftrace_addr = (unsigned long)FTRACE_ADDR;
1386
1387         /*
1388          * If we are enabling tracing:
1389          *
1390          *   If the record has a ref count, then we need to enable it
1391          *   because someone is using it.
1392          *
1393          *   Otherwise we make sure it is disabled.
1394          *
1395          * If we are disabling tracing, then disable all records that
1396          * are enabled.
1397          */
1398         if (enable && (rec->flags & ~FTRACE_FL_MASK))
1399                 flag = FTRACE_FL_ENABLED;
1400
1401         /* If the state of this record hasn't changed, then do nothing */
1402         if ((rec->flags & FTRACE_FL_ENABLED) == flag)
1403                 return 0;
1404
1405         if (flag) {
1406                 rec->flags |= FTRACE_FL_ENABLED;
1407                 return ftrace_make_call(rec, ftrace_addr);
1408         }
1409
1410         rec->flags &= ~FTRACE_FL_ENABLED;
1411         return ftrace_make_nop(NULL, rec, ftrace_addr);
1412 }
1413
1414 static void ftrace_replace_code(int enable)
1415 {
1416         struct dyn_ftrace *rec;
1417         struct ftrace_page *pg;
1418         int failed;
1419
1420         if (unlikely(ftrace_disabled))
1421                 return;
1422
1423         do_for_each_ftrace_rec(pg, rec) {
1424                 /* Skip over free records */
1425                 if (rec->flags & FTRACE_FL_FREE)
1426                         continue;
1427
1428                 failed = __ftrace_replace_code(rec, enable);
1429                 if (failed) {
1430                         ftrace_bug(failed, rec->ip);
1431                         /* Stop processing */
1432                         return;
1433                 }
1434         } while_for_each_ftrace_rec();
1435 }
1436
1437 static int
1438 ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
1439 {
1440         unsigned long ip;
1441         int ret;
1442
1443         ip = rec->ip;
1444
1445         if (unlikely(ftrace_disabled))
1446                 return 0;
1447
1448         ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
1449         if (ret) {
1450                 ftrace_bug(ret, ip);
1451                 return 0;
1452         }
1453         return 1;
1454 }
1455
1456 /*
1457  * archs can override this function if they must do something
1458  * before the code modification is performed.
1459  */
1460 int __weak ftrace_arch_code_modify_prepare(void)
1461 {
1462         return 0;
1463 }
1464
1465 /*
1466  * archs can override this function if they must do something
1467  * after the code modification is performed.
1468  */
1469 int __weak ftrace_arch_code_modify_post_process(void)
1470 {
1471         return 0;
1472 }
1473
1474 static int __ftrace_modify_code(void *data)
1475 {
1476         int *command = data;
1477
1478         if (*command & FTRACE_ENABLE_CALLS)
1479                 ftrace_replace_code(1);
1480         else if (*command & FTRACE_DISABLE_CALLS)
1481                 ftrace_replace_code(0);
1482
1483         if (*command & FTRACE_UPDATE_TRACE_FUNC)
1484                 ftrace_update_ftrace_func(ftrace_trace_function);
1485
1486         if (*command & FTRACE_START_FUNC_RET)
1487                 ftrace_enable_ftrace_graph_caller();
1488         else if (*command & FTRACE_STOP_FUNC_RET)
1489                 ftrace_disable_ftrace_graph_caller();
1490
1491         return 0;
1492 }
1493
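/*
 * Run the requested code modifications (__ftrace_modify_code) under
 * stop_machine(), bracketed by the arch prepare/post-process hooks.
 */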
1494 static void ftrace_run_update_code(int command)
1495 {
1496         int ret;
1497
1498         ret = ftrace_arch_code_modify_prepare();
1499         FTRACE_WARN_ON(ret);
1500         if (ret)
1501                 return;
1502
1503         stop_machine(__ftrace_modify_code, &command, NULL);
1504
1505         ret = ftrace_arch_code_modify_post_process();
1506         FTRACE_WARN_ON(ret);
1507 }
1508
1509 static ftrace_func_t saved_ftrace_func;
1510 static int ftrace_start_up;
1511
1512 static void ftrace_startup_enable(int command)
1513 {
1514         if (saved_ftrace_func != ftrace_trace_function) {
1515                 saved_ftrace_func = ftrace_trace_function;
1516                 command |= FTRACE_UPDATE_TRACE_FUNC;
1517         }
1518
1519         if (!command || !ftrace_enabled)
1520                 return;
1521
1522         ftrace_run_update_code(command);
1523 }
1524
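/*
 * Bring @ops online: bump the start_up count, mark @ops enabled, update
 * the record ref counts for its hashes on the first user, and enable the
 * mcount calls.
 */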
1525 static void ftrace_startup(struct ftrace_ops *ops, int command)
1526 {
1527         if (unlikely(ftrace_disabled))
1528                 return;
1529
1530         ftrace_start_up++;
1531         command |= FTRACE_ENABLE_CALLS;
1532
1533         ops->flags |= FTRACE_OPS_FL_ENABLED;
1534         if (ftrace_start_up == 1)
1535                 ftrace_hash_rec_enable(ops, 1);
1536
1537         ftrace_startup_enable(command);
1538 }
1539
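/*
 * Counterpart of ftrace_startup(): drop the start_up count and, once it
 * reaches zero, drop the record ref counts for @ops, clear its enabled
 * flag and nop the mcount calls out again.
 */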
1540 static void ftrace_shutdown(struct ftrace_ops *ops, int command)
1541 {
1542         if (unlikely(ftrace_disabled))
1543                 return;
1544
1545         ftrace_start_up--;
1546         /*
1547          * Just warn in case of imbalance; no need to kill ftrace. It's not
1548          * critical, but the ftrace_call callers may never be nopped again
1549          * after further ftrace uses.
1550          */
1551         WARN_ON_ONCE(ftrace_start_up < 0);
1552
1553         if (!ftrace_start_up)
1554                 ftrace_hash_rec_disable(ops, 1);
1555
1556         if (!ftrace_start_up) {
1557                 command |= FTRACE_DISABLE_CALLS;
1558                 ops->flags &= ~FTRACE_OPS_FL_ENABLED;
1559         }
1560
1561         if (saved_ftrace_func != ftrace_trace_function) {
1562                 saved_ftrace_func = ftrace_trace_function;
1563                 command |= FTRACE_UPDATE_TRACE_FUNC;
1564         }
1565
1566         if (!command || !ftrace_enabled)
1567                 return;
1568
1569         ftrace_run_update_code(command);
1570 }
1571
1572 static void ftrace_startup_sysctl(void)
1573 {
1574         if (unlikely(ftrace_disabled))
1575                 return;
1576
1577         /* Force update next time */
1578         saved_ftrace_func = NULL;
1579         /* ftrace_start_up is true if we want ftrace running */
1580         if (ftrace_start_up)
1581                 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
1582 }
1583
1584 static void ftrace_shutdown_sysctl(void)
1585 {
1586         if (unlikely(ftrace_disabled))
1587                 return;
1588
1589         /* ftrace_start_up is true if ftrace is running */
1590         if (ftrace_start_up)
1591                 ftrace_run_update_code(FTRACE_DISABLE_CALLS);
1592 }
1593
1594 static cycle_t          ftrace_update_time;
1595 static unsigned long    ftrace_update_cnt;
1596 unsigned long           ftrace_update_tot_cnt;
1597
1598 static int ftrace_update_code(struct module *mod)
1599 {
1600         struct dyn_ftrace *p;
1601         cycle_t start, stop;
1602
1603         start = ftrace_now(raw_smp_processor_id());
1604         ftrace_update_cnt = 0;
1605
1606         while (ftrace_new_addrs) {
1607
1608                 /* If something went wrong, bail without enabling anything */
1609                 if (unlikely(ftrace_disabled))
1610                         return -1;
1611
1612                 p = ftrace_new_addrs;
1613                 ftrace_new_addrs = p->newlist;
1614                 p->flags = 0L;
1615
1616                 /*
1617                  * Do the initial record conversion from mcount jump
1618                  * to the NOP instructions.
1619                  */
1620                 if (!ftrace_code_disable(mod, p)) {
1621                         ftrace_free_rec(p);
1622                         /* Game over */
1623                         break;
1624                 }
1625
1626                 ftrace_update_cnt++;
1627
1628                 /*
1629                  * If the tracing is enabled, go ahead and enable the record.
1630                  *
1631                  * The reason not to enable the record immediately is the
1632                  * inherent check of ftrace_make_nop/ftrace_make_call for
1633                  * the correct previous instructions.  Doing the NOP
1634                  * conversion first puts the module into the correct state,
1635                  * thus passing the ftrace_make_call check.
1636                  */
1637                 if (ftrace_start_up) {
1638                         int failed = __ftrace_replace_code(p, 1);
1639                         if (failed) {
1640                                 ftrace_bug(failed, p->ip);
1641                                 ftrace_free_rec(p);
1642                         }
1643                 }
1644         }
1645
1646         stop = ftrace_now(raw_smp_processor_id());
1647         ftrace_update_time = stop - start;
1648         ftrace_update_tot_cnt += ftrace_update_cnt;
1649
1650         return 0;
1651 }
1652
1653 static int __init ftrace_dyn_table_alloc(unsigned long num_to_init)
1654 {
1655         struct ftrace_page *pg;
1656         int cnt;
1657         int i;
1658
1659         /* allocate a few pages */
1660         ftrace_pages_start = (void *)get_zeroed_page(GFP_KERNEL);
1661         if (!ftrace_pages_start)
1662                 return -1;
1663
1664         /*
1665          * Allocate a few more pages.
1666          *
1667          * TODO: have some parser search vmlinux before
1668          *   final linking to find all calls to ftrace.
1669          *   Then we can:
1670          *    a) know how many pages to allocate.
1671          *     and/or
1672          *    b) set up the table then.
1673          *
1674          *  The dynamic code is still necessary for
1675          *  modules.
1676          */
1677
1678         pg = ftrace_pages = ftrace_pages_start;
1679
1680         cnt = num_to_init / ENTRIES_PER_PAGE;
1681         pr_info("ftrace: allocating %ld entries in %d pages\n",
1682                 num_to_init, cnt + 1);
1683
1684         for (i = 0; i < cnt; i++) {
1685                 pg->next = (void *)get_zeroed_page(GFP_KERNEL);
1686
1687                 /* If we fail, we'll try later anyway */
1688                 if (!pg->next)
1689                         break;
1690
1691                 pg = pg->next;
1692         }
1693
1694         return 0;
1695 }
1696
1697 enum {
1698         FTRACE_ITER_FILTER      = (1 << 0),
1699         FTRACE_ITER_NOTRACE     = (1 << 1),
1700         FTRACE_ITER_PRINTALL    = (1 << 2),
1701         FTRACE_ITER_HASH        = (1 << 3),
1702         FTRACE_ITER_ENABLED     = (1 << 4),
1703 };
1704
1705 #define FTRACE_BUFF_MAX (KSYM_SYMBOL_LEN+4) /* room for wildcards */
1706
1707 struct ftrace_iterator {
1708         loff_t                          pos;
1709         loff_t                          func_pos;
1710         struct ftrace_page              *pg;
1711         struct dyn_ftrace               *func;
1712         struct ftrace_func_probe        *probe;
1713         struct trace_parser             parser;
1714         struct ftrace_hash              *hash;
1715         struct ftrace_ops               *ops;
1716         int                             hidx;
1717         int                             idx;
1718         unsigned                        flags;
1719 };
1720
1721 static void *
1722 t_hash_next(struct seq_file *m, loff_t *pos)
1723 {
1724         struct ftrace_iterator *iter = m->private;
1725         struct hlist_node *hnd = NULL;
1726         struct hlist_head *hhd;
1727
1728         (*pos)++;
1729         iter->pos = *pos;
1730
1731         if (iter->probe)
1732                 hnd = &iter->probe->node;
1733  retry:
1734         if (iter->hidx >= FTRACE_FUNC_HASHSIZE)
1735                 return NULL;
1736
1737         hhd = &ftrace_func_hash[iter->hidx];
1738
1739         if (hlist_empty(hhd)) {
1740                 iter->hidx++;
1741                 hnd = NULL;
1742                 goto retry;
1743         }
1744
1745         if (!hnd)
1746                 hnd = hhd->first;
1747         else {
1748                 hnd = hnd->next;
1749                 if (!hnd) {
1750                         iter->hidx++;
1751                         goto retry;
1752                 }
1753         }
1754
1755         if (WARN_ON_ONCE(!hnd))
1756                 return NULL;
1757
1758         iter->probe = hlist_entry(hnd, struct ftrace_func_probe, node);
1759
1760         return iter;
1761 }
1762
1763 static void *t_hash_start(struct seq_file *m, loff_t *pos)
1764 {
1765         struct ftrace_iterator *iter = m->private;
1766         void *p = NULL;
1767         loff_t l;
1768
1769         if (iter->func_pos > *pos)
1770                 return NULL;
1771
1772         iter->hidx = 0;
1773         for (l = 0; l <= (*pos - iter->func_pos); ) {
1774                 p = t_hash_next(m, &l);
1775                 if (!p)
1776                         break;
1777         }
1778         if (!p)
1779                 return NULL;
1780
1781         /* Only set this if we have an item */
1782         iter->flags |= FTRACE_ITER_HASH;
1783
1784         return iter;
1785 }
1786
1787 static int
1788 t_hash_show(struct seq_file *m, struct ftrace_iterator *iter)
1789 {
1790         struct ftrace_func_probe *rec;
1791
1792         rec = iter->probe;
1793         if (WARN_ON_ONCE(!rec))
1794                 return -EIO;
1795
1796         if (rec->ops->print)
1797                 return rec->ops->print(m, rec->ip, rec->ops, rec->data);
1798
1799         seq_printf(m, "%ps:%ps", (void *)rec->ip, (void *)rec->ops->func);
1800
1801         if (rec->data)
1802                 seq_printf(m, ":%p", rec->data);
1803         seq_putc(m, '\n');
1804
1805         return 0;
1806 }
1807
1808 static void *
1809 t_next(struct seq_file *m, void *v, loff_t *pos)
1810 {
1811         struct ftrace_iterator *iter = m->private;
1812         struct ftrace_ops *ops = &global_ops;
1813         struct dyn_ftrace *rec = NULL;
1814
1815         if (unlikely(ftrace_disabled))
1816                 return NULL;
1817
1818         if (iter->flags & FTRACE_ITER_HASH)
1819                 return t_hash_next(m, pos);
1820
1821         (*pos)++;
1822         iter->pos = iter->func_pos = *pos;
1823
1824         if (iter->flags & FTRACE_ITER_PRINTALL)
1825                 return t_hash_start(m, pos);
1826
1827  retry:
1828         if (iter->idx >= iter->pg->index) {
1829                 if (iter->pg->next) {
1830                         iter->pg = iter->pg->next;
1831                         iter->idx = 0;
1832                         goto retry;
1833                 }
1834         } else {
1835                 rec = &iter->pg->records[iter->idx++];
1836                 if ((rec->flags & FTRACE_FL_FREE) ||
1837
1838                     ((iter->flags & FTRACE_ITER_FILTER) &&
1839                      !(ftrace_lookup_ip(ops->filter_hash, rec->ip))) ||
1840
1841                     ((iter->flags & FTRACE_ITER_NOTRACE) &&
1842                      !ftrace_lookup_ip(ops->notrace_hash, rec->ip)) ||
1843
1844                     ((iter->flags & FTRACE_ITER_ENABLED) &&
1845                      !(rec->flags & ~FTRACE_FL_MASK))) {
1846
1847                         rec = NULL;
1848                         goto retry;
1849                 }
1850         }
1851
1852         if (!rec)
1853                 return t_hash_start(m, pos);
1854
1855         iter->func = rec;
1856
1857         return iter;
1858 }
1859
1860 static void reset_iter_read(struct ftrace_iterator *iter)
1861 {
1862         iter->pos = 0;
1863         iter->func_pos = 0;
1864         iter->flags &= ~(FTRACE_ITER_PRINTALL | FTRACE_ITER_HASH);
1865 }
1866
1867 static void *t_start(struct seq_file *m, loff_t *pos)
1868 {
1869         struct ftrace_iterator *iter = m->private;
1870         struct ftrace_ops *ops = &global_ops;
1871         void *p = NULL;
1872         loff_t l;
1873
1874         mutex_lock(&ftrace_lock);
1875
1876         if (unlikely(ftrace_disabled))
1877                 return NULL;
1878
1879         /*
1880          * If an lseek was done, then reset and start from beginning.
1881          */
1882         if (*pos < iter->pos)
1883                 reset_iter_read(iter);
1884
1885         /*
1886          * For set_ftrace_filter reading, if we have the filter
1887          * off, we can short cut and just print out that all
1888          * functions are enabled.
1889          */
1890         if (iter->flags & FTRACE_ITER_FILTER && !ops->filter_hash->count) {
1891                 if (*pos > 0)
1892                         return t_hash_start(m, pos);
1893                 iter->flags |= FTRACE_ITER_PRINTALL;
1894                 /* reset in case of seek/pread */
1895                 iter->flags &= ~FTRACE_ITER_HASH;
1896                 return iter;
1897         }
1898
1899         if (iter->flags & FTRACE_ITER_HASH)
1900                 return t_hash_start(m, pos);
1901
1902         /*
1903          * Unfortunately, we need to restart at ftrace_pages_start
1904          * every time we let go of the ftrace_lock. This is because
1905          * those pointers can change without the lock.
1906          */
1907         iter->pg = ftrace_pages_start;
1908         iter->idx = 0;
1909         for (l = 0; l <= *pos; ) {
1910                 p = t_next(m, p, &l);
1911                 if (!p)
1912                         break;
1913         }
1914
1915         if (!p) {
1916                 if (iter->flags & FTRACE_ITER_FILTER)
1917                         return t_hash_start(m, pos);
1918
1919                 return NULL;
1920         }
1921
1922         return iter;
1923 }
1924
1925 static void t_stop(struct seq_file *m, void *p)
1926 {
1927         mutex_unlock(&ftrace_lock);
1928 }
1929
1930 static int t_show(struct seq_file *m, void *v)
1931 {
1932         struct ftrace_iterator *iter = m->private;
1933         struct dyn_ftrace *rec;
1934
1935         if (iter->flags & FTRACE_ITER_HASH)
1936                 return t_hash_show(m, iter);
1937
1938         if (iter->flags & FTRACE_ITER_PRINTALL) {
1939                 seq_printf(m, "#### all functions enabled ####\n");
1940                 return 0;
1941         }
1942
1943         rec = iter->func;
1944
1945         if (!rec)
1946                 return 0;
1947
1948         seq_printf(m, "%ps", (void *)rec->ip);
1949         if (iter->flags & FTRACE_ITER_ENABLED)
1950                 seq_printf(m, " (%ld)",
1951                            rec->flags & ~FTRACE_FL_MASK);
1952         seq_printf(m, "\n");
1953
1954         return 0;
1955 }
1956
1957 static const struct seq_operations show_ftrace_seq_ops = {
1958         .start = t_start,
1959         .next = t_next,
1960         .stop = t_stop,
1961         .show = t_show,
1962 };
1963
1964 static int
1965 ftrace_avail_open(struct inode *inode, struct file *file)
1966 {
1967         struct ftrace_iterator *iter;
1968         int ret;
1969
1970         if (unlikely(ftrace_disabled))
1971                 return -ENODEV;
1972
1973         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
1974         if (!iter)
1975                 return -ENOMEM;
1976
1977         iter->pg = ftrace_pages_start;
1978
1979         ret = seq_open(file, &show_ftrace_seq_ops);
1980         if (!ret) {
1981                 struct seq_file *m = file->private_data;
1982
1983                 m->private = iter;
1984         } else {
1985                 kfree(iter);
1986         }
1987
1988         return ret;
1989 }
1990
1991 static int
1992 ftrace_enabled_open(struct inode *inode, struct file *file)
1993 {
1994         struct ftrace_iterator *iter;
1995         int ret;
1996
1997         if (unlikely(ftrace_disabled))
1998                 return -ENODEV;
1999
2000         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2001         if (!iter)
2002                 return -ENOMEM;
2003
2004         iter->pg = ftrace_pages_start;
2005         iter->flags = FTRACE_ITER_ENABLED;
2006
2007         ret = seq_open(file, &show_ftrace_seq_ops);
2008         if (!ret) {
2009                 struct seq_file *m = file->private_data;
2010
2011                 m->private = iter;
2012         } else {
2013                 kfree(iter);
2014         }
2015
2016         return ret;
2017 }
2018
2019 static void ftrace_filter_reset(struct ftrace_hash *hash)
2020 {
2021         mutex_lock(&ftrace_lock);
2022         ftrace_hash_clear(hash);
2023         mutex_unlock(&ftrace_lock);
2024 }
2025
2026 static int
2027 ftrace_regex_open(struct ftrace_ops *ops, int flag,
2028                   struct inode *inode, struct file *file)
2029 {
2030         struct ftrace_iterator *iter;
2031         struct ftrace_hash *hash;
2032         int ret = 0;
2033
2034         if (unlikely(ftrace_disabled))
2035                 return -ENODEV;
2036
2037         iter = kzalloc(sizeof(*iter), GFP_KERNEL);
2038         if (!iter)
2039                 return -ENOMEM;
2040
2041         if (trace_parser_get_init(&iter->parser, FTRACE_BUFF_MAX)) {
2042                 kfree(iter);
2043                 return -ENOMEM;
2044         }
2045
2046         if (flag & FTRACE_ITER_NOTRACE)
2047                 hash = ops->notrace_hash;
2048         else
2049                 hash = ops->filter_hash;
2050
2051         iter->ops = ops;
2052         iter->flags = flag;
2053
2054         if (file->f_mode & FMODE_WRITE) {
2055                 mutex_lock(&ftrace_lock);
2056                 iter->hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, hash);
2057                 mutex_unlock(&ftrace_lock);
2058
2059                 if (!iter->hash) {
2060                         trace_parser_put(&iter->parser);
2061                         kfree(iter);
2062                         return -ENOMEM;
2063                 }
2064         }
2065
2066         mutex_lock(&ftrace_regex_lock);
2067
2068         if ((file->f_mode & FMODE_WRITE) &&
2069             (file->f_flags & O_TRUNC))
2070                 ftrace_filter_reset(iter->hash);
2071
2072         if (file->f_mode & FMODE_READ) {
2073                 iter->pg = ftrace_pages_start;
2074
2075                 ret = seq_open(file, &show_ftrace_seq_ops);
2076                 if (!ret) {
2077                         struct seq_file *m = file->private_data;
2078                         m->private = iter;
2079                 } else {
2080                         /* Failed */
2081                         free_ftrace_hash(iter->hash);
2082                         trace_parser_put(&iter->parser);
2083                         kfree(iter);
2084                 }
2085         } else
2086                 file->private_data = iter;
2087         mutex_unlock(&ftrace_regex_lock);
2088
2089         return ret;
2090 }
2091
2092 static int
2093 ftrace_filter_open(struct inode *inode, struct file *file)
2094 {
2095         return ftrace_regex_open(&global_ops, FTRACE_ITER_FILTER,
2096                                  inode, file);
2097 }
2098
2099 static int
2100 ftrace_notrace_open(struct inode *inode, struct file *file)
2101 {
2102         return ftrace_regex_open(&global_ops, FTRACE_ITER_NOTRACE,
2103                                  inode, file);
2104 }
2105
2106 static loff_t
2107 ftrace_regex_lseek(struct file *file, loff_t offset, int origin)
2108 {
2109         loff_t ret;
2110
2111         if (file->f_mode & FMODE_READ)
2112                 ret = seq_lseek(file, offset, origin);
2113         else
2114                 file->f_pos = ret = 1;
2115
2116         return ret;
2117 }
2118
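/*
 * Match a symbol name against a glob that has already been broken down
 * by filter_parse_regex().  Roughly: "func" selects MATCH_FULL,
 * "func*" MATCH_FRONT_ONLY, "*func" MATCH_END_ONLY and "*func*"
 * MATCH_MIDDLE_ONLY, with @regex pointing at the glob stripped of its
 * wildcards and @len giving its length.
 */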
2119 static int ftrace_match(char *str, char *regex, int len, int type)
2120 {
2121         int matched = 0;
2122         int slen;
2123
2124         switch (type) {
2125         case MATCH_FULL:
2126                 if (strcmp(str, regex) == 0)
2127                         matched = 1;
2128                 break;
2129         case MATCH_FRONT_ONLY:
2130                 if (strncmp(str, regex, len) == 0)
2131                         matched = 1;
2132                 break;
2133         case MATCH_MIDDLE_ONLY:
2134                 if (strstr(str, regex))
2135                         matched = 1;
2136                 break;
2137         case MATCH_END_ONLY:
2138                 slen = strlen(str);
2139                 if (slen >= len && memcmp(str + slen - len, regex, len) == 0)
2140                         matched = 1;
2141                 break;
2142         }
2143
2144         return matched;
2145 }
2146
2147 static int
2148 enter_record(struct ftrace_hash *hash, struct dyn_ftrace *rec, int not)
2149 {
2150         struct ftrace_func_entry *entry;
2151         int ret = 0;
2152
2153         entry = ftrace_lookup_ip(hash, rec->ip);
2154         if (not) {
2155                 /* Do nothing if it doesn't exist */
2156                 if (!entry)
2157                         return 0;
2158
2159                 free_hash_entry(hash, entry);
2160         } else {
2161                 /* Do nothing if it exists */
2162                 if (entry)
2163                         return 0;
2164
2165                 ret = add_hash_entry(hash, rec->ip);
2166         }
2167         return ret;
2168 }
2169
2170 static int
2171 ftrace_match_record(struct dyn_ftrace *rec, char *mod,
2172                     char *regex, int len, int type)
2173 {
2174         char str[KSYM_SYMBOL_LEN];
2175         char *modname;
2176
2177         kallsyms_lookup(rec->ip, NULL, NULL, &modname, str);
2178
2179         if (mod) {
2180                 /* module lookup requires matching the module */
2181                 if (!modname || strcmp(modname, mod))
2182                         return 0;
2183
2184                 /* blank search means to match all funcs in the mod */
2185                 if (!len)
2186                         return 1;
2187         }
2188
2189         return ftrace_match(str, regex, len, type);
2190 }
2191
2192 static int
2193 match_records(struct ftrace_hash *hash, char *buff,
2194               int len, char *mod, int not)
2195 {
2196         unsigned search_len = 0;
2197         struct ftrace_page *pg;
2198         struct dyn_ftrace *rec;
2199         int type = MATCH_FULL;
2200         char *search = buff;
2201         int found = 0;
2202         int ret;
2203
2204         if (len) {
2205                 type = filter_parse_regex(buff, len, &search, &not);
2206                 search_len = strlen(search);
2207         }
2208
2209         mutex_lock(&ftrace_lock);
2210
2211         if (unlikely(ftrace_disabled))
2212                 goto out_unlock;
2213
2214         do_for_each_ftrace_rec(pg, rec) {
2215
2216                 if (ftrace_match_record(rec, mod, search, search_len, type)) {
2217                         ret = enter_record(hash, rec, not);
2218                         if (ret < 0) {
2219                                 found = ret;
2220                                 goto out_unlock;
2221                         }
2222                         found = 1;
2223                 }
2224         } while_for_each_ftrace_rec();
2225  out_unlock:
2226         mutex_unlock(&ftrace_lock);
2227
2228         return found;
2229 }
2230
2231 static int
2232 ftrace_match_records(struct ftrace_hash *hash, char *buff, int len)
2233 {
2234         return match_records(hash, buff, len, NULL, 0);
2235 }
2236
2237 static int
2238 ftrace_match_module_records(struct ftrace_hash *hash, char *buff, char *mod)
2239 {
2240         int not = 0;
2241
2242         /* blank or '*' mean the same */
2243         if (strcmp(buff, "*") == 0)
2244                 buff[0] = 0;
2245
2246         /* handle the case of 'don't filter this module' */
2247         if (strcmp(buff, "!") == 0 || strcmp(buff, "!*") == 0) {
2248                 buff[0] = 0;
2249                 not = 1;
2250         }
2251
2252         return match_records(hash, buff, strlen(buff), mod, not);
2253 }
2254
2255 /*
2256  * We register the module command as a template to show others how
2257  * to register a command as well.
2258  */
2259
2260 static int
2261 ftrace_mod_callback(char *func, char *cmd, char *param, int enable)
2262 {
2263         struct ftrace_ops *ops = &global_ops;
2264         struct ftrace_hash *hash;
2265         char *mod;
2266         int ret = -EINVAL;
2267
2268         /*
2269          * cmd == 'mod' because we only registered this func
2270          * for the 'mod' ftrace_func_command.
2271          * But if you register one func with multiple commands,
2272          * you can tell which command was used by the cmd
2273          * parameter.
2274          */
2275
2276         /* we must have a module name */
2277         if (!param)
2278                 return ret;
2279
2280         mod = strsep(&param, ":");
2281         if (!strlen(mod))
2282                 return ret;
2283
2284         if (enable)
2285                 hash = ops->filter_hash;
2286         else
2287                 hash = ops->notrace_hash;
2288
2289         ret = ftrace_match_module_records(hash, func, mod);
2290         if (!ret)
2291                 ret = -EINVAL;
2292         if (ret < 0)
2293                 return ret;
2294
2295         return 0;
2296 }
2297
2298 static struct ftrace_func_command ftrace_mod_cmd = {
2299         .name                   = "mod",
2300         .func                   = ftrace_mod_callback,
2301 };
2302
2303 static int __init ftrace_mod_cmd_init(void)
2304 {
2305         return register_ftrace_command(&ftrace_mod_cmd);
2306 }
2307 device_initcall(ftrace_mod_cmd_init);
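/*
 * Example (illustrative sketch only): a hypothetical "skip" command
 * registered the same way as the "mod" template above.  Writing
 * "<func>:skip:<param>" to set_ftrace_filter or set_ftrace_notrace
 * would then reach skip_callback() with @cmd set to "skip", so a
 * single callback can serve several registered commands.  Every
 * skip_* name below is invented for illustration only.
 */
static int skip_callback(char *func, char *cmd, char *param, int enable)
{
        /* @func is the text before the first ':', @param what follows the command */
        pr_info("ftrace: '%s' command on %s (param %s, enable %d)\n",
                cmd, func, param ? param : "<none>", enable);
        return 0;
}

static struct ftrace_func_command skip_cmd = {
        .name                   = "skip",
        .func                   = skip_callback,
};

static int __init skip_cmd_init(void)
{
        return register_ftrace_command(&skip_cmd);
}
device_initcall(skip_cmd_init);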
2308
2309 static void
2310 function_trace_probe_call(unsigned long ip, unsigned long parent_ip)
2311 {
2312         struct ftrace_func_probe *entry;
2313         struct hlist_head *hhd;
2314         struct hlist_node *n;
2315         unsigned long key;
2316
2317         key = hash_long(ip, FTRACE_HASH_BITS);
2318
2319         hhd = &ftrace_func_hash[key];
2320
2321         if (hlist_empty(hhd))
2322                 return;
2323
2324         /*
2325          * Disable preemption for these calls to prevent an RCU grace
2326          * period. This syncs the hash iteration and freeing of items
2327          * on the hash. rcu_read_lock is too dangerous here.
2328          */
2329         preempt_disable_notrace();
2330         hlist_for_each_entry_rcu(entry, n, hhd, node) {
2331                 if (entry->ip == ip)
2332                         entry->ops->func(ip, parent_ip, &entry->data);
2333         }
2334         preempt_enable_notrace();
2335 }
2336
2337 static struct ftrace_ops trace_probe_ops __read_mostly =
2338 {
2339         .func           = function_trace_probe_call,
2340 };
2341
2342 static int ftrace_probe_registered;
2343
2344 static void __enable_ftrace_function_probe(void)
2345 {
2346         int i;
2347
2348         if (ftrace_probe_registered)
2349                 return;
2350
2351         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2352                 struct hlist_head *hhd = &ftrace_func_hash[i];
2353                 if (hhd->first)
2354                         break;
2355         }
2356         /* Nothing registered? */
2357         if (i == FTRACE_FUNC_HASHSIZE)
2358                 return;
2359
2360         __register_ftrace_function(&trace_probe_ops);
2361         ftrace_startup(&global_ops, 0);
2362         ftrace_probe_registered = 1;
2363 }
2364
2365 static void __disable_ftrace_function_probe(void)
2366 {
2367         int i;
2368
2369         if (!ftrace_probe_registered)
2370                 return;
2371
2372         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2373                 struct hlist_head *hhd = &ftrace_func_hash[i];
2374                 if (hhd->first)
2375                         return;
2376         }
2377
2378         /* no more funcs left */
2379         __unregister_ftrace_function(&trace_probe_ops);
2380         ftrace_shutdown(&global_ops, 0);
2381         ftrace_probe_registered = 0;
2382 }
2383
2384
2385 static void ftrace_free_entry_rcu(struct rcu_head *rhp)
2386 {
2387         struct ftrace_func_probe *entry =
2388                 container_of(rhp, struct ftrace_func_probe, rcu);
2389
2390         if (entry->ops->free)
2391                 entry->ops->free(&entry->data);
2392         kfree(entry);
2393 }
2394
2395
2396 int
2397 register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2398                               void *data)
2399 {
2400         struct ftrace_func_probe *entry;
2401         struct ftrace_page *pg;
2402         struct dyn_ftrace *rec;
2403         int type, len, not;
2404         unsigned long key;
2405         int count = 0;
2406         char *search;
2407
2408         type = filter_parse_regex(glob, strlen(glob), &search, &not);
2409         len = strlen(search);
2410
2411         /* we do not support '!' for function probes */
2412         if (WARN_ON(not))
2413                 return -EINVAL;
2414
2415         mutex_lock(&ftrace_lock);
2416
2417         if (unlikely(ftrace_disabled))
2418                 goto out_unlock;
2419
2420         do_for_each_ftrace_rec(pg, rec) {
2421
2422                 if (!ftrace_match_record(rec, NULL, search, len, type))
2423                         continue;
2424
2425                 entry = kmalloc(sizeof(*entry), GFP_KERNEL);
2426                 if (!entry) {
2427                         /* If we did not process any, then return error */
2428                         if (!count)
2429                                 count = -ENOMEM;
2430                         goto out_unlock;
2431                 }
2432
2433                 count++;
2434
2435                 entry->data = data;
2436
2437                 /*
2438                  * The caller might want to do something special
2439                  * for each function we find. We call the callback
2440                  * to give the caller an opportunity to do so.
2441                  */
2442                 if (ops->callback) {
2443                         if (ops->callback(rec->ip, &entry->data) < 0) {
2444                                 /* caller does not like this func */
2445                                 kfree(entry);
2446                                 continue;
2447                         }
2448                 }
2449
2450                 entry->ops = ops;
2451                 entry->ip = rec->ip;
2452
2453                 key = hash_long(entry->ip, FTRACE_HASH_BITS);
2454                 hlist_add_head_rcu(&entry->node, &ftrace_func_hash[key]);
2455
2456         } while_for_each_ftrace_rec();
2457         __enable_ftrace_function_probe();
2458
2459  out_unlock:
2460         mutex_unlock(&ftrace_lock);
2461
2462         return count;
2463 }
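/*
 * Example (illustrative sketch only): a hypothetical probe that counts
 * how often each matched function is hit, storing the count directly
 * in the per-entry @data cookie.  Only the hook signatures and the
 * register/unregister calls are real; every my_* name is invented.
 *
 *      static void my_count_func(unsigned long ip, unsigned long parent_ip,
 *                                void **data)
 *      {
 *              long *count = (long *)data;
 *
 *              (*count)++;
 *      }
 *
 *      static int my_count_print(struct seq_file *m, unsigned long ip,
 *                                struct ftrace_probe_ops *ops, void *data)
 *      {
 *              seq_printf(m, "%ps: hits=%ld\n", (void *)ip, (long)data);
 *              return 0;
 *      }
 *
 *      static struct ftrace_probe_ops my_count_probe_ops = {
 *              .func   = my_count_func,
 *              .print  = my_count_print,
 *      };
 *
 *      register_ftrace_function_probe("vfs_*", &my_count_probe_ops, NULL);
 *      ...
 *      unregister_ftrace_function_probe_func("vfs_*", &my_count_probe_ops);
 */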
2464
2465 enum {
2466         PROBE_TEST_FUNC         = 1,
2467         PROBE_TEST_DATA         = 2
2468 };
2469
2470 static void
2471 __unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2472                                   void *data, int flags)
2473 {
2474         struct ftrace_func_probe *entry;
2475         struct hlist_node *n, *tmp;
2476         char str[KSYM_SYMBOL_LEN];
2477         int type = MATCH_FULL;
2478         int i, len = 0;
2479         char *search;
2480
2481         if (glob && (strcmp(glob, "*") == 0 || !strlen(glob)))
2482                 glob = NULL;
2483         else if (glob) {
2484                 int not;
2485
2486                 type = filter_parse_regex(glob, strlen(glob), &search, &not);
2487                 len = strlen(search);
2488
2489                 /* we do not support '!' for function probes */
2490                 if (WARN_ON(not))
2491                         return;
2492         }
2493
2494         mutex_lock(&ftrace_lock);
2495         for (i = 0; i < FTRACE_FUNC_HASHSIZE; i++) {
2496                 struct hlist_head *hhd = &ftrace_func_hash[i];
2497
2498                 hlist_for_each_entry_safe(entry, n, tmp, hhd, node) {
2499
2500                         /* break up if statements for readability */
2501                         if ((flags & PROBE_TEST_FUNC) && entry->ops != ops)
2502                                 continue;
2503
2504                         if ((flags & PROBE_TEST_DATA) && entry->data != data)
2505                                 continue;
2506
2507                         /* do this last, since it is the most expensive */
2508                         if (glob) {
2509                                 kallsyms_lookup(entry->ip, NULL, NULL,
2510                                                 NULL, str);
2511                                 if (!ftrace_match(str, glob, len, type))
2512                                         continue;
2513                         }
2514
2515                         hlist_del(&entry->node);
2516                         call_rcu(&entry->rcu, ftrace_free_entry_rcu);
2517                 }
2518         }
2519         __disable_ftrace_function_probe();
2520         mutex_unlock(&ftrace_lock);
2521 }
2522
2523 void
2524 unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
2525                                 void *data)
2526 {
2527         __unregister_ftrace_function_probe(glob, ops, data,
2528                                           PROBE_TEST_FUNC | PROBE_TEST_DATA);
2529 }
2530
2531 void
2532 unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops)
2533 {
2534         __unregister_ftrace_function_probe(glob, ops, NULL, PROBE_TEST_FUNC);
2535 }
2536
2537 void unregister_ftrace_function_probe_all(char *glob)
2538 {
2539         __unregister_ftrace_function_probe(glob, NULL, NULL, 0);
2540 }
2541
2542 static LIST_HEAD(ftrace_commands);
2543 static DEFINE_MUTEX(ftrace_cmd_mutex);
2544
2545 int register_ftrace_command(struct ftrace_func_command *cmd)
2546 {
2547         struct ftrace_func_command *p;
2548         int ret = 0;
2549
2550         mutex_lock(&ftrace_cmd_mutex);
2551         list_for_each_entry(p, &ftrace_commands, list) {
2552                 if (strcmp(cmd->name, p->name) == 0) {
2553                         ret = -EBUSY;
2554                         goto out_unlock;
2555                 }
2556         }
2557         list_add(&cmd->list, &ftrace_commands);
2558  out_unlock:
2559         mutex_unlock(&ftrace_cmd_mutex);
2560
2561         return ret;
2562 }
2563
2564 int unregister_ftrace_command(struct ftrace_func_command *cmd)
2565 {
2566         struct ftrace_func_command *p, *n;
2567         int ret = -ENODEV;
2568
2569         mutex_lock(&ftrace_cmd_mutex);
2570         list_for_each_entry_safe(p, n, &ftrace_commands, list) {
2571                 if (strcmp(cmd->name, p->name) == 0) {
2572                         ret = 0;
2573                         list_del_init(&p->list);
2574                         goto out_unlock;
2575                 }
2576         }
2577  out_unlock:
2578         mutex_unlock(&ftrace_cmd_mutex);
2579
2580         return ret;
2581 }
2582
2583 static int ftrace_process_regex(struct ftrace_hash *hash,
2584                                 char *buff, int len, int enable)
2585 {
2586         char *func, *command, *next = buff;
2587         struct ftrace_func_command *p;
2588         int ret = -EINVAL;
2589
2590         func = strsep(&next, ":");
2591
2592         if (!next) {
2593                 ret = ftrace_match_records(hash, func, len);
2594                 if (!ret)
2595                         ret = -EINVAL;
2596                 if (ret < 0)
2597                         return ret;
2598                 return 0;
2599         }
2600
2601         /* command found */
2602
2603         command = strsep(&next, ":");
2604
2605         mutex_lock(&ftrace_cmd_mutex);
2606         list_for_each_entry(p, &ftrace_commands, list) {
2607                 if (strcmp(p->name, command) == 0) {
2608                         ret = p->func(func, command, next, enable);
2609                         goto out_unlock;
2610                 }
2611         }
2612  out_unlock:
2613         mutex_unlock(&ftrace_cmd_mutex);
2614
2615         return ret;
2616 }
2617
2618 static ssize_t
2619 ftrace_regex_write(struct file *file, const char __user *ubuf,
2620                    size_t cnt, loff_t *ppos, int enable)
2621 {
2622         struct ftrace_iterator *iter;
2623         struct trace_parser *parser;
2624         ssize_t ret, read;
2625
2626         if (!cnt)
2627                 return 0;
2628
2629         mutex_lock(&ftrace_regex_lock);
2630
2631         ret = -ENODEV;
2632         if (unlikely(ftrace_disabled))
2633                 goto out_unlock;
2634
2635         if (file->f_mode & FMODE_READ) {
2636                 struct seq_file *m = file->private_data;
2637                 iter = m->private;
2638         } else
2639                 iter = file->private_data;
2640
2641         parser = &iter->parser;
2642         read = trace_get_user(parser, ubuf, cnt, ppos);
2643
2644         if (read >= 0 && trace_parser_loaded(parser) &&
2645             !trace_parser_cont(parser)) {
2646                 ret = ftrace_process_regex(iter->hash, parser->buffer,
2647                                            parser->idx, enable);
2648                 trace_parser_clear(parser);
2649                 if (ret)
2650                         goto out_unlock;
2651         }
2652
2653         ret = read;
2654 out_unlock:
2655         mutex_unlock(&ftrace_regex_lock);
2656
2657         return ret;
2658 }
2659
2660 static ssize_t
2661 ftrace_filter_write(struct file *file, const char __user *ubuf,
2662                     size_t cnt, loff_t *ppos)
2663 {
2664         return ftrace_regex_write(file, ubuf, cnt, ppos, 1);
2665 }
2666
2667 static ssize_t
2668 ftrace_notrace_write(struct file *file, const char __user *ubuf,
2669                      size_t cnt, loff_t *ppos)
2670 {
2671         return ftrace_regex_write(file, ubuf, cnt, ppos, 0);
2672 }
2673
2674 static int
2675 ftrace_set_regex(struct ftrace_ops *ops, unsigned char *buf, int len,
2676                  int reset, int enable)
2677 {
2678         struct ftrace_hash **orig_hash;
2679         struct ftrace_hash *hash;
2680         int ret;
2681
2682         if (unlikely(ftrace_disabled))
2683                 return -ENODEV;
2684
2685         if (enable)
2686                 orig_hash = &ops->filter_hash;
2687         else
2688                 orig_hash = &ops->notrace_hash;
2689
2690         hash = alloc_and_copy_ftrace_hash(FTRACE_HASH_DEFAULT_BITS, *orig_hash);
2691         if (!hash)
2692                 return -ENOMEM;
2693
2694         mutex_lock(&ftrace_regex_lock);
2695         if (reset)
2696                 ftrace_filter_reset(hash);
2697         if (buf)
2698                 ftrace_match_records(hash, buf, len);
2699
2700         mutex_lock(&ftrace_lock);
2701         ret = ftrace_hash_move(orig_hash, hash);
2702         mutex_unlock(&ftrace_lock);
2703
2704         mutex_unlock(&ftrace_regex_lock);
2705
2706         free_ftrace_hash(hash);
2707         return ret;
2708 }
2709
2710 /**
2711  * ftrace_set_filter - set a function to filter on in ftrace
2712  * @buf - the string that holds the function filter text.
2713  * @len - the length of the string.
2714  * @reset - non zero to reset all filters before applying this filter.
2715  *
2716  * Filters denote which functions should be enabled when tracing is enabled.
2717  * If @buf is NULL and reset is set, all functions will be enabled for tracing.
2718  */
2719 void ftrace_set_filter(unsigned char *buf, int len, int reset)
2720 {
2721         ftrace_set_regex(&global_ops, buf, len, reset, 1);
2722 }
2723
2724 /**
2725  * ftrace_set_notrace - set a function to not trace in ftrace
2726  * @buf - the string that holds the function notrace text.
2727  * @len - the length of the string.
2728  * @reset - non zero to reset all filters before applying this filter.
2729  *
2730  * Notrace Filters denote which functions should not be enabled when tracing
2731  * is enabled. If @buf is NULL and reset is set, all functions will be enabled
2732  * for tracing.
2733  */
2734 void ftrace_set_notrace(unsigned char *buf, int len, int reset)
2735 {
2736         ftrace_set_regex(&global_ops, buf, len, reset, 0);
2737 }
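/*
 * Example (illustrative only): a built-in caller could restrict the
 * global tracer to the scheduler and keep the spinlock helpers out of
 * the trace.  The call site is hypothetical; only the two helpers
 * above are the real interface.  Note that the buffers must be
 * writable, since the glob is parsed in place:
 *
 *      char filter[]  = "sched*";
 *      char notrace[] = "*spin_lock*";
 *
 *      ftrace_set_filter(filter, strlen(filter), 1);
 *      ftrace_set_notrace(notrace, strlen(notrace), 1);
 */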
2738
2739 /*
2740  * command line interface to allow users to set filters on boot up.
2741  */
2742 #define FTRACE_FILTER_SIZE              COMMAND_LINE_SIZE
2743 static char ftrace_notrace_buf[FTRACE_FILTER_SIZE] __initdata;
2744 static char ftrace_filter_buf[FTRACE_FILTER_SIZE] __initdata;
2745
2746 static int __init set_ftrace_notrace(char *str)
2747 {
2748         strlcpy(ftrace_notrace_buf, str, FTRACE_FILTER_SIZE);
2749         return 1;
2750 }
2751 __setup("ftrace_notrace=", set_ftrace_notrace);
2752
2753 static int __init set_ftrace_filter(char *str)
2754 {
2755         strlcpy(ftrace_filter_buf, str, FTRACE_FILTER_SIZE);
2756         return 1;
2757 }
2758 __setup("ftrace_filter=", set_ftrace_filter);
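/*
 * Example (illustrative only): a kernel command line using the two
 * parameters registered above; multiple functions are separated by
 * commas, as parsed by set_ftrace_early_filter() below:
 *
 *      ftrace_filter=sys_open,sys_read ftrace_notrace=*spin_lock*
 */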
2759
2760 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2761 static char ftrace_graph_buf[FTRACE_FILTER_SIZE] __initdata;
2762 static int ftrace_set_func(unsigned long *array, int *idx, char *buffer);
2763
2764 static int __init set_graph_function(char *str)
2765 {
2766         strlcpy(ftrace_graph_buf, str, FTRACE_FILTER_SIZE);
2767         return 1;
2768 }
2769 __setup("ftrace_graph_filter=", set_graph_function);
2770
2771 static void __init set_ftrace_early_graph(char *buf)
2772 {
2773         int ret;
2774         char *func;
2775
2776         while (buf) {
2777                 func = strsep(&buf, ",");
2778                 /* we allow only one expression at a time */
2779                 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
2780                                       func);
2781                 if (ret)
2782                         printk(KERN_DEBUG "ftrace: function %s not "
2783                                           "traceable\n", func);
2784         }
2785 }
2786 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2787
2788 static void __init
2789 set_ftrace_early_filter(struct ftrace_ops *ops, char *buf, int enable)
2790 {
2791         char *func;
2792
2793         while (buf) {
2794                 func = strsep(&buf, ",");
2795                 ftrace_set_regex(ops, func, strlen(func), 0, enable);
2796         }
2797 }
2798
2799 static void __init set_ftrace_early_filters(void)
2800 {
2801         if (ftrace_filter_buf[0])
2802                 set_ftrace_early_filter(&global_ops, ftrace_filter_buf, 1);
2803         if (ftrace_notrace_buf[0])
2804                 set_ftrace_early_filter(&global_ops, ftrace_notrace_buf, 0);
2805 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2806         if (ftrace_graph_buf[0])
2807                 set_ftrace_early_graph(ftrace_graph_buf);
2808 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
2809 }
2810
2811 static int
2812 ftrace_regex_release(struct inode *inode, struct file *file)
2813 {
2814         struct seq_file *m = (struct seq_file *)file->private_data;
2815         struct ftrace_iterator *iter;
2816         struct ftrace_hash **orig_hash;
2817         struct trace_parser *parser;
2818         int filter_hash;
2819         int ret;
2820
2821         mutex_lock(&ftrace_regex_lock);
2822         if (file->f_mode & FMODE_READ) {
2823                 iter = m->private;
2824
2825                 seq_release(inode, file);
2826         } else
2827                 iter = file->private_data;
2828
2829         parser = &iter->parser;
2830         if (trace_parser_loaded(parser)) {
2831                 parser->buffer[parser->idx] = 0;
2832                 ftrace_match_records(iter->hash, parser->buffer, parser->idx);
2833         }
2834
2835         trace_parser_put(parser);
2836
2837         if (file->f_mode & FMODE_WRITE) {
2838                 filter_hash = !!(iter->flags & FTRACE_ITER_FILTER);
2839
2840                 if (filter_hash)
2841                         orig_hash = &iter->ops->filter_hash;
2842                 else
2843                         orig_hash = &iter->ops->notrace_hash;
2844
2845                 mutex_lock(&ftrace_lock);
2846                 /*
2847                  * Remove the current set, update the hash and add
2848                  * them back.
2849                  */
2850                 ftrace_hash_rec_disable(iter->ops, filter_hash);
2851                 ret = ftrace_hash_move(orig_hash, iter->hash);
2852                 if (!ret) {
2853                         ftrace_hash_rec_enable(iter->ops, filter_hash);
2854                         if (iter->ops->flags & FTRACE_OPS_FL_ENABLED
2855                             && ftrace_enabled)
2856                                 ftrace_run_update_code(FTRACE_ENABLE_CALLS);
2857                 }
2858                 mutex_unlock(&ftrace_lock);
2859         }
2860         free_ftrace_hash(iter->hash);
2861         kfree(iter);
2862
2863         mutex_unlock(&ftrace_regex_lock);
2864         return 0;
2865 }
2866
2867 static const struct file_operations ftrace_avail_fops = {
2868         .open = ftrace_avail_open,
2869         .read = seq_read,
2870         .llseek = seq_lseek,
2871         .release = seq_release_private,
2872 };
2873
2874 static const struct file_operations ftrace_enabled_fops = {
2875         .open = ftrace_enabled_open,
2876         .read = seq_read,
2877         .llseek = seq_lseek,
2878         .release = seq_release_private,
2879 };
2880
2881 static const struct file_operations ftrace_filter_fops = {
2882         .open = ftrace_filter_open,
2883         .read = seq_read,
2884         .write = ftrace_filter_write,
2885         .llseek = ftrace_regex_lseek,
2886         .release = ftrace_regex_release,
2887 };
2888
2889 static const struct file_operations ftrace_notrace_fops = {
2890         .open = ftrace_notrace_open,
2891         .read = seq_read,
2892         .write = ftrace_notrace_write,
2893         .llseek = ftrace_regex_lseek,
2894         .release = ftrace_regex_release,
2895 };
2896
2897 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
2898
2899 static DEFINE_MUTEX(graph_lock);
2900
2901 int ftrace_graph_count;
2902 int ftrace_graph_filter_enabled;
2903 unsigned long ftrace_graph_funcs[FTRACE_GRAPH_MAX_FUNCS] __read_mostly;
2904
2905 static void *
2906 __g_next(struct seq_file *m, loff_t *pos)
2907 {
2908         if (*pos >= ftrace_graph_count)
2909                 return NULL;
2910         return &ftrace_graph_funcs[*pos];
2911 }
2912
2913 static void *
2914 g_next(struct seq_file *m, void *v, loff_t *pos)
2915 {
2916         (*pos)++;
2917         return __g_next(m, pos);
2918 }
2919
2920 static void *g_start(struct seq_file *m, loff_t *pos)
2921 {
2922         mutex_lock(&graph_lock);
2923
2924         /* No filter is set; tell g_show to print that all functions are enabled */
2925         if (!ftrace_graph_filter_enabled && !*pos)
2926                 return (void *)1;
2927
2928         return __g_next(m, pos);
2929 }
2930
2931 static void g_stop(struct seq_file *m, void *p)
2932 {
2933         mutex_unlock(&graph_lock);
2934 }
2935
2936 static int g_show(struct seq_file *m, void *v)
2937 {
2938         unsigned long *ptr = v;
2939
2940         if (!ptr)
2941                 return 0;
2942
2943         if (ptr == (unsigned long *)1) {
2944                 seq_printf(m, "#### all functions enabled ####\n");
2945                 return 0;
2946         }
2947
2948         seq_printf(m, "%ps\n", (void *)*ptr);
2949
2950         return 0;
2951 }
2952
2953 static const struct seq_operations ftrace_graph_seq_ops = {
2954         .start = g_start,
2955         .next = g_next,
2956         .stop = g_stop,
2957         .show = g_show,
2958 };
2959
2960 static int
2961 ftrace_graph_open(struct inode *inode, struct file *file)
2962 {
2963         int ret = 0;
2964
2965         if (unlikely(ftrace_disabled))
2966                 return -ENODEV;
2967
2968         mutex_lock(&graph_lock);
2969         if ((file->f_mode & FMODE_WRITE) &&
2970             (file->f_flags & O_TRUNC)) {
2971                 ftrace_graph_filter_enabled = 0;
2972                 ftrace_graph_count = 0;
2973                 memset(ftrace_graph_funcs, 0, sizeof(ftrace_graph_funcs));
2974         }
2975         mutex_unlock(&graph_lock);
2976
2977         if (file->f_mode & FMODE_READ)
2978                 ret = seq_open(file, &ftrace_graph_seq_ops);
2979
2980         return ret;
2981 }
2982
2983 static int
2984 ftrace_graph_release(struct inode *inode, struct file *file)
2985 {
2986         if (file->f_mode & FMODE_READ)
2987                 seq_release(inode, file);
2988         return 0;
2989 }
2990
2991 static int
2992 ftrace_set_func(unsigned long *array, int *idx, char *buffer)
2993 {
2994         struct dyn_ftrace *rec;
2995         struct ftrace_page *pg;
2996         int search_len;
2997         int fail = 1;
2998         int type, not;
2999         char *search;
3000         bool exists;
3001         int i;
3002
3003         /* decode regex */
3004         type = filter_parse_regex(buffer, strlen(buffer), &search, &not);
3005         if (!not && *idx >= FTRACE_GRAPH_MAX_FUNCS)
3006                 return -EBUSY;
3007
3008         search_len = strlen(search);
3009
3010         mutex_lock(&ftrace_lock);
3011
3012         if (unlikely(ftrace_disabled)) {
3013                 mutex_unlock(&ftrace_lock);
3014                 return -ENODEV;
3015         }
3016
3017         do_for_each_ftrace_rec(pg, rec) {
3018
3019                 if (rec->flags & FTRACE_FL_FREE)
3020                         continue;
3021
3022                 if (ftrace_match_record(rec, NULL, search, search_len, type)) {
3023                         /* if it is in the array */
3024                         exists = false;
3025                         for (i = 0; i < *idx; i++) {
3026                                 if (array[i] == rec->ip) {
3027                                         exists = true;
3028                                         break;
3029                                 }
3030                         }
3031
3032                         if (!not) {
3033                                 fail = 0;
3034                                 if (!exists) {
3035                                         array[(*idx)++] = rec->ip;
3036                                         if (*idx >= FTRACE_GRAPH_MAX_FUNCS)
3037                                                 goto out;
3038                                 }
3039                         } else {
3040                                 if (exists) {
3041                                         array[i] = array[--(*idx)];
3042                                         array[*idx] = 0;
3043                                         fail = 0;
3044                                 }
3045                         }
3046                 }
3047         } while_for_each_ftrace_rec();
3048 out:
3049         mutex_unlock(&ftrace_lock);
3050
3051         if (fail)
3052                 return -EINVAL;
3053
3054         ftrace_graph_filter_enabled = 1;
3055         return 0;
3056 }
3057
3058 static ssize_t
3059 ftrace_graph_write(struct file *file, const char __user *ubuf,
3060                    size_t cnt, loff_t *ppos)
3061 {
3062         struct trace_parser parser;
3063         ssize_t read, ret;
3064
3065         if (!cnt)
3066                 return 0;
3067
3068         mutex_lock(&graph_lock);
3069
3070         if (trace_parser_get_init(&parser, FTRACE_BUFF_MAX)) {
3071                 ret = -ENOMEM;
3072                 goto out_unlock;
3073         }
3074
3075         read = trace_get_user(&parser, ubuf, cnt, ppos);
3076
3077         if (read >= 0 && trace_parser_loaded(&parser)) {
3078                 parser.buffer[parser.idx] = 0;
3079
3080                 /* we allow only one expression at a time */
3081                 ret = ftrace_set_func(ftrace_graph_funcs, &ftrace_graph_count,
3082                                         parser.buffer);
3083                 if (ret)
3084                         goto out_free;
3085         }
3086
3087         ret = read;
3088
3089 out_free:
3090         trace_parser_put(&parser);
3091 out_unlock:
3092         mutex_unlock(&graph_lock);
3093
3094         return ret;
3095 }
3096
3097 static const struct file_operations ftrace_graph_fops = {
3098         .open           = ftrace_graph_open,
3099         .read           = seq_read,
3100         .write          = ftrace_graph_write,
3101         .release        = ftrace_graph_release,
3102         .llseek         = seq_lseek,
3103 };
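/*
 * Example (illustrative only): the set_graph_function file created
 * below in ftrace_init_dyn_debugfs() limits the function graph tracer,
 * e.g. from the tracing debugfs directory (usually
 * /sys/kernel/debug/tracing):
 *
 *      echo sys_open      >  set_graph_function
 *      echo sys_nanosleep >> set_graph_function
 *      echo               >  set_graph_function   (back to all functions)
 */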
3104 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3105
3106 static __init int ftrace_init_dyn_debugfs(struct dentry *d_tracer)
3107 {
3108
3109         trace_create_file("available_filter_functions", 0444,
3110                         d_tracer, NULL, &ftrace_avail_fops);
3111
3112         trace_create_file("enabled_functions", 0444,
3113                         d_tracer, NULL, &ftrace_enabled_fops);
3114
3115         trace_create_file("set_ftrace_filter", 0644, d_tracer,
3116                         NULL, &ftrace_filter_fops);
3117
3118         trace_create_file("set_ftrace_notrace", 0644, d_tracer,
3119                                     NULL, &ftrace_notrace_fops);
3120
3121 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3122         trace_create_file("set_graph_function", 0444, d_tracer,
3123                                     NULL,
3124                                     &ftrace_graph_fops);
3125 #endif /* CONFIG_FUNCTION_GRAPH_TRACER */
3126
3127         return 0;
3128 }
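/*
 * Example (illustrative only): typical use of the files created above,
 * from the tracing debugfs directory (usually /sys/kernel/debug/tracing):
 *
 *      cat available_filter_functions | wc -l     (functions ftrace knows about)
 *      echo 'sched*'      >  set_ftrace_filter    (trace only sched* functions)
 *      echo '*:mod:ext3'  >> set_ftrace_filter    (plus everything in the ext3 module)
 *      echo '*spin_lock*' >  set_ftrace_notrace   (never trace the spinlock helpers)
 *      echo               >  set_ftrace_filter    (back to tracing everything)
 */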
3129
3130 static int ftrace_process_locs(struct module *mod,
3131                                unsigned long *start,
3132                                unsigned long *end)
3133 {
3134         unsigned long *p;
3135         unsigned long addr;
3136
3137         mutex_lock(&ftrace_lock);
3138         p = start;
3139         while (p < end) {
3140                 addr = ftrace_call_adjust(*p++);
3141                 /*
3142                  * Some architecture linkers will pad between
3143                  * the different mcount_loc sections of different
3144                  * object files to satisfy alignments.
3145                  * Skip any NULL pointers.
3146                  */
3147                 if (!addr)
3148                         continue;
3149                 ftrace_record_ip(addr);
3150         }
3151
3152         ftrace_update_code(mod);
3153         mutex_unlock(&ftrace_lock);
3154
3155         return 0;
3156 }
3157
3158 #ifdef CONFIG_MODULES
3159 void ftrace_release_mod(struct module *mod)
3160 {
3161         struct dyn_ftrace *rec;
3162         struct ftrace_page *pg;
3163
3164         mutex_lock(&ftrace_lock);
3165
3166         if (ftrace_disabled)
3167                 goto out_unlock;
3168
3169         do_for_each_ftrace_rec(pg, rec) {
3170                 if (within_module_core(rec->ip, mod)) {
3171                         /*
3172                          * rec->ip is changed in ftrace_free_rec(), so a record
3173                          * that was already freed should no longer fall within the module.
3174                          */
3175                         FTRACE_WARN_ON(rec->flags & FTRACE_FL_FREE);
3176                         ftrace_free_rec(rec);
3177                 }
3178         } while_for_each_ftrace_rec();
3179  out_unlock:
3180         mutex_unlock(&ftrace_lock);
3181 }
3182
3183 static void ftrace_init_module(struct module *mod,
3184                                unsigned long *start, unsigned long *end)
3185 {
3186         if (ftrace_disabled || start == end)
3187                 return;
3188         ftrace_process_locs(mod, start, end);
3189 }
3190
3191 static int ftrace_module_notify(struct notifier_block *self,
3192                                 unsigned long val, void *data)
3193 {
3194         struct module *mod = data;
3195
3196         switch (val) {
3197         case MODULE_STATE_COMING:
3198                 ftrace_init_module(mod, mod->ftrace_callsites,
3199                                    mod->ftrace_callsites +
3200                                    mod->num_ftrace_callsites);
3201                 break;
3202         case MODULE_STATE_GOING:
3203                 ftrace_release_mod(mod);
3204                 break;
3205         }
3206
3207         return 0;
3208 }
3209 #else
3210 static int ftrace_module_notify(struct notifier_block *self,
3211                                 unsigned long val, void *data)
3212 {
3213         return 0;
3214 }
3215 #endif /* CONFIG_MODULES */
3216
3217 struct notifier_block ftrace_module_nb = {
3218         .notifier_call = ftrace_module_notify,
3219         .priority = 0,
3220 };
3221
3222 extern unsigned long __start_mcount_loc[];
3223 extern unsigned long __stop_mcount_loc[];
3224
3225 void __init ftrace_init(void)
3226 {
3227         unsigned long count, addr, flags;
3228         int ret;
3229
3230         /* Keep the ftrace pointer to the stub */
3231         addr = (unsigned long)ftrace_stub;
3232
3233         local_irq_save(flags);
3234         ftrace_dyn_arch_init(&addr);
3235         local_irq_restore(flags);
3236
3237         /* ftrace_dyn_arch_init places the return code in addr */
3238         if (addr)
3239                 goto failed;
3240
3241         count = __stop_mcount_loc - __start_mcount_loc;
3242
3243         ret = ftrace_dyn_table_alloc(count);
3244         if (ret)
3245                 goto failed;
3246
3247         last_ftrace_enabled = ftrace_enabled = 1;
3248
3249         ret = ftrace_process_locs(NULL,
3250                                   __start_mcount_loc,
3251                                   __stop_mcount_loc);
3252
3253         ret = register_module_notifier(&ftrace_module_nb);
3254         if (ret)
3255                 pr_warning("Failed to register trace ftrace module notifier\n");
3256
3257         set_ftrace_early_filters();
3258
3259         return;
3260  failed:
3261         ftrace_disabled = 1;
3262 }
3263
3264 #else
3265
3266 struct ftrace_ops global_ops = {
3267         .func                   = ftrace_stub,
3268 };
3269
3270 static int __init ftrace_nodyn_init(void)
3271 {
3272         ftrace_enabled = 1;
3273         return 0;
3274 }
3275 device_initcall(ftrace_nodyn_init);
3276
3277 static inline int ftrace_init_dyn_debugfs(struct dentry *d_tracer) { return 0; }
3278 static inline void ftrace_startup_enable(int command) { }
3279 /* Keep as macros so we do not need to define the commands */
3280 # define ftrace_startup(ops, command)   do { } while (0)
3281 # define ftrace_shutdown(ops, command)  do { } while (0)
3282 # define ftrace_startup_sysctl()        do { } while (0)
3283 # define ftrace_shutdown_sysctl()       do { } while (0)
3284 #endif /* CONFIG_DYNAMIC_FTRACE */
3285
3286 static void clear_ftrace_swapper(void)
3287 {
3288         struct task_struct *p;
3289         int cpu;
3290
3291         get_online_cpus();
3292         for_each_online_cpu(cpu) {
3293                 p = idle_task(cpu);
3294                 clear_tsk_trace_trace(p);
3295         }
3296         put_online_cpus();
3297 }
3298
3299 static void set_ftrace_swapper(void)
3300 {
3301         struct task_struct *p;
3302         int cpu;
3303
3304         get_online_cpus();
3305         for_each_online_cpu(cpu) {
3306                 p = idle_task(cpu);
3307                 set_tsk_trace_trace(p);
3308         }
3309         put_online_cpus();
3310 }
3311
3312 static void clear_ftrace_pid(struct pid *pid)
3313 {
3314         struct task_struct *p;
3315
3316         rcu_read_lock();
3317         do_each_pid_task(pid, PIDTYPE_PID, p) {
3318                 clear_tsk_trace_trace(p);
3319         } while_each_pid_task(pid, PIDTYPE_PID, p);
3320         rcu_read_unlock();
3321
3322         put_pid(pid);
3323 }
3324
3325 static void set_ftrace_pid(struct pid *pid)
3326 {
3327         struct task_struct *p;
3328
3329         rcu_read_lock();
3330         do_each_pid_task(pid, PIDTYPE_PID, p) {
3331                 set_tsk_trace_trace(p);
3332         } while_each_pid_task(pid, PIDTYPE_PID, p);
3333         rcu_read_unlock();
3334 }
3335
3336 static void clear_ftrace_pid_task(struct pid *pid)
3337 {
3338         if (pid == ftrace_swapper_pid)
3339                 clear_ftrace_swapper();
3340         else
3341                 clear_ftrace_pid(pid);
3342 }
3343
3344 static void set_ftrace_pid_task(struct pid *pid)
3345 {
3346         if (pid == ftrace_swapper_pid)
3347                 set_ftrace_swapper();
3348         else
3349                 set_ftrace_pid(pid);
3350 }
3351
3352 static int ftrace_pid_add(int p)
3353 {
3354         struct pid *pid;
3355         struct ftrace_pid *fpid;
3356         int ret = -EINVAL;
3357
3358         mutex_lock(&ftrace_lock);
3359
3360         if (!p)
3361                 pid = ftrace_swapper_pid;
3362         else
3363                 pid = find_get_pid(p);
3364
3365         if (!pid)
3366                 goto out;
3367
3368         ret = 0;
3369
3370         list_for_each_entry(fpid, &ftrace_pids, list)
3371                 if (fpid->pid == pid)
3372                         goto out_put;
3373
3374         ret = -ENOMEM;
3375
3376         fpid = kmalloc(sizeof(*fpid), GFP_KERNEL);
3377         if (!fpid)
3378                 goto out_put;
3379
3380         list_add(&fpid->list, &ftrace_pids);
3381         fpid->pid = pid;
3382
3383         set_ftrace_pid_task(pid);
3384
3385         ftrace_update_pid_func();
3386         ftrace_startup_enable(0);
3387
3388         mutex_unlock(&ftrace_lock);
3389         return 0;
3390
3391 out_put:
3392         if (pid != ftrace_swapper_pid)
3393                 put_pid(pid);
3394
3395 out:
3396         mutex_unlock(&ftrace_lock);
3397         return ret;
3398 }
3399
3400 static void ftrace_pid_reset(void)
3401 {
3402         struct ftrace_pid *fpid, *safe;
3403
3404         mutex_lock(&ftrace_lock);
3405         list_for_each_entry_safe(fpid, safe, &ftrace_pids, list) {
3406                 struct pid *pid = fpid->pid;
3407
3408                 clear_ftrace_pid_task(pid);
3409
3410                 list_del(&fpid->list);
3411                 kfree(fpid);
3412         }
3413
3414         ftrace_update_pid_func();
3415         ftrace_startup_enable(0);
3416
3417         mutex_unlock(&ftrace_lock);
3418 }
3419
3420 static void *fpid_start(struct seq_file *m, loff_t *pos)
3421 {
3422         mutex_lock(&ftrace_lock);
3423
3424         if (list_empty(&ftrace_pids) && (!*pos))
3425                 return (void *) 1;
3426
3427         return seq_list_start(&ftrace_pids, *pos);
3428 }
3429
3430 static void *fpid_next(struct seq_file *m, void *v, loff_t *pos)
3431 {
3432         if (v == (void *)1)
3433                 return NULL;
3434
3435         return seq_list_next(v, &ftrace_pids, pos);
3436 }
3437
3438 static void fpid_stop(struct seq_file *m, void *p)
3439 {
3440         mutex_unlock(&ftrace_lock);
3441 }
3442
3443 static int fpid_show(struct seq_file *m, void *v)
3444 {
3445         const struct ftrace_pid *fpid = list_entry(v, struct ftrace_pid, list);
3446
3447         if (v == (void *)1) {
3448                 seq_printf(m, "no pid\n");
3449                 return 0;
3450         }
3451
3452         if (fpid->pid == ftrace_swapper_pid)
3453                 seq_printf(m, "swapper tasks\n");
3454         else
3455                 seq_printf(m, "%u\n", pid_vnr(fpid->pid));
3456
3457         return 0;
3458 }
3459
3460 static const struct seq_operations ftrace_pid_sops = {
3461         .start = fpid_start,
3462         .next = fpid_next,
3463         .stop = fpid_stop,
3464         .show = fpid_show,
3465 };
3466
3467 static int
3468 ftrace_pid_open(struct inode *inode, struct file *file)
3469 {
3470         int ret = 0;
3471
3472         if ((file->f_mode & FMODE_WRITE) &&
3473             (file->f_flags & O_TRUNC))
3474                 ftrace_pid_reset();
3475
3476         if (file->f_mode & FMODE_READ)
3477                 ret = seq_open(file, &ftrace_pid_sops);
3478
3479         return ret;
3480 }
3481
3482 static ssize_t
3483 ftrace_pid_write(struct file *filp, const char __user *ubuf,
3484                    size_t cnt, loff_t *ppos)
3485 {
3486         char buf[64], *tmp;
3487         long val;
3488         int ret;
3489
3490         if (cnt >= sizeof(buf))
3491                 return -EINVAL;
3492
3493         if (copy_from_user(&buf, ubuf, cnt))
3494                 return -EFAULT;
3495
3496         buf[cnt] = 0;
3497
3498         /*
3499          * Allow "echo > set_ftrace_pid" or "echo -n '' > set_ftrace_pid"
3500          * to clear the filter quietly.
3501          */
3502         tmp = strstrip(buf);
3503         if (strlen(tmp) == 0)
3504                 return 1;
3505
3506         ret = strict_strtol(tmp, 10, &val);
3507         if (ret < 0)
3508                 return ret;
3509
3510         ret = ftrace_pid_add(val);
3511
3512         return ret ? ret : cnt;
3513 }
3514
3515 static int
3516 ftrace_pid_release(struct inode *inode, struct file *file)
3517 {
3518         if (file->f_mode & FMODE_READ)
3519                 seq_release(inode, file);
3520
3521         return 0;
3522 }
3523
3524 static const struct file_operations ftrace_pid_fops = {
3525         .open           = ftrace_pid_open,
3526         .write          = ftrace_pid_write,
3527         .read           = seq_read,
3528         .llseek         = seq_lseek,
3529         .release        = ftrace_pid_release,
3530 };
3531
3532 static __init int ftrace_init_debugfs(void)
3533 {
3534         struct dentry *d_tracer;
3535
3536         d_tracer = tracing_init_dentry();
3537         if (!d_tracer)
3538                 return 0;
3539
3540         ftrace_init_dyn_debugfs(d_tracer);
3541
3542         trace_create_file("set_ftrace_pid", 0644, d_tracer,
3543                             NULL, &ftrace_pid_fops);
3544
3545         ftrace_profile_debugfs(d_tracer);
3546
3547         return 0;
3548 }
3549 fs_initcall(ftrace_init_debugfs);
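
/*
 * Usage sketch (userspace, hedged): one way a tool might restrict the
 * function tracer to a single task via the set_ftrace_pid file created
 * above.  The debugfs mount point and the helper name set_traced_pid()
 * are assumptions, not part of this file; the block is not compiled.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

static int set_traced_pid(pid_t pid)
{
	char buf[32];
	int fd, len;
	ssize_t ret;

	/* O_TRUNC makes ftrace_pid_open() clear any previously set pids. */
	fd = open("/sys/kernel/debug/tracing/set_ftrace_pid",
		  O_WRONLY | O_TRUNC);
	if (fd < 0)
		return -1;

	len = snprintf(buf, sizeof(buf), "%d\n", pid);
	ret = write(fd, buf, len);
	close(fd);

	return ret == len ? 0 : -1;
}
#endif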
3550
3551 /**
3552  * ftrace_kill - kill ftrace
3553  *
3554  * This function should be used by panic code. It stops ftrace
3555  * but in a not so nice way. If you simply need to disable ftrace
3556  * from a non-atomic section, use unregister_ftrace_function() instead.
3557  */
3558 void ftrace_kill(void)
3559 {
3560         ftrace_disabled = 1;
3561         ftrace_enabled = 0;
3562         clear_ftrace_function();
3563 }
3564
3565 /**
3566  * register_ftrace_function - register a function for profiling
3567  * @ops: ops structure that holds the function for profiling.
3568  *
3569  * Register a function to be called by all functions in the
3570  * kernel.
3571  *
3572  * Note: @ops->func and all the functions it calls must be labeled
3573  *       with "notrace", otherwise it will go into a
3574  *       recursive loop.
3575  */
3576 int register_ftrace_function(struct ftrace_ops *ops)
3577 {
3578         int ret = -1;
3579
3580         mutex_lock(&ftrace_lock);
3581
3582         if (unlikely(ftrace_disabled))
3583                 goto out_unlock;
3584
3585         ret = __register_ftrace_function(ops);
3586         ftrace_startup(&global_ops, 0);
3587
3588  out_unlock:
3589         mutex_unlock(&ftrace_lock);
3590         return ret;
3591 }
3592
3593 /**
3594  * unregister_ftrace_function - unregister a function for profiling.
3595  * @ops: ops structure that holds the function to unregister
3596  *
3597  * Unregister a function that was added to be called by ftrace profiling.
3598  */
3599 int unregister_ftrace_function(struct ftrace_ops *ops)
3600 {
3601         int ret;
3602
3603         mutex_lock(&ftrace_lock);
3604         ret = __unregister_ftrace_function(ops);
3605         ftrace_shutdown(&global_ops, 0);
3606         mutex_unlock(&ftrace_lock);
3607
3608         return ret;
3609 }
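
/*
 * Hedged example (not compiled here): a minimal module-style sketch of
 * registering a callback with the API above.  The names my_trace_func,
 * my_ops, my_init and my_exit are hypothetical.  The callback is marked
 * notrace, as required by the register_ftrace_function() comment, so it
 * cannot recurse into itself.
 */
#if 0
static unsigned long my_call_count;	/* racy counter, fine for a sketch */

static void notrace my_trace_func(unsigned long ip, unsigned long parent_ip)
{
	/* Runs on every traced function entry; keep this path short. */
	my_call_count++;
}

static struct ftrace_ops my_ops __read_mostly = {
	.func	= my_trace_func,
};

static int __init my_init(void)
{
	/* Start calling my_trace_func() from every traced function. */
	return register_ftrace_function(&my_ops);
}

static void __exit my_exit(void)
{
	unregister_ftrace_function(&my_ops);
}
#endif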
3610
3611 int
3612 ftrace_enable_sysctl(struct ctl_table *table, int write,
3613                      void __user *buffer, size_t *lenp,
3614                      loff_t *ppos)
3615 {
3616         int ret = -ENODEV;
3617
3618         mutex_lock(&ftrace_lock);
3619
3620         if (unlikely(ftrace_disabled))
3621                 goto out;
3622
3623         ret = proc_dointvec(table, write, buffer, lenp, ppos);
3624
3625         if (ret || !write || (last_ftrace_enabled == !!ftrace_enabled))
3626                 goto out;
3627
3628         last_ftrace_enabled = !!ftrace_enabled;
3629
3630         if (ftrace_enabled) {
3631
3632                 ftrace_startup_sysctl();
3633
3634                 /* we are starting ftrace again */
3635                 if (ftrace_list != &ftrace_list_end) {
3636                         if (ftrace_list->next == &ftrace_list_end)
3637                                 ftrace_trace_function = ftrace_list->func;
3638                         else
3639                                 ftrace_trace_function = ftrace_list_func;
3640                 }
3641
3642         } else {
3643                 /* stopping ftrace calls (just send to ftrace_stub) */
3644                 ftrace_trace_function = ftrace_stub;
3645
3646                 ftrace_shutdown_sysctl();
3647         }
3648
3649  out:
3650         mutex_unlock(&ftrace_lock);
3651         return ret;
3652 }
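
/*
 * ftrace_enable_sysctl() is wired up via the "ftrace_enabled" entry in
 * kernel/sysctl.c, i.e. /proc/sys/kernel/ftrace_enabled; writing 0 or 1
 * there is what drives the startup/shutdown paths above.
 */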
3653
3654 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
3655
3656 static int ftrace_graph_active;
3657 static struct notifier_block ftrace_suspend_notifier;
3658
3659 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
3660 {
3661         return 0;
3662 }
3663
3664 /* The callbacks that hook a function */
3665 trace_func_graph_ret_t ftrace_graph_return =
3666                         (trace_func_graph_ret_t)ftrace_stub;
3667 trace_func_graph_ent_t ftrace_graph_entry = ftrace_graph_entry_stub;
3668
3669 /* Try to assign a return stack to each of FTRACE_RETSTACK_ALLOC_SIZE tasks. */
3670 static int alloc_retstack_tasklist(struct ftrace_ret_stack **ret_stack_list)
3671 {
3672         int i;
3673         int ret = 0;
3674         unsigned long flags;
3675         int start = 0, end = FTRACE_RETSTACK_ALLOC_SIZE;
3676         struct task_struct *g, *t;
3677
3678         for (i = 0; i < FTRACE_RETSTACK_ALLOC_SIZE; i++) {
3679                 ret_stack_list[i] = kmalloc(FTRACE_RETFUNC_DEPTH
3680                                         * sizeof(struct ftrace_ret_stack),
3681                                         GFP_KERNEL);
3682                 if (!ret_stack_list[i]) {
3683                         start = 0;
3684                         end = i;
3685                         ret = -ENOMEM;
3686                         goto free;
3687                 }
3688         }
3689
3690         read_lock_irqsave(&tasklist_lock, flags);
3691         do_each_thread(g, t) {
3692                 if (start == end) {
3693                         ret = -EAGAIN;
3694                         goto unlock;
3695                 }
3696
3697                 if (t->ret_stack == NULL) {
3698                         atomic_set(&t->tracing_graph_pause, 0);
3699                         atomic_set(&t->trace_overrun, 0);
3700                         t->curr_ret_stack = -1;
3701                         /* Make sure the tasks see the -1 first: */
3702                         smp_wmb();
3703                         t->ret_stack = ret_stack_list[start++];
3704                 }
3705         } while_each_thread(g, t);
3706
3707 unlock:
3708         read_unlock_irqrestore(&tasklist_lock, flags);
3709 free:
3710         for (i = start; i < end; i++)
3711                 kfree(ret_stack_list[i]);
3712         return ret;
3713 }
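
/*
 * Note on the helper above: it only hands out ret_stacks in batches of
 * FTRACE_RETSTACK_ALLOC_SIZE.  If more live tasks still need one, it
 * returns -EAGAIN and start_graph_tracing() below simply calls it again
 * until every task has been covered.
 */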
3714
3715 static void
3716 ftrace_graph_probe_sched_switch(void *ignore,
3717                         struct task_struct *prev, struct task_struct *next)
3718 {
3719         unsigned long long timestamp;
3720         int index;
3721
3722         /*
3723          * If the user wants the time a function spent sleeping counted
3724          * in its duration, do not update the time stamps here.
3725          */
3726         if (trace_flags & TRACE_ITER_SLEEP_TIME)
3727                 return;
3728
3729         timestamp = trace_clock_local();
3730
3731         prev->ftrace_timestamp = timestamp;
3732
3733         /* only process tasks that we timestamped */
3734         if (!next->ftrace_timestamp)
3735                 return;
3736
3737         /*
3738          * Update all the counters in next to make up for the
3739          * time next was sleeping.
3740          */
3741         timestamp -= next->ftrace_timestamp;
3742
3743         for (index = next->curr_ret_stack; index >= 0; index--)
3744                 next->ret_stack[index].calltime += timestamp;
3745 }
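
/*
 * Example of the adjustment above: if "next" was switched out for 3 ms,
 * every pending entry on its return stack has its calltime pushed
 * forward by 3 ms, so the durations later reported by the graph tracer
 * exclude the time spent sleeping.
 */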
3746
3747 /* Allocate a return stack for each task */
3748 static int start_graph_tracing(void)
3749 {
3750         struct ftrace_ret_stack **ret_stack_list;
3751         int ret, cpu;
3752
3753         ret_stack_list = kmalloc(FTRACE_RETSTACK_ALLOC_SIZE *
3754                                 sizeof(struct ftrace_ret_stack *),
3755                                 GFP_KERNEL);
3756
3757         if (!ret_stack_list)
3758                 return -ENOMEM;
3759
3760         /* The ret_stack of each cpu's idle task is allocated once and never freed */
3761         for_each_online_cpu(cpu) {
3762                 if (!idle_task(cpu)->ret_stack)
3763                         ftrace_graph_init_idle_task(idle_task(cpu), cpu);
3764         }
3765
3766         do {
3767                 ret = alloc_retstack_tasklist(ret_stack_list);
3768         } while (ret == -EAGAIN);
3769
3770         if (!ret) {
3771                 ret = register_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
3772                 if (ret)
3773                         pr_info("ftrace_graph: Couldn't activate tracepoint"
3774                                 " probe to kernel_sched_switch\n");
3775         }
3776
3777         kfree(ret_stack_list);
3778         return ret;
3779 }
3780
3781 /*
3782  * Hibernation protection.
3783  * The state of the current task is too unstable during
3784  * suspend/restore to disk. We want to protect against that.
3785  */
3786 static int
3787 ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
3788                                                         void *unused)
3789 {
3790         switch (state) {
3791         case PM_HIBERNATION_PREPARE:
3792                 pause_graph_tracing();
3793                 break;
3794
3795         case PM_POST_HIBERNATION:
3796                 unpause_graph_tracing();
3797                 break;
3798         }
3799         return NOTIFY_DONE;
3800 }
3801
3802 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
3803                         trace_func_graph_ent_t entryfunc)
3804 {
3805         int ret = 0;
3806
3807         mutex_lock(&ftrace_lock);
3808
3809         /* we currently allow only one tracer registered at a time */
3810         if (ftrace_graph_active) {
3811                 ret = -EBUSY;
3812                 goto out;
3813         }
3814
3815         ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
3816         register_pm_notifier(&ftrace_suspend_notifier);
3817
3818         ftrace_graph_active++;
3819         ret = start_graph_tracing();
3820         if (ret) {
3821                 ftrace_graph_active--;
3822                 goto out;
3823         }
3824
3825         ftrace_graph_return = retfunc;
3826         ftrace_graph_entry = entryfunc;
3827
3828         ftrace_startup(&global_ops, FTRACE_START_FUNC_RET);
3829
3830 out:
3831         mutex_unlock(&ftrace_lock);
3832         return ret;
3833 }
3834
3835 void unregister_ftrace_graph(void)
3836 {
3837         mutex_lock(&ftrace_lock);
3838
3839         if (unlikely(!ftrace_graph_active))
3840                 goto out;
3841
3842         ftrace_graph_active--;
3843         ftrace_graph_return = (trace_func_graph_ret_t)ftrace_stub;
3844         ftrace_graph_entry = ftrace_graph_entry_stub;
3845         ftrace_shutdown(&global_ops, FTRACE_STOP_FUNC_RET);
3846         unregister_pm_notifier(&ftrace_suspend_notifier);
3847         unregister_trace_sched_switch(ftrace_graph_probe_sched_switch, NULL);
3848
3849  out:
3850         mutex_unlock(&ftrace_lock);
3851 }
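
/*
 * Hedged example (not compiled here): minimal entry/return hooks for
 * the graph tracer registered through register_ftrace_graph().  The
 * names my_graph_entry, my_graph_return, my_graph_init and
 * my_graph_exit are hypothetical.
 */
#if 0
static int notrace my_graph_entry(struct ftrace_graph_ent *trace)
{
	/* Return 0 to tell the tracer to skip this particular call. */
	return 1;
}

static void notrace my_graph_return(struct ftrace_graph_ret *trace)
{
	/* trace->rettime - trace->calltime is the traced function's duration. */
}

static int __init my_graph_init(void)
{
	return register_ftrace_graph(my_graph_return, my_graph_entry);
}

static void __exit my_graph_exit(void)
{
	unregister_ftrace_graph();
}
#endif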
3852
3853 static DEFINE_PER_CPU(struct ftrace_ret_stack *, idle_ret_stack);
3854
3855 static void
3856 graph_init_task(struct task_struct *t, struct ftrace_ret_stack *ret_stack)
3857 {
3858         atomic_set(&t->tracing_graph_pause, 0);
3859         atomic_set(&t->trace_overrun, 0);
3860         t->ftrace_timestamp = 0;
3861         /* make curr_ret_stack visible before we add the ret_stack */
3862         smp_wmb();
3863         t->ret_stack = ret_stack;
3864 }
3865
3866 /*
3867  * Allocate a return stack for the idle task. May be the first
3868  * time through, or it may be done by CPU hotplug online.
3869  */
3870 void ftrace_graph_init_idle_task(struct task_struct *t, int cpu)
3871 {
3872         t->curr_ret_stack = -1;
3873         /*
3874          * The idle task has no parent, it either has its own
3875          * stack or no stack at all.
3876          */
3877         if (t->ret_stack)
3878                 WARN_ON(t->ret_stack != per_cpu(idle_ret_stack, cpu));
3879
3880         if (ftrace_graph_active) {
3881                 struct ftrace_ret_stack *ret_stack;
3882
3883                 ret_stack = per_cpu(idle_ret_stack, cpu);
3884                 if (!ret_stack) {
3885                         ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
3886                                             * sizeof(struct ftrace_ret_stack),
3887                                             GFP_KERNEL);
3888                         if (!ret_stack)
3889                                 return;
3890                         per_cpu(idle_ret_stack, cpu) = ret_stack;
3891                 }
3892                 graph_init_task(t, ret_stack);
3893         }
3894 }
3895
3896 /* Allocate a return stack for newly created task */
3897 void ftrace_graph_init_task(struct task_struct *t)
3898 {
3899         /* Make sure we do not use the parent ret_stack */
3900         t->ret_stack = NULL;
3901         t->curr_ret_stack = -1;
3902
3903         if (ftrace_graph_active) {
3904                 struct ftrace_ret_stack *ret_stack;
3905
3906                 ret_stack = kmalloc(FTRACE_RETFUNC_DEPTH
3907                                 * sizeof(struct ftrace_ret_stack),
3908                                 GFP_KERNEL);
3909                 if (!ret_stack)
3910                         return;
3911                 graph_init_task(t, ret_stack);
3912         }
3913 }
3914
3915 void ftrace_graph_exit_task(struct task_struct *t)
3916 {
3917         struct ftrace_ret_stack *ret_stack = t->ret_stack;
3918
3919         t->ret_stack = NULL;
3920         /* NULL must become visible to IRQs before we free it: */
3921         barrier();
3922
3923         kfree(ret_stack);
3924 }
3925
3926 void ftrace_graph_stop(void)
3927 {
3928         ftrace_stop();
3929 }
3930 #endif