/*
 * Ftrace header. For implementation details beyond the random comments
 * scattered below, see: Documentation/trace/ftrace-design.txt
 */
#ifndef _LINUX_FTRACE_H
#define _LINUX_FTRACE_H
#include <linux/trace_clock.h>
#include <linux/kallsyms.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
#include <linux/ptrace.h>
#include <linux/ktime.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>

#include <asm/ftrace.h>
/*
 * If the arch supports passing the variable contents of
 * function_trace_op as the third parameter back from the
 * mcount call, then the arch should define this as 1.
 */
#ifndef ARCH_SUPPORTS_FTRACE_OPS
#define ARCH_SUPPORTS_FTRACE_OPS 0
#endif

/*
 * If the arch's mcount caller does not support all of ftrace's
 * features, then it must call an indirect function that
 * does, or at least does enough to prevent any unwelcome side effects.
 */
#if !ARCH_SUPPORTS_FTRACE_OPS
# define FTRACE_FORCE_LIST_FUNC 1
#else
# define FTRACE_FORCE_LIST_FUNC 0
#endif
#ifdef CONFIG_FUNCTION_TRACER

extern int ftrace_enabled;
extern int
ftrace_enable_sysctl(struct ctl_table *table, int write,
		     void __user *buffer, size_t *lenp,
		     loff_t *ppos);

struct ftrace_ops;

typedef void (*ftrace_func_t)(unsigned long ip, unsigned long parent_ip,
			      struct ftrace_ops *op, struct pt_regs *regs);

ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
/*
 * FTRACE_OPS_FL_* bits denote the state of ftrace_ops struct and are
 * set in the flags member.
 *
 * ENABLED - set/unset when ftrace_ops is registered/unregistered
 * DYNAMIC - set when ftrace_ops is registered to denote dynamically
 *           allocated ftrace_ops which need special care
 * CONTROL - set manually by the ftrace_ops user to denote that the ftrace_ops
 *           can be controlled by the following calls:
 *             ftrace_function_local_enable
 *             ftrace_function_local_disable
 * SAVE_REGS - The ftrace_ops wants regs saved at each function called
 *            and passed to the callback. If this flag is set, but the
 *            architecture does not support passing regs
 *            (CONFIG_DYNAMIC_FTRACE_WITH_REGS is not defined), then the
 *            ftrace_ops will fail to register, unless the next flag
 *            is set.
 * SAVE_REGS_IF_SUPPORTED - This is the same as SAVE_REGS, but if the
 *            handler can handle an arch that does not save regs
 *            (the handler tests if regs == NULL), then it can set
 *            this flag instead. It will not fail registering the ftrace_ops,
 *            but the regs field will be NULL if the arch does not support
 *            passing regs to the handler.
 *            Note, if this flag is set, the SAVE_REGS flag will automatically
 *            get set upon registering the ftrace_ops, if the arch supports it.
 * RECURSION_SAFE - The ftrace_ops can set this to tell the ftrace infrastructure
 *            that the callback has its own recursion protection. If it does
 *            not set this, then the ftrace infrastructure will add recursion
 *            protection for the caller.
 * STUB   - The ftrace_ops is just a placeholder.
 * INITIALIZED - The ftrace_ops has already been initialized (the first time
 *            register_ftrace_function() is called, it will initialize the ops)
 * DELETED - The ops are being deleted, do not let them be registered again.
 * ADDING  - The ops is in the process of being added.
 * REMOVING - The ops is in the process of being removed.
 * MODIFYING - The ops is in the process of changing its filter functions.
 * ALLOC_TRAMP - A dynamic trampoline was allocated by the core code.
 *            The arch specific code sets this flag when it allocated a
 *            trampoline. This lets the arch know that it can update the
 *            trampoline in case the callback function changes.
 *            The ftrace_ops trampoline can be set by the ftrace users, and
 *            in such cases the arch must not modify it. Only the arch ftrace
 *            core code should set this flag.
 */
enum {
	FTRACE_OPS_FL_ENABLED			= 1 << 0,
	FTRACE_OPS_FL_DYNAMIC			= 1 << 1,
	FTRACE_OPS_FL_CONTROL			= 1 << 2,
	FTRACE_OPS_FL_SAVE_REGS			= 1 << 3,
	FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED	= 1 << 4,
	FTRACE_OPS_FL_RECURSION_SAFE		= 1 << 5,
	FTRACE_OPS_FL_STUB			= 1 << 6,
	FTRACE_OPS_FL_INITIALIZED		= 1 << 7,
	FTRACE_OPS_FL_DELETED			= 1 << 8,
	FTRACE_OPS_FL_ADDING			= 1 << 9,
	FTRACE_OPS_FL_REMOVING			= 1 << 10,
	FTRACE_OPS_FL_MODIFYING			= 1 << 11,
	FTRACE_OPS_FL_ALLOC_TRAMP		= 1 << 12,
};
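/*
 * Illustrative sketch (not part of this header): a callback that asks for
 * pt_regs when the architecture can provide them.  The names "my_regs_ops"
 * and "my_regs_callback" are hypothetical.
 *
 *	static void my_regs_callback(unsigned long ip, unsigned long parent_ip,
 *				     struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		if (regs)
 *			;	// arch passed regs (SAVE_REGS in effect)
 *		else
 *			;	// arch cannot pass regs; handle gracefully
 *	}
 *
 *	static struct ftrace_ops my_regs_ops __read_mostly = {
 *		.func  = my_regs_callback,
 *		.flags = FTRACE_OPS_FL_SAVE_REGS_IF_SUPPORTED,
 *	};
 *
 * With SAVE_REGS_IF_SUPPORTED (rather than SAVE_REGS) registration succeeds
 * even on arches without CONFIG_DYNAMIC_FTRACE_WITH_REGS; the callback simply
 * sees regs == NULL there.
 */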
#ifdef CONFIG_DYNAMIC_FTRACE
/* The hash used to know what functions callbacks trace */
struct ftrace_ops_hash {
	struct ftrace_hash *notrace_hash;
	struct ftrace_hash *filter_hash;
	struct mutex regex_lock;
};
#endif

/*
 * Note, ftrace_ops can be referenced outside of RCU protection.
 * (Although, for perf, the control ops prevent that.)  If a ftrace_ops is
 * allocated and not part of kernel core data, unregistering it will
 * schedule work on all CPUs to make sure that there are no more users.
 * Depending on the load of the system that may take a bit of time.
 *
 * Any private data attached to the ftrace_ops must likewise not be freed
 * until then, and if private data is added to a ftrace_ops that is in core
 * code, the user of the ftrace_ops must perform a schedule_on_each_cpu()
 * before freeing it.
 */
struct ftrace_ops {
	ftrace_func_t func;
	struct ftrace_ops *next;
	unsigned long flags;
	void *private;
	int __percpu *disabled;
#ifdef CONFIG_DYNAMIC_FTRACE
	struct ftrace_ops_hash local_hash;
	struct ftrace_ops_hash *func_hash;
	struct ftrace_ops_hash old_hash;
	unsigned long trampoline;
#endif
};
/*
 * Type of the current tracing.
 */
enum ftrace_tracing_type_t {
	FTRACE_TYPE_ENTER = 0, /* Hook the call of the function */
	FTRACE_TYPE_RETURN,    /* Hook the return of the function */
};

/* Current tracing type, default is FTRACE_TYPE_ENTER */
extern enum ftrace_tracing_type_t ftrace_tracing_type;

/*
 * The ftrace_ops must be static and should also be read_mostly.
 * These functions do modify read_mostly variables so use them sparingly.
 * Never free a ftrace_ops or modify its next pointer after it has been
 * registered. Even after unregistering it, the next pointer may still be
 * used internally.
 */
int register_ftrace_function(struct ftrace_ops *ops);
int unregister_ftrace_function(struct ftrace_ops *ops);
void clear_ftrace_function(void);
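/*
 * Illustrative sketch (not part of this header): registering a minimal
 * function-trace callback.  "my_ops" and "my_callback" are hypothetical;
 * the ops must be static and must never be freed once registered.
 *
 *	static void notrace my_callback(unsigned long ip, unsigned long parent_ip,
 *					struct ftrace_ops *op, struct pt_regs *regs)
 *	{
 *		// called on every traced function entry
 *	}
 *
 *	static struct ftrace_ops my_ops __read_mostly = {
 *		.func = my_callback,
 *	};
 *
 *	// in module/boot init code:
 *	//	register_ftrace_function(&my_ops);
 *	// and on teardown:
 *	//	unregister_ftrace_function(&my_ops);
 */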
/**
 * ftrace_function_local_enable - enable controlled ftrace_ops on current cpu
 *
 * This function enables tracing on the current cpu by decreasing
 * the per cpu control variable.
 * It must be called with preemption disabled and only on ftrace_ops
 * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
 */
static inline void ftrace_function_local_enable(struct ftrace_ops *ops)
{
	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
		return;

	(*this_cpu_ptr(ops->disabled))--;
}

/**
 * ftrace_function_local_disable - disable controlled ftrace_ops on current cpu
 *
 * This function disables tracing on the current cpu by increasing
 * the per cpu control variable.
 * It must be called with preemption disabled and only on ftrace_ops
 * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
 */
static inline void ftrace_function_local_disable(struct ftrace_ops *ops)
{
	if (WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL)))
		return;

	(*this_cpu_ptr(ops->disabled))++;
}

/**
 * ftrace_function_local_disabled - returns ftrace_ops disabled value
 *                                  on current cpu
 *
 * This function returns the value of ftrace_ops::disabled on the current cpu.
 * It must be called with preemption disabled and only on ftrace_ops
 * registered with FTRACE_OPS_FL_CONTROL. If called without preemption
 * disabled, this_cpu_ptr will complain when CONFIG_DEBUG_PREEMPT is enabled.
 */
static inline int ftrace_function_local_disabled(struct ftrace_ops *ops)
{
	WARN_ON_ONCE(!(ops->flags & FTRACE_OPS_FL_CONTROL));
	return *this_cpu_ptr(ops->disabled);
}
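/*
 * Illustrative sketch (not part of this header): temporarily disabling a
 * FTRACE_OPS_FL_CONTROL ops on the local cpu.  "my_control_ops" is
 * hypothetical and must have been registered with the CONTROL flag set.
 *
 *	preempt_disable();
 *	ftrace_function_local_disable(&my_control_ops);
 *
 *	// ... the callback will not run on this cpu here ...
 *	//     (ftrace_function_local_disabled() returns non-zero)
 *
 *	ftrace_function_local_enable(&my_control_ops);
 *	preempt_enable();
 */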
extern void ftrace_stub(unsigned long a0, unsigned long a1,
			struct ftrace_ops *op, struct pt_regs *regs);

#else /* !CONFIG_FUNCTION_TRACER */
/*
 * (un)register_ftrace_function must be a macro since the ops parameter
 * must not be evaluated.
 */
#define register_ftrace_function(ops) ({ 0; })
#define unregister_ftrace_function(ops) ({ 0; })
static inline int ftrace_nr_registered_ops(void)
{
	return 0;
}
static inline void clear_ftrace_function(void) { }
static inline void ftrace_kill(void) { }
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_STACK_TRACER
extern int stack_tracer_enabled;
int
stack_trace_sysctl(struct ctl_table *table, int write,
		   void __user *buffer, size_t *lenp,
		   loff_t *ppos);
#endif

struct ftrace_func_command {
	struct list_head list;
	char *name;
	int (*func)(struct ftrace_hash *hash,
		    char *func, char *cmd,
		    char *params, int enable);
};

#ifdef CONFIG_DYNAMIC_FTRACE

int ftrace_arch_code_modify_prepare(void);
int ftrace_arch_code_modify_post_process(void);

struct dyn_ftrace;

void ftrace_bug(int err, struct dyn_ftrace *rec);

struct seq_file;
struct ftrace_probe_ops {
	void (*func)(unsigned long ip,
		     unsigned long parent_ip,
		     void **data);
	int (*init)(struct ftrace_probe_ops *ops,
		    unsigned long ip, void **data);
	void (*free)(struct ftrace_probe_ops *ops,
		     unsigned long ip, void **data);
	int (*print)(struct seq_file *m,
		     unsigned long ip,
		     struct ftrace_probe_ops *ops,
		     void *data);
};

extern int
register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
			       void *data);
extern void
unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
				 void *data);
extern void
unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
extern void unregister_ftrace_function_probe_all(char *glob);
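/*
 * Illustrative sketch (not part of this header): attaching a probe callback
 * to every function matching a glob.  "my_probe_func", "my_probe_ops" and the
 * glob are hypothetical; init/free/print may be left NULL if unused.
 *
 *	static void my_probe_func(unsigned long ip, unsigned long parent_ip,
 *				  void **data)
 *	{
 *		// runs whenever a function matching the glob is hit
 *	}
 *
 *	static struct ftrace_probe_ops my_probe_ops = {
 *		.func = my_probe_func,
 *	};
 *
 *	// register_ftrace_function_probe("vfs_*", &my_probe_ops, NULL);
 *	// ...
 *	// unregister_ftrace_function_probe(), ..._func() or ..._all() to remove.
 */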
extern int ftrace_text_reserved(const void *start, const void *end);

extern int ftrace_nr_registered_ops(void);
/*
 * The dyn_ftrace record's flags field is split into two parts.
 * The first part, '0-FTRACE_REF_MAX', is a counter of the number of
 * callbacks that have registered the function that the dyn_ftrace
 * descriptor represents.
 *
 * The second part is a mask:
 *  ENABLED - the function is being traced
 *  REGS    - the record wants the function to save regs
 *  REGS_EN - the function is set up to save regs.
 *
 * When a new ftrace_ops is registered and wants a function to save
 * pt_regs, the rec->flags REGS bit is set. When the function has been
 * set up to save regs, the REGS_EN flag is set. Once a function
 * starts saving regs it will do so until all ftrace_ops are removed
 * from tracing that function.
 */
enum {
	FTRACE_FL_ENABLED	= (1UL << 31),
	FTRACE_FL_REGS		= (1UL << 30),
	FTRACE_FL_REGS_EN	= (1UL << 29),
	FTRACE_FL_TRAMP		= (1UL << 28),
	FTRACE_FL_TRAMP_EN	= (1UL << 27),
};

#define FTRACE_REF_MAX_SHIFT	27
#define FTRACE_FL_BITS		5
#define FTRACE_FL_MASKED_BITS	((1UL << FTRACE_FL_BITS) - 1)
#define FTRACE_FL_MASK		(FTRACE_FL_MASKED_BITS << FTRACE_REF_MAX_SHIFT)
#define FTRACE_REF_MAX		((1UL << FTRACE_REF_MAX_SHIFT) - 1)

#define ftrace_rec_count(rec)	((rec)->flags & ~FTRACE_FL_MASK)
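/*
 * Illustrative sketch (not part of this header): how the two halves of
 * dyn_ftrace::flags are typically read; the update code in
 * kernel/trace/ftrace.c holds the real logic.  "rec_wants_regs" is a
 * hypothetical helper.
 *
 *	static bool rec_wants_regs(struct dyn_ftrace *rec)
 *	{
 *		// low FTRACE_REF_MAX_SHIFT bits: number of ops tracing this site
 *		if (!ftrace_rec_count(rec))
 *			return false;
 *		// high FTRACE_FL_BITS bits: state mask
 *		return !!(rec->flags & FTRACE_FL_REGS);
 *	}
 */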
struct dyn_ftrace {
	unsigned long ip; /* address of mcount call-site */
	unsigned long flags;
	struct dyn_arch_ftrace arch;
};

int ftrace_force_update(void);
int ftrace_set_filter_ip(struct ftrace_ops *ops, unsigned long ip,
			 int remove, int reset);
int ftrace_set_filter(struct ftrace_ops *ops, unsigned char *buf,
		      int len, int reset);
int ftrace_set_notrace(struct ftrace_ops *ops, unsigned char *buf,
		       int len, int reset);
void ftrace_set_global_filter(unsigned char *buf, int len, int reset);
void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
void ftrace_free_filter(struct ftrace_ops *ops);
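/*
 * Illustrative sketch (not part of this header): restricting a callback to a
 * subset of functions before registering it.  "my_ops" is hypothetical; the
 * filter string uses the same glob syntax as set_ftrace_filter.
 *
 *	ftrace_set_filter(&my_ops, "vfs_read", strlen("vfs_read"), 1);
 *	// or pin a single call site by address:
 *	//	ftrace_set_filter_ip(&my_ops, ip, 0, 0);
 *
 *	register_ftrace_function(&my_ops);
 *	// ...
 *	unregister_ftrace_function(&my_ops);
 *	ftrace_free_filter(&my_ops);	// release the hashes once unregistered
 */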
int register_ftrace_command(struct ftrace_func_command *cmd);
int unregister_ftrace_command(struct ftrace_func_command *cmd);
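/*
 * Illustrative sketch (not part of this header): adding a new command that
 * can be used in set_ftrace_filter, e.g. "vfs_read:mycmd".  The name and
 * handler are hypothetical.
 *
 *	static int my_cmd_func(struct ftrace_hash *hash,
 *			       char *func, char *cmd, char *param, int enable)
 *	{
 *		// act on "func:cmd:param" written to set_ftrace_filter
 *		return 0;
 *	}
 *
 *	static struct ftrace_func_command my_cmd = {
 *		.name = "mycmd",
 *		.func = my_cmd_func,
 *	};
 *
 *	// register_ftrace_command(&my_cmd);   (typically from __init code)
 *	// unregister_ftrace_command(&my_cmd);
 */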
enum {
	FTRACE_UPDATE_CALLS		= (1 << 0),
	FTRACE_DISABLE_CALLS		= (1 << 1),
	FTRACE_UPDATE_TRACE_FUNC	= (1 << 2),
	FTRACE_START_FUNC_RET		= (1 << 3),
	FTRACE_STOP_FUNC_RET		= (1 << 4),
};

/*
 * The FTRACE_UPDATE_* enum is used to pass information back
 * from the ftrace_update_record() and ftrace_test_record()
 * functions. These are called by the code update routines
 * to find out what is to be done for a given function.
 *
 * IGNORE      - The function is already what we want it to be
 * MAKE_CALL   - Start tracing the function
 * MODIFY_CALL - The function is being traced, but needs to call a
 *               different trampoline (e.g. to start or stop saving regs)
 * MAKE_NOP    - Stop tracing the function
 */
enum {
	FTRACE_UPDATE_IGNORE,
	FTRACE_UPDATE_MAKE_CALL,
	FTRACE_UPDATE_MODIFY_CALL,
	FTRACE_UPDATE_MAKE_NOP,
};

enum {
	FTRACE_ITER_FILTER	= (1 << 0),
	FTRACE_ITER_NOTRACE	= (1 << 1),
	FTRACE_ITER_PRINTALL	= (1 << 2),
	FTRACE_ITER_DO_HASH	= (1 << 3),
	FTRACE_ITER_HASH	= (1 << 4),
	FTRACE_ITER_ENABLED	= (1 << 5),
};
void arch_ftrace_update_code(int command);

struct ftrace_rec_iter;

struct ftrace_rec_iter *ftrace_rec_iter_start(void);
struct ftrace_rec_iter *ftrace_rec_iter_next(struct ftrace_rec_iter *iter);
struct dyn_ftrace *ftrace_rec_iter_record(struct ftrace_rec_iter *iter);
#define for_ftrace_rec_iter(iter)		\
	for (iter = ftrace_rec_iter_start();	\
	     iter;				\
	     iter = ftrace_rec_iter_next(iter))
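/*
 * Illustrative sketch (not part of this header): walking every mcount
 * call-site record, as arch code that implements its own ftrace_replace_code()
 * does.  What is done with each record here is hypothetical.
 *
 *	struct ftrace_rec_iter *iter;
 *	struct dyn_ftrace *rec;
 *
 *	for_ftrace_rec_iter(iter) {
 *		rec = ftrace_rec_iter_record(iter);
 *		switch (ftrace_test_record(rec, 1)) {
 *		case FTRACE_UPDATE_MAKE_CALL:
 *			// patch the site at rec->ip to call ftrace_get_addr_new(rec)
 *			break;
 *		case FTRACE_UPDATE_IGNORE:
 *		default:
 *			break;
 *		}
 *	}
 */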
int ftrace_update_record(struct dyn_ftrace *rec, int enable);
int ftrace_test_record(struct dyn_ftrace *rec, int enable);
void ftrace_run_stop_machine(int command);
unsigned long ftrace_location(unsigned long ip);
unsigned long ftrace_get_addr_new(struct dyn_ftrace *rec);
unsigned long ftrace_get_addr_curr(struct dyn_ftrace *rec);

extern ftrace_func_t ftrace_trace_function;
int ftrace_regex_open(struct ftrace_ops *ops, int flag,
		      struct inode *inode, struct file *file);
ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
			    size_t cnt, loff_t *ppos);
ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
			     size_t cnt, loff_t *ppos);
int ftrace_regex_release(struct inode *inode, struct file *file);

void
ftrace_set_early_filter(struct ftrace_ops *ops, char *buf, int enable);

/* defined in arch */
extern int ftrace_ip_converted(unsigned long ip);
extern int ftrace_dyn_arch_init(void);
extern void ftrace_replace_code(int enable);
extern int ftrace_update_ftrace_func(ftrace_func_t func);
extern void ftrace_caller(void);
extern void ftrace_regs_caller(void);
extern void ftrace_call(void);
extern void ftrace_regs_call(void);
extern void mcount_call(void);

void ftrace_modify_all_code(int command);
#ifndef FTRACE_ADDR
#define FTRACE_ADDR ((unsigned long)ftrace_caller)
#endif

#ifndef FTRACE_GRAPH_ADDR
#define FTRACE_GRAPH_ADDR ((unsigned long)ftrace_graph_caller)
#endif

#ifndef FTRACE_REGS_ADDR
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
# define FTRACE_REGS_ADDR ((unsigned long)ftrace_regs_caller)
#else
# define FTRACE_REGS_ADDR FTRACE_ADDR
#endif
#endif

/*
 * If an arch would like functions that are only traced
 * by the function graph tracer to jump directly to its own
 * trampoline, then it can define FTRACE_GRAPH_TRAMP_ADDR
 * to be that address to jump to.
 */
#ifndef FTRACE_GRAPH_TRAMP_ADDR
#define FTRACE_GRAPH_TRAMP_ADDR ((unsigned long) 0)
#endif

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
extern void ftrace_graph_caller(void);
extern int ftrace_enable_ftrace_graph_caller(void);
extern int ftrace_disable_ftrace_graph_caller(void);
#else
static inline int ftrace_enable_ftrace_graph_caller(void) { return 0; }
static inline int ftrace_disable_ftrace_graph_caller(void) { return 0; }
#endif
/**
 * ftrace_make_nop - convert code into nop
 * @mod: module structure if called by module load initialization
 * @rec: the mcount call site record
 * @addr: the address that the call site should be calling
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_nop(struct module *mod,
			   struct dyn_ftrace *rec, unsigned long addr);

/**
 * ftrace_make_call - convert a nop call site into a call to addr
 * @rec: the mcount call site record
 * @addr: the address that the call site should call
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a nop
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr);
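/*
 * Illustrative sketch (not part of this header) of the read/compare/write
 * pattern an arch implementation is expected to follow.  MCOUNT_INSN_SIZE is
 * arch-provided; arch_make_call_insn(), arch_make_nop_insn() and
 * arch_patch_text() are hypothetical stand-ins for the arch's own helpers.
 *
 *	int ftrace_make_nop(struct module *mod,
 *			    struct dyn_ftrace *rec, unsigned long addr)
 *	{
 *		unsigned char expect[MCOUNT_INSN_SIZE], cur[MCOUNT_INSN_SIZE];
 *
 *		arch_make_call_insn(expect, rec->ip, addr);	// expected contents
 *		if (probe_kernel_read(cur, (void *)rec->ip, MCOUNT_INSN_SIZE))
 *			return -EFAULT;		// could not read the location
 *		if (memcmp(cur, expect, MCOUNT_INSN_SIZE))
 *			return -EINVAL;		// not what we expected to find
 *		if (arch_patch_text(rec->ip, arch_make_nop_insn()))
 *			return -EPERM;		// write failed
 *		return 0;
 *	}
 */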
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
/**
 * ftrace_modify_call - convert from one addr to another (no nop)
 * @rec: the mcount call site record
 * @old_addr: the address expected to be currently called to
 * @addr: the address to change to
 *
 * This is a very sensitive operation and great care needs
 * to be taken by the arch. The operation should carefully
 * read the location, check to see if what is read is indeed
 * what we expect it to be, and then on success of the compare,
 * it should write to the location.
 *
 * The code segment at @rec->ip should be a caller to @old_addr
 *
 * Return must be:
 *  0 on success
 *  -EFAULT on error reading the location
 *  -EINVAL on a failed compare of the contents
 *  -EPERM  on error writing to the location
 * Any other value will be considered a failure.
 */
extern int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
			      unsigned long addr);
#else
/* Should never be called */
static inline int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
				     unsigned long addr)
{
	return -EINVAL;
}
#endif
/* May be defined in arch */
extern int ftrace_arch_read_dyn_info(char *buf, int size);

extern int skip_trace(unsigned long ip);
extern void ftrace_module_init(struct module *mod);
extern void ftrace_release_mod(struct module *mod);

extern void ftrace_disable_daemon(void);
extern void ftrace_enable_daemon(void);
#else /* CONFIG_DYNAMIC_FTRACE */
static inline int skip_trace(unsigned long ip) { return 0; }
static inline int ftrace_force_update(void) { return 0; }
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_release_mod(struct module *mod) { }
static inline void ftrace_module_init(struct module *mod) { }
static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
{
	return -EINVAL;
}
static inline __init int unregister_ftrace_command(char *cmd_name)
{
	return -EINVAL;
}
static inline int ftrace_text_reserved(const void *start, const void *end)
{
	return 0;
}
static inline unsigned long ftrace_location(unsigned long ip)
{
	return 0;
}
/*
 * Again, users of functions that have ftrace_ops may not
 * have them defined when ftrace is not enabled, but these
 * functions may still be called. Use a macro instead of inline.
 */
#define ftrace_regex_open(ops, flag, inod, file) ({ -ENODEV; })
#define ftrace_set_early_filter(ops, buf, enable) do { } while (0)
#define ftrace_set_filter_ip(ops, ip, remove, reset) ({ -ENODEV; })
#define ftrace_set_filter(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_set_notrace(ops, buf, len, reset) ({ -ENODEV; })
#define ftrace_free_filter(ops) do { } while (0)

static inline ssize_t ftrace_filter_write(struct file *file, const char __user *ubuf,
			    size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline ssize_t ftrace_notrace_write(struct file *file, const char __user *ubuf,
			     size_t cnt, loff_t *ppos) { return -ENODEV; }
static inline int
ftrace_regex_release(struct inode *inode, struct file *file) { return -ENODEV; }
#endif /* CONFIG_DYNAMIC_FTRACE */
/* totally disable ftrace - can not re-enable after this */
void ftrace_kill(void);

static inline void tracer_disable(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = 0;
#endif
}

/*
 * Ftrace disable/restore without lock. Some synchronization mechanism
 * must be used to prevent ftrace_enabled from being changed between
 * disable/restore.
 */
static inline int __ftrace_enabled_save(void)
{
#ifdef CONFIG_FUNCTION_TRACER
	int saved_ftrace_enabled = ftrace_enabled;
	ftrace_enabled = 0;
	return saved_ftrace_enabled;
#else
	return 0;
#endif
}

static inline void __ftrace_enabled_restore(int enabled)
{
#ifdef CONFIG_FUNCTION_TRACER
	ftrace_enabled = enabled;
#endif
}
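/*
 * Illustrative sketch (not part of this header): briefly turning function
 * tracing off around a critical region.  The caller is responsible for any
 * synchronization against other writers of ftrace_enabled.
 *
 *	int saved;
 *
 *	saved = __ftrace_enabled_save();	// disables tracing, returns old state
 *	// ... code that must not be traced ...
 *	__ftrace_enabled_restore(saved);	// puts the old state back
 */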
/* All archs should have this, but we define it for consistency */
#ifndef ftrace_return_address0
# define ftrace_return_address0 __builtin_return_address(0)
#endif

/* Archs may use other ways for ADDR1 and beyond */
#ifndef ftrace_return_address
# ifdef CONFIG_FRAME_POINTER
#  define ftrace_return_address(n) __builtin_return_address(n)
# else
#  define ftrace_return_address(n) 0UL
# endif
#endif
#define CALLER_ADDR0 ((unsigned long)ftrace_return_address0)
#define CALLER_ADDR1 ((unsigned long)ftrace_return_address(1))
#define CALLER_ADDR2 ((unsigned long)ftrace_return_address(2))
#define CALLER_ADDR3 ((unsigned long)ftrace_return_address(3))
#define CALLER_ADDR4 ((unsigned long)ftrace_return_address(4))
#define CALLER_ADDR5 ((unsigned long)ftrace_return_address(5))
#define CALLER_ADDR6 ((unsigned long)ftrace_return_address(6))
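/*
 * Illustrative sketch (not part of this header): CALLER_ADDRn expands to the
 * n-th return address up the current call chain (0UL when frame pointers are
 * unavailable).  The latency tracers use these to record who enabled or
 * disabled irqs/preemption; "stop_critical_timing_here" is hypothetical:
 *
 *	void stop_critical_timing_here(void)
 *	{
 *		trace_preempt_on(CALLER_ADDR0, CALLER_ADDR1);
 *	}
 */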
#ifdef CONFIG_IRQSOFF_TRACER
  extern void time_hardirqs_on(unsigned long a0, unsigned long a1);
  extern void time_hardirqs_off(unsigned long a0, unsigned long a1);
#else
  static inline void time_hardirqs_on(unsigned long a0, unsigned long a1) { }
  static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
#endif

#ifdef CONFIG_PREEMPT_TRACER
  extern void trace_preempt_on(unsigned long a0, unsigned long a1);
  extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
/*
 * Use defines instead of static inlines because some arches will make code out
 * of the CALLER_ADDR, when we really want these to be a real nop.
 */
# define trace_preempt_on(a0, a1) do { } while (0)
# define trace_preempt_off(a0, a1) do { } while (0)
#endif

#ifdef CONFIG_FTRACE_MCOUNT_RECORD
extern void ftrace_init(void);
#else
static inline void ftrace_init(void) { }
#endif
/*
 * Structure that defines an entry function trace.
 */
struct ftrace_graph_ent {
	unsigned long func; /* Current function */
	int depth;
};

/*
 * Structure that defines a return function trace.
 */
struct ftrace_graph_ret {
	unsigned long func; /* Current function */
	unsigned long long calltime;
	unsigned long long rettime;
	/* Number of functions that overran the depth limit for current task */
	unsigned long overrun;
	int depth;
};

/* Type of the callback handlers for tracing function graph */
typedef void (*trace_func_graph_ret_t)(struct ftrace_graph_ret *); /* return */
typedef int (*trace_func_graph_ent_t)(struct ftrace_graph_ent *); /* entry */
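/*
 * Illustrative sketch (not part of this header): a pair of graph-tracer
 * handlers.  "my_graph_entry" and "my_graph_return" are hypothetical;
 * returning 0 from the entry handler tells the core not to trace that call.
 *
 *	static int my_graph_entry(struct ftrace_graph_ent *ent)
 *	{
 *		// ent->func is the function being entered, ent->depth its depth
 *		return 1;	// non-zero: do trace this call
 *	}
 *
 *	static void my_graph_return(struct ftrace_graph_ret *ret)
 *	{
 *		// ret->rettime - ret->calltime is the duration of the call
 *	}
 *
 *	// register_ftrace_graph(my_graph_return, my_graph_entry);
 *	// ...
 *	// unregister_ftrace_graph();
 */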
#ifdef CONFIG_FUNCTION_GRAPH_TRACER

/* for init task */
#define INIT_FTRACE_GRAPH	.ret_stack = NULL,

/*
 * Stack of return addresses for functions
 * of a thread.
 * Used in struct thread_info
 */
struct ftrace_ret_stack {
	unsigned long ret;
	unsigned long func;
	unsigned long long calltime;
	unsigned long long subtime;
	unsigned long fp;
};

/*
 * Primary handler of a function return.
 * It relies on ftrace_return_to_handler.
 * Defined in entry_32/64.S
 */
extern void return_to_handler(void);

extern int
ftrace_push_return_trace(unsigned long ret, unsigned long func, int *depth,
			 unsigned long frame_pointer);
/*
 * Sometimes we don't want to trace a function with the function
 * graph tracer but we still want it to be traced by the normal
 * function tracer if the function graph tracer is not configured.
 */
#define __notrace_funcgraph	notrace

/*
 * We want to know which function is an entry point of a hardirq,
 * so that it can be flagged in the trace output.
 */
#define __irq_entry	__attribute__((__section__(".irqentry.text")))
/* Limits of hardirq entrypoints */
extern char __irqentry_text_start[];
extern char __irqentry_text_end[];

#define FTRACE_NOTRACE_DEPTH 65536
#define FTRACE_RETFUNC_DEPTH 50
#define FTRACE_RETSTACK_ALLOC_SIZE 32
extern int register_ftrace_graph(trace_func_graph_ret_t retfunc,
				 trace_func_graph_ent_t entryfunc);
extern bool ftrace_graph_is_dead(void);
extern void ftrace_graph_stop(void);

/* The current handlers in use */
extern trace_func_graph_ret_t ftrace_graph_return;
extern trace_func_graph_ent_t ftrace_graph_entry;

extern void unregister_ftrace_graph(void);

extern void ftrace_graph_init_task(struct task_struct *t);
extern void ftrace_graph_exit_task(struct task_struct *t);
extern void ftrace_graph_init_idle_task(struct task_struct *t, int cpu);
static inline int task_curr_ret_stack(struct task_struct *t)
{
	return t->curr_ret_stack;
}

static inline void pause_graph_tracing(void)
{
	atomic_inc(&current->tracing_graph_pause);
}

static inline void unpause_graph_tracing(void)
{
	atomic_dec(&current->tracing_graph_pause);
}
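/*
 * Illustrative sketch (not part of this header): keeping the function graph
 * tracer away from a section of code that would otherwise recurse into it.
 * Pause/unpause nest via the atomic counter, so pairs may be stacked.
 *
 *	pause_graph_tracing();
 *	// ... work that must not generate graph-tracer entries for this task ...
 *	unpause_graph_tracing();
 */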
#else /* !CONFIG_FUNCTION_GRAPH_TRACER */

#define __notrace_funcgraph
#define __irq_entry
#define INIT_FTRACE_GRAPH

static inline void ftrace_graph_init_task(struct task_struct *t) { }
static inline void ftrace_graph_exit_task(struct task_struct *t) { }
static inline void ftrace_graph_init_idle_task(struct task_struct *t, int cpu) { }

static inline int register_ftrace_graph(trace_func_graph_ret_t retfunc,
					trace_func_graph_ent_t entryfunc)
{
	return -1;
}
static inline void unregister_ftrace_graph(void) { }

static inline int task_curr_ret_stack(struct task_struct *tsk)
{
	return -1;
}

static inline void pause_graph_tracing(void) { }
static inline void unpause_graph_tracing(void) { }
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#ifdef CONFIG_TRACING

/* flags for current->trace */
enum {
	TSK_TRACE_FL_TRACE_BIT	= 0,
	TSK_TRACE_FL_GRAPH_BIT	= 1,
};
enum {
	TSK_TRACE_FL_TRACE	= 1 << TSK_TRACE_FL_TRACE_BIT,
	TSK_TRACE_FL_GRAPH	= 1 << TSK_TRACE_FL_GRAPH_BIT,
};
static inline void set_tsk_trace_trace(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_trace(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_TRACE_BIT, &tsk->trace);
}

static inline int test_tsk_trace_trace(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_TRACE;
}

static inline void set_tsk_trace_graph(struct task_struct *tsk)
{
	set_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline void clear_tsk_trace_graph(struct task_struct *tsk)
{
	clear_bit(TSK_TRACE_FL_GRAPH_BIT, &tsk->trace);
}

static inline int test_tsk_trace_graph(struct task_struct *tsk)
{
	return tsk->trace & TSK_TRACE_FL_GRAPH;
}
enum ftrace_dump_mode;

extern enum ftrace_dump_mode ftrace_dump_on_oops;

extern void disable_trace_on_warning(void);
extern int __disable_trace_on_warning;

#ifdef CONFIG_PREEMPT
#define INIT_TRACE_RECURSION	.trace_recursion = 0,
#endif
#else /* CONFIG_TRACING */
static inline void disable_trace_on_warning(void) { }
#endif /* CONFIG_TRACING */

#ifndef INIT_TRACE_RECURSION
#define INIT_TRACE_RECURSION
#endif
#ifdef CONFIG_FTRACE_SYSCALLS

unsigned long arch_syscall_addr(int nr);

#endif /* CONFIG_FTRACE_SYSCALLS */

#endif /* _LINUX_FTRACE_H */