/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct ftrace_raw_<call> {
 *	struct trace_entry	ent;
 *	<type>			<item>;
 *	<type2>			<item2>[<len>];
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */
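
/*
 * As an illustrative sketch (the event name "foo_bar" and its fields are
 * hypothetical, not part of this file), a definition such as
 *
 *	TRACE_EVENT(foo_bar,
 *		TP_PROTO(const char *name, int value),
 *		TP_ARGS(name, value),
 *		TP_STRUCT__entry(
 *			__string(name, name)
 *			__field(int, value)
 *		),
 *		TP_fast_assign(
 *			__assign_str(name, name);
 *			__entry->value = value;
 *		),
 *		TP_printk("name=%s value=%d", __get_str(name), __entry->value)
 *	);
 *
 * would be expanded by this stage into roughly
 *
 *	struct ftrace_raw_foo_bar {
 *		struct trace_entry	ent;
 *		u32			__data_loc_name;
 *		int			value;
 *		char			__data[0];
 *	};
 */
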
#include <linux/ftrace_event.h>

/*
 * DECLARE_EVENT_CLASS can be used to add generic function
 * handlers for events. That is, if all events have the same
 * parameters and just have distinct trace points.
 * Each tracepoint can be defined with DEFINE_EVENT and that
 * will map the DECLARE_EVENT_CLASS to the tracepoint.
 *
 * TRACE_EVENT is a one to one mapping between tracepoint and template.
 */
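
/*
 * Illustrative sketch (hypothetical names): two events that share one layout
 * and output format can be declared as
 *
 *	DECLARE_EVENT_CLASS(foo_template,
 *		TP_PROTO(const char *name, int value),
 *		TP_ARGS(name, value),
 *		... );
 *
 *	DEFINE_EVENT(foo_template, foo_start,
 *		TP_PROTO(const char *name, int value), TP_ARGS(name, value));
 *	DEFINE_EVENT(foo_template, foo_end,
 *		TP_PROTO(const char *name, int value), TP_ARGS(name, value));
 *
 * whereas TRACE_EVENT(foo_bar, ...) is shorthand for a class plus a single
 * event of the same name, which is exactly what the mapping below does.
 */
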
#define TRACE_EVENT(name, proto, args, tstruct, assign, print)	\
	DECLARE_EVENT_CLASS(name, PARAMS(proto), PARAMS(args),	\
			    PARAMS(tstruct), PARAMS(assign),	\
			    PARAMS(print));			\
	DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));

#define __field(type, item)		type	item;

#define __field_ext(type, item, filter_type)	type	item;

#define __array(type, item, len)	type	item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

#define __string(item, src) __dynamic_array(char, item, -1)

#define __bitmask(item, nr_bits) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print) \
	struct ftrace_raw_##name {				\
		struct trace_entry	ent;			\
		tstruct						\
		char			__data[0];		\
	};							\
	static struct ftrace_event_class event_class_##name;

#define DEFINE_EVENT(template, name, proto, args)	\
	static struct ftrace_event_call __used		\
	__attribute__((__aligned__(4))) event_##name

#undef DEFINE_EVENT_FN
#define DEFINE_EVENT_FN(template, name, proto, args, reg, unreg) \
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print) \
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

/* Callbacks are meaningless to ftrace. */
#define TRACE_EVENT_FN(name, proto, args, tstruct,		\
		assign, print, reg, unreg)			\
	TRACE_EVENT(name, PARAMS(proto), PARAMS(args),		\
		PARAMS(tstruct), PARAMS(assign), PARAMS(print))

#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(name, value)			\
	__TRACE_EVENT_FLAGS(name, value)

#undef TRACE_EVENT_PERF_PERM
#define TRACE_EVENT_PERF_PERM(name, expr...)		\
	__TRACE_EVENT_PERF_PERM(name, expr)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct ftrace_data_offsets_<call> {
 *	u32	<item1>;
 *	u32	<item2>;
 * };
 *
 * The __dynamic_array() macro will create each u32 <item>; this is
 * to keep the offset of each array from the beginning of the event.
 * The size of an array is also encoded, in the higher 16 bits of <item>.
 */
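
/*
 * Illustrative sketch for the hypothetical foo_bar event above: its only
 * dynamic field is the string "name", so this stage would produce roughly
 *
 *	struct ftrace_data_offsets_foo_bar {
 *		u32	name;
 *	};
 *
 * where, once ftrace_get_offsets_foo_bar() has run, "name" holds the offset
 * of the string within the event record in its low 16 bits and the string
 * length in its high 16 bits.
 */
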
#define __field(type, item)

#define __field_ext(type, item, filter_type)

#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)	u32 item;

#define __string(item, src) __dynamic_array(char, item, -1)

#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
	struct ftrace_data_offsets_##call {			\
		tstruct;					\
	};

#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(event, flag)

#undef TRACE_EVENT_PERF_PERM
#define TRACE_EVENT_PERF_PERM(event, expr...)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * ftrace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *	struct trace_seq *s = &iter->seq;
 *	struct ftrace_raw_<call> *field; <-- defined in stage 1
 *	struct trace_entry *entry;
 *	struct trace_seq *p = &iter->tmp_seq;
 *	int ret;
 *
 *	entry = iter->ent;
 *	if (entry->type != event_<call>->event.type) {
 *		WARN_ON_ONCE(1);
 *		return TRACE_TYPE_UNHANDLED;
 *	}
 *
 *	field = (typeof(field))entry;
 *	ret = trace_seq_printf(s, "%s: ", <call>);
 *	if (ret)
 *		ret = trace_seq_printf(s, <TP_printk> "\n");
 *	if (!ret)
 *		return TRACE_TYPE_PARTIAL_LINE;
 *
 *	return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in a raw format by userspace tools.
 */
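
/*
 * For the hypothetical foo_bar event sketched in stage 1, this callback
 * would render a trace line roughly like
 *
 *	foo_bar: name=tty0 value=3
 *
 * (the task, pid, CPU and timestamp columns are emitted by the generic
 * trace output code before this callback runs).
 */
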
#define __entry field

#define TP_printk(fmt, args...) fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
	((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_dynamic_array_len
#define __get_dynamic_array_len(field)	\
	((__entry->__data_loc_##field >> 16) & 0xffff)

#define __get_str(field) (char *)__get_dynamic_array(field)

#define __get_bitmask(field)						\
	({								\
		void *__bitmask = __get_dynamic_array(field);		\
		unsigned int __bitmask_size;				\
		__bitmask_size = __get_dynamic_array_len(field);	\
		ftrace_print_bitmask_seq(p, __bitmask, __bitmask_size);	\
	})

#define __print_flags(flag, delim, flag_array...)			\
	({								\
		static const struct trace_print_flags __flags[] =	\
			{ flag_array, { -1, NULL }};			\
		ftrace_print_flags_seq(p, delim, flag, __flags);	\
	})

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags symbols[] =	\
			{ symbol_array, { -1, NULL }};			\
		ftrace_print_symbols_seq(p, value, symbols);		\
	})

#undef __print_symbolic_u64
#if BITS_PER_LONG == 32
#define __print_symbolic_u64(value, symbol_array...)			\
	({								\
		static const struct trace_print_flags_u64 symbols[] =	\
			{ symbol_array, { -1, NULL } };			\
		ftrace_print_symbols_seq_u64(p, value, symbols);	\
	})
#else
#define __print_symbolic_u64(value, symbol_array...)			\
	__print_symbolic(value, symbol_array)
#endif

#define __print_hex(buf, buf_len) ftrace_print_hex_seq(p, buf, buf_len)

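/*
 * Illustrative sketch (hypothetical flag and state values): inside a
 * TP_printk() these helpers are typically used as
 *
 *	TP_printk("flags=%s state=%s",
 *		  __print_flags(__entry->flags, "|",
 *				{ 0x1, "READ" }, { 0x2, "SYNC" }),
 *		  __print_symbolic(__entry->state,
 *				   { 0, "idle" }, { 1, "busy" }))
 *
 * so that a numeric field is rendered as "READ|SYNC" or "busy" in the
 * trace output rather than as a raw number.
 */
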
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace enum print_line_t					\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
			 struct trace_event *trace_event)		\
{									\
	struct trace_seq *s = &iter->seq;				\
	struct trace_seq __maybe_unused *p = &iter->tmp_seq;		\
	struct ftrace_raw_##call *field;				\
	int ret;							\
									\
	field = (typeof(field))iter->ent;				\
									\
	ret = ftrace_raw_output_prep(iter, trace_event);		\
	if (ret)							\
		return ret;						\
									\
	ret = trace_seq_printf(s, print);				\
	if (!ret)							\
		return TRACE_TYPE_PARTIAL_LINE;				\
									\
	return TRACE_TYPE_HANDLED;					\
}									\
static struct trace_event_functions ftrace_event_type_funcs_##call = {	\
	.trace			= ftrace_raw_output_##call,		\
};

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
static notrace enum print_line_t					\
ftrace_raw_output_##call(struct trace_iterator *iter, int flags,	\
			 struct trace_event *event)			\
{									\
	struct ftrace_raw_##template *field;				\
	struct trace_entry *entry;					\
	struct trace_seq *p = &iter->tmp_seq;				\
									\
	entry = iter->ent;						\
									\
	if (entry->type != event_##call.event.type) {			\
		WARN_ON_ONCE(1);					\
		return TRACE_TYPE_UNHANDLED;				\
	}								\
									\
	field = (typeof(field))entry;					\
									\
	trace_seq_init(p);						\
	return ftrace_output_call(iter, #call, print);			\
}									\
static struct trace_event_functions ftrace_event_type_funcs_##call = {	\
	.trace			= ftrace_raw_output_##call,		\
};

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#define __field_ext(type, item, filter_type)				\
	ret = trace_define_field(event_call, #type, #item,		\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), filter_type);	\
	if (ret)							\
		return ret;

#define __field(type, item)	__field_ext(type, item, FILTER_OTHER)

#define __array(type, item, len)					\
	do {								\
		char *type_str = #type"["__stringify(len)"]";		\
		BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);			\
		ret = trace_define_field(event_call, type_str, #item,	\
				 offsetof(typeof(field), item),		\
				 sizeof(field.item),			\
				 is_signed_type(type), FILTER_OTHER);	\
		if (ret)						\
			return ret;					\
	} while (0);

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	ret = trace_define_field(event_call, "__data_loc " #type "[]", #item, \
				 offsetof(typeof(field), __data_loc_##item), \
				 sizeof(field.__data_loc_##item),	\
				 is_signed_type(type), FILTER_OTHER);

#define __string(item, src) __dynamic_array(char, item, -1)

#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)

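/*
 * Illustrative sketch for the hypothetical foo_bar event: the class below
 * would generate roughly (error checks omitted)
 *
 *	static int notrace __init
 *	ftrace_define_fields_foo_bar(struct ftrace_event_call *event_call)
 *	{
 *		struct ftrace_raw_foo_bar field;
 *		int ret;
 *
 *		ret = trace_define_field(event_call, "__data_loc char[]", "name",
 *					 offsetof(typeof(field), __data_loc_name),
 *					 sizeof(field.__data_loc_name),
 *					 is_signed_type(char), FILTER_OTHER);
 *		ret = trace_define_field(event_call, "int", "value",
 *					 offsetof(typeof(field), value),
 *					 sizeof(field.value),
 *					 is_signed_type(int), FILTER_OTHER);
 *		return ret;
 *	}
 *
 * These field descriptions are what the event's "format" file and the
 * filter logic are built from.
 */
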
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)	\
static int notrace __init						\
ftrace_define_fields_##call(struct ftrace_event_call *event_call)	\
{									\
	struct ftrace_raw_##call field;					\
	int ret;							\
	tstruct;							\
	return ret;							\
}

#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * remember the offset of each array from the beginning of the event.
 */

#define __entry entry

#define __field(type, item)

#define __field_ext(type, item, filter_type)

#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)				\
	__item_length = (len) * sizeof(type);				\
	__data_offsets->item = __data_size +				\
			       offsetof(typeof(*entry), __data);	\
	__data_offsets->item |= __item_length << 16;			\
	__data_size += __item_length;

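/*
 * Worked example with hypothetical numbers: for a __string() whose source
 * is "tty0" (5 bytes including the NUL), in a record whose fixed part ends
 * with __data at offset 16, the macro above computes
 *
 *	__item_length = 5;
 *	__data_offsets->name  = 0 + 16;    <-- offsetof(..., __data)
 *	__data_offsets->name |= 5 << 16;   <-- length in the high 16 bits
 *	__data_size += 5;
 *
 * i.e. __data_loc_name ends up as 0x00050010, which stage 3 decodes with
 * "& 0xffff" for the offset and ">> 16" for the length.
 */
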
#define __string(item, src) __dynamic_array(char, item,		\
		    strlen((src) ? (const char *)(src) : "(null)") + 1)

/*
 * __bitmask_size_in_bytes_raw is the number of bytes needed to hold
 * nr_bits bits (in practice, num_possible_cpus() bits).
 */
#define __bitmask_size_in_bytes_raw(nr_bits)	\
	(((nr_bits) + 7) / 8)

#define __bitmask_size_in_longs(nr_bits)			\
	((__bitmask_size_in_bytes_raw(nr_bits) +		\
	  ((BITS_PER_LONG / 8) - 1)) / (BITS_PER_LONG / 8))

/*
 * __bitmask_size_in_bytes is the number of bytes needed to hold
 * nr_bits bits padded out to the nearest long. This is what
 * is saved in the buffer, just to be consistent.
 */
#define __bitmask_size_in_bytes(nr_bits)		\
	(__bitmask_size_in_longs(nr_bits) * (BITS_PER_LONG / 8))

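/*
 * Worked example (hypothetical 6-bit mask on a 64-bit kernel, so
 * BITS_PER_LONG / 8 == 8):
 *
 *	__bitmask_size_in_bytes_raw(6) == (6 + 7) / 8 == 1
 *	__bitmask_size_in_longs(6)     == (1 + 7) / 8 == 1
 *	__bitmask_size_in_bytes(6)     == 1 * 8       == 8
 *
 * so a 6-bit cpumask is recorded as one full long (8 bytes) in the buffer.
 */
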
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item,	\
					 __bitmask_size_in_longs(nr_bits))

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static inline notrace int ftrace_get_offsets_##call(			\
	struct ftrace_data_offsets_##call *__data_offsets, proto)	\
{									\
	int __data_size = 0;						\
	int __maybe_unused __item_length;				\
	struct ftrace_raw_##call __maybe_unused *entry;			\
									\
	tstruct;							\
									\
	return __data_size;						\
}

#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct ftrace_event_call event_<call>;
 *
 * static void ftrace_raw_event_<call>(void *__data, proto)
 * {
 *	struct ftrace_event_file *ftrace_file = __data;
 *	struct ftrace_event_call *event_call = ftrace_file->event_call;
 *	struct ftrace_data_offsets_<call> __maybe_unused __data_offsets;
 *	unsigned long eflags = ftrace_file->flags;
 *	enum event_trigger_type __tt = ETT_NONE;
 *	struct ring_buffer_event *event;
 *	struct ftrace_raw_<call> *entry; <-- defined in stage 1
 *	struct ring_buffer *buffer;
 *	unsigned long irq_flags;
 *	int __data_size;
 *	int pc;
 *
 *	if (!(eflags & FTRACE_EVENT_FL_TRIGGER_COND)) {
 *		if (eflags & FTRACE_EVENT_FL_TRIGGER_MODE)
 *			event_triggers_call(ftrace_file, NULL);
 *		if (eflags & FTRACE_EVENT_FL_SOFT_DISABLED)
 *			return;
 *	}
 *
 *	local_save_flags(irq_flags);
 *	pc = preempt_count();
 *
 *	__data_size = ftrace_get_offsets_<call>(&__data_offsets, args);
 *
 *	event = trace_event_buffer_lock_reserve(&buffer, ftrace_file,
 *				  event_<call>->event.type,
 *				  sizeof(*entry) + __data_size,
 *				  irq_flags, pc);
 *	if (!event)
 *		return;
 *	entry = ring_buffer_event_data(event);
 *
 *	{ <assign>; }  <-- Here we assign the entries by the __field and
 *			   __array macros.
 *
 *	if (eflags & FTRACE_EVENT_FL_TRIGGER_COND)
 *		__tt = event_triggers_call(ftrace_file, entry);
 *
 *	if (test_bit(FTRACE_EVENT_FL_SOFT_DISABLED_BIT,
 *		     &ftrace_file->flags))
 *		ring_buffer_discard_commit(buffer, event);
 *	else if (!filter_check_discard(ftrace_file, entry, buffer, event))
 *		trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
 *
 *	if (__tt)
 *		event_triggers_post_call(ftrace_file, __tt);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *	.trace			= ftrace_raw_output_<call>, <-- stage 3
 * };
 *
 * static const char print_fmt_<call>[] = <TP_printk>;
 *
 * static struct ftrace_event_class __used event_class_<template> = {
 *	.system			= "<system>",
 *	.define_fields		= ftrace_define_fields_<call>,
 *	.fields			= LIST_HEAD_INIT(event_class_##call.fields),
 *	.raw_init		= trace_event_raw_init,
 *	.probe			= ftrace_raw_event_##call,
 *	.reg			= ftrace_event_reg,
 * };
 *
 * static struct ftrace_event_call event_<call> = {
 *	.class			= event_class_<template>,
 *	{
 *		.tp			= &__tracepoint_<call>,
 *	},
 *	.event			= &ftrace_event_type_<call>,
 *	.print_fmt		= print_fmt_<call>,
 *	.flags			= TRACE_EVENT_FL_TRACEPOINT,
 * };
 * // it's only safe to use pointers when doing linker tricks to
 * // create an array.
 * static struct ftrace_event_call __used
 * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
 */

#ifdef CONFIG_PERF_EVENTS

#define _TRACE_PERF_PROTO(call, proto)				\
	static notrace void					\
	perf_trace_##call(void *__data, proto);

#define _TRACE_PERF_INIT(call)					\
	.perf_probe		= perf_trace_##call,

#else

#define _TRACE_PERF_PROTO(call, proto)
#define _TRACE_PERF_INIT(call)

#endif /* CONFIG_PERF_EVENTS */

#define __entry entry

#define __field(type, item)

#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)			\
	__entry->__data_loc_##item = __data_offsets.item;

#define __string(item, src) __dynamic_array(char, item, -1)

#define __assign_str(dst, src)					\
	strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");

#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)

#define __get_bitmask(field) (char *)__get_dynamic_array(field)

#undef __assign_bitmask
#define __assign_bitmask(dst, src, nr_bits)			\
	memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#define __perf_addr(a)	(a)

#define __perf_count(c)	(c)

#define __perf_task(t)	(t)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace void							\
ftrace_raw_event_##call(void *__data, proto)				\
{									\
	struct ftrace_event_file *ftrace_file = __data;			\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_event_buffer fbuffer;				\
	struct ftrace_raw_##call *entry;				\
	int __data_size;						\
									\
	if (ftrace_trigger_soft_disabled(ftrace_file))			\
		return;							\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
	entry = ftrace_event_buffer_reserve(&fbuffer, ftrace_file,	\
				 sizeof(*entry) + __data_size);		\
	if (!entry)							\
		return;							\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	ftrace_event_buffer_commit(&fbuffer);				\
}

/*
 * The ftrace_test_probe is compiled out; it is only here as a build-time
 * check to make sure that if the tracepoint handling changes, the ftrace
 * probe will fail to compile unless it too is updated.
 */

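/*
 * Illustrative sketch: for the hypothetical foo_bar event, DECLARE_TRACE()
 * in <linux/tracepoint.h> generates roughly
 *
 *	static inline void
 *	check_trace_callback_type_foo_bar(void (*cb)(void *__data,
 *						     const char *name,
 *						     int value))
 *	{
 *	}
 *
 * so passing ftrace_raw_event_foo_bar to it below fails to compile as soon
 * as the probe's prototype no longer matches the tracepoint's.
 */
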
#define DEFINE_EVENT(template, call, proto, args)			\
static inline void ftrace_test_probe_##call(void)			\
{									\
	check_trace_callback_type_##call(ftrace_raw_event_##template);	\
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __entry
#define __entry REC
#undef __print_symbolic
#undef __get_dynamic_array
#undef __get_dynamic_array_len

#define TP_printk(fmt, args...) "\"" fmt "\", " __stringify(args)

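/*
 * Illustrative sketch for the hypothetical foo_bar event: with __entry now
 * expanding to REC, its TP_printk() from stage 1 is stringified into
 *
 *	static const char print_fmt_foo_bar[] =
 *		"\"name=%s value=%d\", __get_str(name), REC->value";
 *
 * which is what shows up after "print fmt:" in the event's format file.
 */
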
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
_TRACE_PERF_PROTO(call, PARAMS(proto));					\
static const char print_fmt_##call[] = print;				\
static struct ftrace_event_class __used __refdata event_class_##call = { \
	.system			= __stringify(TRACE_SYSTEM),		\
	.define_fields		= ftrace_define_fields_##call,		\
	.fields			= LIST_HEAD_INIT(event_class_##call.fields),\
	.raw_init		= trace_event_raw_init,			\
	.probe			= ftrace_raw_event_##call,		\
	.reg			= ftrace_event_reg,			\
	_TRACE_PERF_INIT(call)						\
};

#define DEFINE_EVENT(template, call, proto, args)			\
									\
static struct ftrace_event_call __used event_##call = {		\
	.class			= &event_class_##template,		\
	{								\
		.tp			= &__tracepoint_##call,		\
	},								\
	.event.funcs		= &ftrace_event_type_funcs_##template,	\
	.print_fmt		= print_fmt_##template,			\
	.flags			= TRACE_EVENT_FL_TRACEPOINT,		\
};									\
static struct ftrace_event_call __used					\
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)		\
									\
static const char print_fmt_##call[] = print;				\
									\
static struct ftrace_event_call __used event_##call = {		\
	.class			= &event_class_##template,		\
	{								\
		.tp			= &__tracepoint_##call,		\
	},								\
	.event.funcs		= &ftrace_event_type_funcs_##call,	\
	.print_fmt		= print_fmt_##call,			\
	.flags			= TRACE_EVENT_FL_TRACEPOINT,		\
};									\
static struct ftrace_event_call __used					\
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#ifdef CONFIG_PERF_EVENTS

#define __entry entry

#undef __get_dynamic_array
#define __get_dynamic_array(field)	\
	((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_dynamic_array_len
#define __get_dynamic_array_len(field)	\
	((__entry->__data_loc_##field >> 16) & 0xffff)

#define __get_str(field) (char *)__get_dynamic_array(field)

#define __get_bitmask(field) (char *)__get_dynamic_array(field)

#define __perf_addr(a)	(__addr = (a))

#define __perf_count(c)	(__count = (c))

#define __perf_task(t)	(__task = (t))

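/*
 * Illustrative sketch (hypothetical event): a TP_fast_assign() line such as
 *
 *	__entry->delay = __perf_count(delay);
 *
 * simply stores the value into the ring-buffer entry in the ftrace probe
 * above, where __perf_count(c) is just (c), while in the perf probe below
 * the same expansion also captures it into the local __count that is handed
 * to perf_trace_buf_submit().
 */
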
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)	\
static notrace void							\
perf_trace_##call(void *__data, proto)					\
{									\
	struct ftrace_event_call *event_call = __data;			\
	struct ftrace_data_offsets_##call __maybe_unused __data_offsets;\
	struct ftrace_raw_##call *entry;				\
	struct pt_regs __regs;						\
	u64 __addr = 0, __count = 1;					\
	struct task_struct *__task = NULL;				\
	struct hlist_head *head;					\
	int __entry_size;						\
	int __data_size;						\
	int rctx;							\
									\
	__data_size = ftrace_get_offsets_##call(&__data_offsets, args); \
									\
	head = this_cpu_ptr(event_call->perf_events);			\
	if (__builtin_constant_p(!__task) && !__task &&			\
				hlist_empty(head))			\
		return;							\
									\
	__entry_size = ALIGN(__data_size + sizeof(*entry) + sizeof(u32),\
			     sizeof(u64));				\
	__entry_size -= sizeof(u32);					\
									\
	perf_fetch_caller_regs(&__regs);				\
	entry = perf_trace_buf_prepare(__entry_size,			\
		event_call->event.type, &__regs, &rctx);		\
	if (!entry)							\
		return;							\
									\
	tstruct								\
									\
	{ assign; }							\
									\
	perf_trace_buf_submit(entry, __entry_size, rctx, __addr,	\
		__count, &__regs, head, __task);			\
}

/*
 * This part is compiled out; it is only here as a build-time check
 * to make sure that if the tracepoint handling changes, the
 * perf probe will fail to compile unless it too is updated.
 */

#define DEFINE_EVENT(template, call, proto, args)			\
static inline void perf_test_probe_##call(void)			\
{									\
	check_trace_callback_type_##call(perf_trace_##template);	\
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)	\
	DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)
#endif /* CONFIG_PERF_EVENTS */