include/trace/trace_events.h
/*
 * Stage 1 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * struct trace_event_raw_<call> {
 *      struct trace_entry              ent;
 *      <type>                          <item>;
 *      <type2>                         <item2>[<len>];
 *      [...]
 * };
 *
 * The <type> <item> is created by the __field(type, item) macro or
 * the __array(type2, item2, len) macro.
 * We simply do "type item;", and that will create the fields
 * in the structure.
 */
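/*
 * Purely illustrative sketch (not part of this header): a hypothetical
 * event "sample_event" with made-up fields, defined elsewhere as
 *
 *      TRACE_EVENT(sample_event,
 *              TP_PROTO(int cpu, unsigned long addr),
 *              TP_ARGS(cpu, addr),
 *              TP_STRUCT__entry(
 *                      __field(int,            cpu)
 *                      __field(unsigned long,  addr)
 *              ),
 *              TP_fast_assign(
 *                      __entry->cpu    = cpu;
 *                      __entry->addr   = addr;
 *              ),
 *              TP_printk("cpu=%d addr=%lx", __entry->cpu, __entry->addr)
 *      );
 *
 * would, in this stage, have its TP_STRUCT__entry() expanded into
 *
 *      struct trace_event_raw_sample_event {
 *              struct trace_entry      ent;
 *              int                     cpu;
 *              unsigned long           addr;
 *              char                    __data[0];
 *      };
 */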

#include <linux/trace_events.h>

#ifndef TRACE_SYSTEM_VAR
#define TRACE_SYSTEM_VAR TRACE_SYSTEM
#endif

#define __app__(x, y) str__##x##y
#define __app(x, y) __app__(x, y)

#define TRACE_SYSTEM_STRING __app(TRACE_SYSTEM_VAR,__trace_system_name)

#define TRACE_MAKE_SYSTEM_STR()                         \
        static const char TRACE_SYSTEM_STRING[] =       \
                __stringify(TRACE_SYSTEM)

TRACE_MAKE_SYSTEM_STR();

#undef TRACE_DEFINE_ENUM
#define TRACE_DEFINE_ENUM(a)                            \
        static struct trace_enum_map __used __initdata  \
        __##TRACE_SYSTEM##_##a =                        \
        {                                               \
                .system = TRACE_SYSTEM_STRING,          \
                .enum_string = #a,                      \
                .enum_value = a                         \
        };                                              \
        static struct trace_enum_map __used             \
        __attribute__((section("_ftrace_enum_map")))    \
        *TRACE_SYSTEM##_##a = &__##TRACE_SYSTEM##_##a

/*
 * DECLARE_EVENT_CLASS can be used to add generic function
 * handlers for events. That is, if all events have the same
 * parameters and just have distinct trace points.
 * Each tracepoint can be defined with DEFINE_EVENT and that
 * will map the DECLARE_EVENT_CLASS to the tracepoint.
 *
 * TRACE_EVENT is a one to one mapping between tracepoint and template.
 */
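/*
 * Illustrative sketch (not part of this header; the names are
 * hypothetical): two tracepoints sharing one class.
 *
 *      DECLARE_EVENT_CLASS(sample_class,
 *              TP_PROTO(struct task_struct *p),
 *              TP_ARGS(p),
 *              TP_STRUCT__entry(
 *                      __field(pid_t,  pid)
 *              ),
 *              TP_fast_assign(
 *                      __entry->pid = p->pid;
 *              ),
 *              TP_printk("pid=%d", __entry->pid)
 *      );
 *
 *      DEFINE_EVENT(sample_class, sample_enter,
 *              TP_PROTO(struct task_struct *p), TP_ARGS(p));
 *      DEFINE_EVENT(sample_class, sample_exit,
 *              TP_PROTO(struct task_struct *p), TP_ARGS(p));
 *
 * Both events reuse the struct, probe and output code that the stages
 * below generate once for sample_class.
 */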
#undef TRACE_EVENT
#define TRACE_EVENT(name, proto, args, tstruct, assign, print) \
        DECLARE_EVENT_CLASS(name,                              \
                             PARAMS(proto),                    \
                             PARAMS(args),                     \
                             PARAMS(tstruct),                  \
                             PARAMS(assign),                   \
                             PARAMS(print));                   \
        DEFINE_EVENT(name, name, PARAMS(proto), PARAMS(args));


#undef __field
#define __field(type, item)             type    item;

#undef __field_ext
#define __field_ext(type, item, filter_type)    type    item;

#undef __field_struct
#define __field_struct(type, item)      type    item;

#undef __field_struct_ext
#define __field_struct_ext(type, item, filter_type)     type    item;

#undef __array
#define __array(type, item, len)        type    item[len];

#undef __dynamic_array
#define __dynamic_array(type, item, len) u32 __data_loc_##item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(char, item, -1)

#undef TP_STRUCT__entry
#define TP_STRUCT__entry(args...) args

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(name, proto, args, tstruct, assign, print)  \
        struct trace_event_raw_##name {                                 \
                struct trace_entry      ent;                            \
                tstruct                                                 \
                char                    __data[0];                      \
        };                                                              \
                                                                        \
        static struct trace_event_class event_class_##name;

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)       \
        static struct trace_event_call  __used          \
        __attribute__((__aligned__(4))) event_##name

#undef DEFINE_EVENT_FN
#define DEFINE_EVENT_FN(template, name, proto, args, reg, unreg)        \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)  \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

/* Callbacks are meaningless to ftrace. */
#undef TRACE_EVENT_FN
#define TRACE_EVENT_FN(name, proto, args, tstruct,                      \
                assign, print, reg, unreg)                              \
        TRACE_EVENT(name, PARAMS(proto), PARAMS(args),                  \
                PARAMS(tstruct), PARAMS(assign), PARAMS(print))         \

#undef TRACE_EVENT_FN_COND
#define TRACE_EVENT_FN_COND(name, proto, args, cond, tstruct,   \
                assign, print, reg, unreg)                              \
        TRACE_EVENT_CONDITION(name, PARAMS(proto), PARAMS(args), PARAMS(cond),          \
                PARAMS(tstruct), PARAMS(assign), PARAMS(print))         \

#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(name, value)                                  \
        __TRACE_EVENT_FLAGS(name, value)

#undef TRACE_EVENT_PERF_PERM
#define TRACE_EVENT_PERF_PERM(name, expr...)                            \
        __TRACE_EVENT_PERF_PERM(name, expr)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 2 of the trace events.
 *
 * Include the following:
 *
 * struct trace_event_data_offsets_<call> {
 *      u32                             <item1>;
 *      u32                             <item2>;
 *      [...]
 * };
 *
 * The __dynamic_array() macro will create each u32 <item>; this is
 * to keep the offset of each array from the beginning of the event.
 * The size of an array is also encoded, in the higher 16 bits of <item>.
 */
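/*
 * Illustrative sketch (hypothetical event, not part of this header):
 * an event declared with
 *
 *      __field(int, cpu)
 *      __string(comm, p->comm)
 *
 * gets, in this stage,
 *
 *      struct trace_event_data_offsets_<call> {
 *              u32     comm;
 *      };
 *
 * Only the dynamic entries (here, the string) need an offset slot; the
 * low 16 bits of 'comm' will hold the offset of the string data from
 * the beginning of the entry and the high 16 bits will hold its length.
 */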

#undef TRACE_DEFINE_ENUM
#define TRACE_DEFINE_ENUM(a)

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __field_struct
#define __field_struct(type, item)

#undef __field_struct_ext
#define __field_struct_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)        u32 item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)  \
        struct trace_event_data_offsets_##call {                        \
                tstruct;                                                \
        };

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)  \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#undef TRACE_EVENT_FLAGS
#define TRACE_EVENT_FLAGS(event, flag)

#undef TRACE_EVENT_PERF_PERM
#define TRACE_EVENT_PERF_PERM(event, expr...)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 3 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * enum print_line_t
 * trace_raw_output_<call>(struct trace_iterator *iter, int flags)
 * {
 *      struct trace_seq *s = &iter->seq;
 *      struct trace_event_raw_<call> *field; <-- defined in stage 1
 *      struct trace_entry *entry;
 *      struct trace_seq *p = &iter->tmp_seq;
 *      int ret;
 *
 *      entry = iter->ent;
 *
 *      if (entry->type != event_<call>->event.type) {
 *              WARN_ON_ONCE(1);
 *              return TRACE_TYPE_UNHANDLED;
 *      }
 *
 *      field = (typeof(field))entry;
 *
 *      trace_seq_init(p);
 *      ret = trace_seq_printf(s, "%s: ", <call>);
 *      if (ret)
 *              ret = trace_seq_printf(s, <TP_printk> "\n");
 *      if (!ret)
 *              return TRACE_TYPE_PARTIAL_LINE;
 *
 *      return TRACE_TYPE_HANDLED;
 * }
 *
 * This is the method used to print the raw event to the trace
 * output format. Note, this is not needed if the data is read
 * in binary.
 */

#undef __entry
#define __entry field

#undef TP_printk
#define TP_printk(fmt, args...) fmt "\n", args

#undef __get_dynamic_array
#define __get_dynamic_array(field)      \
                ((void *)__entry + (__entry->__data_loc_##field & 0xffff))

#undef __get_dynamic_array_len
#define __get_dynamic_array_len(field)  \
                ((__entry->__data_loc_##field >> 16) & 0xffff)

#undef __get_str
#define __get_str(field) ((char *)__get_dynamic_array(field))

#undef __get_bitmask
#define __get_bitmask(field)                                            \
        ({                                                              \
                void *__bitmask = __get_dynamic_array(field);           \
                unsigned int __bitmask_size;                            \
                __bitmask_size = __get_dynamic_array_len(field);        \
                trace_print_bitmask_seq(p, __bitmask, __bitmask_size);  \
        })

#undef __print_flags
#define __print_flags(flag, delim, flag_array...)                       \
        ({                                                              \
                static const struct trace_print_flags __flags[] =       \
                        { flag_array, { -1, NULL }};                    \
                trace_print_flags_seq(p, delim, flag, __flags); \
        })

#undef __print_symbolic
#define __print_symbolic(value, symbol_array...)                        \
        ({                                                              \
                static const struct trace_print_flags symbols[] =       \
                        { symbol_array, { -1, NULL }};                  \
                trace_print_symbols_seq(p, value, symbols);             \
        })

#undef __print_symbolic_u64
#if BITS_PER_LONG == 32
#define __print_symbolic_u64(value, symbol_array...)                    \
        ({                                                              \
                static const struct trace_print_flags_u64 symbols[] =   \
                        { symbol_array, { -1, NULL } };                 \
                trace_print_symbols_seq_u64(p, value, symbols); \
        })
#else
#define __print_symbolic_u64(value, symbol_array...)                    \
                        __print_symbolic(value, symbol_array)
#endif

#undef __print_hex
#define __print_hex(buf, buf_len) trace_print_hex_seq(p, buf, buf_len)

#undef __print_array
#define __print_array(array, count, el_size)                            \
        ({                                                              \
                BUILD_BUG_ON(el_size != 1 && el_size != 2 &&            \
                             el_size != 4 && el_size != 8);             \
                trace_print_array_seq(p, array, count, el_size);        \
        })
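/*
 * Illustrative use of the output helpers in a TP_printk() (the field
 * names and values are hypothetical, not part of this header):
 *
 *      TP_printk("state=%s flags=%s",
 *                __print_symbolic(__entry->state,
 *                                 { 0, "RUNNING" }, { 1, "SLEEPING" }),
 *                __print_flags(__entry->flags, "|",
 *                              { 0x1, "KERNEL" }, { 0x2, "USER" }))
 */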

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)  \
static notrace enum print_line_t                                        \
trace_raw_output_##call(struct trace_iterator *iter, int flags,         \
                        struct trace_event *trace_event)                \
{                                                                       \
        struct trace_seq *s = &iter->seq;                               \
        struct trace_seq __maybe_unused *p = &iter->tmp_seq;            \
        struct trace_event_raw_##call *field;                           \
        int ret;                                                        \
                                                                        \
        field = (typeof(field))iter->ent;                               \
                                                                        \
        ret = trace_raw_output_prep(iter, trace_event);                 \
        if (ret != TRACE_TYPE_HANDLED)                                  \
                return ret;                                             \
                                                                        \
        trace_seq_printf(s, print);                                     \
                                                                        \
        return trace_handle_return(s);                                  \
}                                                                       \
static struct trace_event_functions trace_event_type_funcs_##call = {   \
        .trace                  = trace_raw_output_##call,              \
};

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)          \
static notrace enum print_line_t                                        \
trace_raw_output_##call(struct trace_iterator *iter, int flags,         \
                         struct trace_event *event)                     \
{                                                                       \
        struct trace_event_raw_##template *field;                       \
        struct trace_entry *entry;                                      \
        struct trace_seq *p = &iter->tmp_seq;                           \
                                                                        \
        entry = iter->ent;                                              \
                                                                        \
        if (entry->type != event_##call.event.type) {                   \
                WARN_ON_ONCE(1);                                        \
                return TRACE_TYPE_UNHANDLED;                            \
        }                                                               \
                                                                        \
        field = (typeof(field))entry;                                   \
                                                                        \
        trace_seq_init(p);                                              \
        return trace_output_call(iter, #call, print);                   \
}                                                                       \
static struct trace_event_functions trace_event_type_funcs_##call = {   \
        .trace                  = trace_raw_output_##call,              \
};

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __field_ext
#define __field_ext(type, item, filter_type)                            \
        ret = trace_define_field(event_call, #type, #item,              \
                                 offsetof(typeof(field), item),         \
                                 sizeof(field.item),                    \
                                 is_signed_type(type), filter_type);    \
        if (ret)                                                        \
                return ret;

#undef __field_struct_ext
#define __field_struct_ext(type, item, filter_type)                     \
        ret = trace_define_field(event_call, #type, #item,              \
                                 offsetof(typeof(field), item),         \
                                 sizeof(field.item),                    \
                                 0, filter_type);                       \
        if (ret)                                                        \
                return ret;

#undef __field
#define __field(type, item)     __field_ext(type, item, FILTER_OTHER)

#undef __field_struct
#define __field_struct(type, item) __field_struct_ext(type, item, FILTER_OTHER)

#undef __array
#define __array(type, item, len)                                        \
        do {                                                            \
                char *type_str = #type"["__stringify(len)"]";           \
                BUILD_BUG_ON(len > MAX_FILTER_STR_VAL);                 \
                ret = trace_define_field(event_call, type_str, #item,   \
                                 offsetof(typeof(field), item),         \
                                 sizeof(field.item),                    \
                                 is_signed_type(type), FILTER_OTHER);   \
                if (ret)                                                \
                        return ret;                                     \
        } while (0);

#undef __dynamic_array
#define __dynamic_array(type, item, len)                                       \
        ret = trace_define_field(event_call, "__data_loc " #type "[]", #item,  \
                                 offsetof(typeof(field), __data_loc_##item),   \
                                 sizeof(field.__data_loc_##item),              \
                                 is_signed_type(type), FILTER_OTHER);

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, func, print)    \
static int notrace __init                                               \
trace_event_define_fields_##call(struct trace_event_call *event_call)   \
{                                                                       \
        struct trace_event_raw_##call field;                            \
        int ret;                                                        \
                                                                        \
        tstruct;                                                        \
                                                                        \
        return ret;                                                     \
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)  \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Remember the offset of each array from the beginning of the event.
 */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_ext
#define __field_ext(type, item, filter_type)

#undef __field_struct
#define __field_struct(type, item)

#undef __field_struct_ext
#define __field_struct_ext(type, item, filter_type)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)                                \
        __item_length = (len) * sizeof(type);                           \
        __data_offsets->item = __data_size +                            \
                               offsetof(typeof(*entry), __data);        \
        __data_offsets->item |= __item_length << 16;                    \
        __data_size += __item_length;

#undef __string
#define __string(item, src) __dynamic_array(char, item,                 \
                    strlen((src) ? (const char *)(src) : "(null)") + 1)

/*
 * __bitmask_size_in_bytes_raw is the number of bytes needed to hold
 * num_possible_cpus().
 */
#define __bitmask_size_in_bytes_raw(nr_bits)    \
        (((nr_bits) + 7) / 8)

#define __bitmask_size_in_longs(nr_bits)                        \
        ((__bitmask_size_in_bytes_raw(nr_bits) +                \
          ((BITS_PER_LONG / 8) - 1)) / (BITS_PER_LONG / 8))

/*
 * __bitmask_size_in_bytes is the number of bytes needed to hold
 * num_possible_cpus() padded out to the nearest long. This is what
 * is saved in the buffer, just to be consistent.
 */
#define __bitmask_size_in_bytes(nr_bits)                                \
        (__bitmask_size_in_longs(nr_bits) * (BITS_PER_LONG / 8))
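/*
 * Worked example (hypothetical nr_bits, assuming a 64-bit kernel with
 * BITS_PER_LONG == 64): for nr_bits = 72,
 * __bitmask_size_in_bytes_raw() = (72 + 7) / 8 = 9 bytes,
 * __bitmask_size_in_longs()     = (9 + 7) / 8  = 2 longs,
 * __bitmask_size_in_bytes()     = 2 * 8        = 16 bytes in the buffer.
 */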

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item,   \
                                         __bitmask_size_in_longs(nr_bits))

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)  \
static inline notrace int trace_event_get_offsets_##call(               \
        struct trace_event_data_offsets_##call *__data_offsets, proto)  \
{                                                                       \
        int __data_size = 0;                                            \
        int __maybe_unused __item_length;                               \
        struct trace_event_raw_##call __maybe_unused *entry;            \
                                                                        \
        tstruct;                                                        \
                                                                        \
        return __data_size;                                             \
}

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, name, proto, args)

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)  \
        DEFINE_EVENT(template, name, PARAMS(proto), PARAMS(args))

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

/*
 * Stage 4 of the trace events.
 *
 * Override the macros in <trace/trace_events.h> to include the following:
 *
 * For those macros defined with TRACE_EVENT:
 *
 * static struct trace_event_call event_<call>;
 *
 * static void trace_event_raw_event_<call>(void *__data, proto)
 * {
 *      struct trace_event_file *trace_file = __data;
 *      struct trace_event_call *event_call = trace_file->event_call;
 *      struct trace_event_data_offsets_<call> __maybe_unused __data_offsets;
 *      unsigned long eflags = trace_file->flags;
 *      enum event_trigger_type __tt = ETT_NONE;
 *      struct ring_buffer_event *event;
 *      struct trace_event_raw_<call> *entry; <-- defined in stage 1
 *      struct ring_buffer *buffer;
 *      unsigned long irq_flags;
 *      int __data_size;
 *      int pc;
 *
 *      if (!(eflags & EVENT_FILE_FL_TRIGGER_COND)) {
 *              if (eflags & EVENT_FILE_FL_TRIGGER_MODE)
 *                      event_triggers_call(trace_file, NULL);
 *              if (eflags & EVENT_FILE_FL_SOFT_DISABLED)
 *                      return;
 *      }
 *
 *      local_save_flags(irq_flags);
 *      pc = preempt_count();
 *
 *      __data_size = trace_event_get_offsets_<call>(&__data_offsets, args);
 *
 *      event = trace_event_buffer_lock_reserve(&buffer, trace_file,
 *                                event_<call>->event.type,
 *                                sizeof(*entry) + __data_size,
 *                                irq_flags, pc);
 *      if (!event)
 *              return;
 *      entry   = ring_buffer_event_data(event);
 *
 *      { <assign>; }  <-- Here we assign the entries by the __field and
 *                         __array macros.
 *
 *      if (eflags & EVENT_FILE_FL_TRIGGER_COND)
 *              __tt = event_triggers_call(trace_file, entry);
 *
 *      if (test_bit(EVENT_FILE_FL_SOFT_DISABLED_BIT,
 *                   &trace_file->flags))
 *              ring_buffer_discard_commit(buffer, event);
 *      else if (!filter_check_discard(trace_file, entry, buffer, event))
 *              trace_buffer_unlock_commit(buffer, event, irq_flags, pc);
 *
 *      if (__tt)
 *              event_triggers_post_call(trace_file, __tt);
 * }
 *
 * static struct trace_event ftrace_event_type_<call> = {
 *      .trace                  = trace_raw_output_<call>, <-- stage 3
 * };
 *
 * static char print_fmt_<call>[] = <TP_printk>;
 *
 * static struct trace_event_class __used event_class_<template> = {
 *      .system                 = "<system>",
 *      .define_fields          = trace_event_define_fields_<call>,
 *      .fields                 = LIST_HEAD_INIT(event_class_##call.fields),
 *      .raw_init               = trace_event_raw_init,
 *      .probe                  = trace_event_raw_event_##call,
 *      .reg                    = trace_event_reg,
 * };
 *
 * static struct trace_event_call event_<call> = {
 *      .class                  = event_class_<template>,
 *      {
 *              .tp                     = &__tracepoint_<call>,
 *      },
 *      .event                  = &ftrace_event_type_<call>,
 *      .print_fmt              = print_fmt_<call>,
 *      .flags                  = TRACE_EVENT_FL_TRACEPOINT,
 * };
 * // it's only safe to use pointers when doing linker tricks to
 * // create an array.
 * static struct trace_event_call __used
 * __attribute__((section("_ftrace_events"))) *__event_<call> = &event_<call>;
 *
 */

#ifdef CONFIG_PERF_EVENTS

#define _TRACE_PERF_PROTO(call, proto)                                  \
        static notrace void                                             \
        perf_trace_##call(void *__data, proto);

#define _TRACE_PERF_INIT(call)                                          \
        .perf_probe             = perf_trace_##call,

#else
#define _TRACE_PERF_PROTO(call, proto)
#define _TRACE_PERF_INIT(call)
#endif /* CONFIG_PERF_EVENTS */

#undef __entry
#define __entry entry

#undef __field
#define __field(type, item)

#undef __field_struct
#define __field_struct(type, item)

#undef __array
#define __array(type, item, len)

#undef __dynamic_array
#define __dynamic_array(type, item, len)                                \
        __entry->__data_loc_##item = __data_offsets.item;

#undef __string
#define __string(item, src) __dynamic_array(char, item, -1)

#undef __assign_str
#define __assign_str(dst, src)                                          \
        strcpy(__get_str(dst), (src) ? (const char *)(src) : "(null)");
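/*
 * Illustrative pairing of the string helpers in a hypothetical event
 * (not part of this header; 'comm' and 'p' are made up):
 *
 *      TP_STRUCT__entry( __string(comm, p->comm) )
 *      TP_fast_assign(   __assign_str(comm, p->comm) )
 *      TP_printk("comm=%s", __get_str(comm))
 */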

#undef __bitmask
#define __bitmask(item, nr_bits) __dynamic_array(unsigned long, item, -1)

#undef __get_bitmask
#define __get_bitmask(field) (char *)__get_dynamic_array(field)

#undef __assign_bitmask
#define __assign_bitmask(dst, src, nr_bits)                                     \
        memcpy(__get_bitmask(dst), (src), __bitmask_size_in_bytes(nr_bits))

#undef TP_fast_assign
#define TP_fast_assign(args...) args

#undef __perf_count
#define __perf_count(c) (c)

#undef __perf_task
#define __perf_task(t)  (t)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)  \
                                                                        \
static notrace void                                                     \
trace_event_raw_event_##call(void *__data, proto)                       \
{                                                                       \
        struct trace_event_file *trace_file = __data;                   \
        struct trace_event_data_offsets_##call __maybe_unused __data_offsets;\
        struct trace_event_buffer fbuffer;                              \
        struct trace_event_raw_##call *entry;                           \
        int __data_size;                                                \
                                                                        \
        if (trace_trigger_soft_disabled(trace_file))                    \
                return;                                                 \
                                                                        \
        __data_size = trace_event_get_offsets_##call(&__data_offsets, args); \
                                                                        \
        entry = trace_event_buffer_reserve(&fbuffer, trace_file,        \
                                 sizeof(*entry) + __data_size);         \
                                                                        \
        if (!entry)                                                     \
                return;                                                 \
                                                                        \
        tstruct                                                         \
                                                                        \
        { assign; }                                                     \
                                                                        \
        trace_event_buffer_commit(&fbuffer);                            \
}
/*
 * The ftrace_test_probe is compiled out; it is only here as a build time check
 * to make sure that if the tracepoint handling changes, the ftrace probe will
 * fail to compile unless it too is updated.
 */

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)                       \
static inline void ftrace_test_probe_##call(void)                       \
{                                                                       \
        check_trace_callback_type_##call(trace_event_raw_event_##template); \
}

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, name, proto, args, print)

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)

#undef __entry
#define __entry REC

#undef __print_flags
#undef __print_symbolic
#undef __print_hex
#undef __get_dynamic_array
#undef __get_dynamic_array_len
#undef __get_str
#undef __get_bitmask
#undef __print_array

#undef TP_printk
#define TP_printk(fmt, args...) "\"" fmt "\", "  __stringify(args)

#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print)  \
_TRACE_PERF_PROTO(call, PARAMS(proto));                                 \
static char print_fmt_##call[] = print;                                 \
static struct trace_event_class __used __refdata event_class_##call = { \
        .system                 = TRACE_SYSTEM_STRING,                  \
        .define_fields          = trace_event_define_fields_##call,     \
        .fields                 = LIST_HEAD_INIT(event_class_##call.fields),\
        .raw_init               = trace_event_raw_init,                 \
        .probe                  = trace_event_raw_event_##call,         \
        .reg                    = trace_event_reg,                      \
        _TRACE_PERF_INIT(call)                                          \
};

#undef DEFINE_EVENT
#define DEFINE_EVENT(template, call, proto, args)                       \
                                                                        \
static struct trace_event_call __used event_##call = {                  \
        .class                  = &event_class_##template,              \
        {                                                               \
                .tp                     = &__tracepoint_##call,         \
        },                                                              \
        .event.funcs            = &trace_event_type_funcs_##template,   \
        .print_fmt              = print_fmt_##template,                 \
        .flags                  = TRACE_EVENT_FL_TRACEPOINT,            \
};                                                                      \
static struct trace_event_call __used                                   \
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call

#undef DEFINE_EVENT_PRINT
#define DEFINE_EVENT_PRINT(template, call, proto, args, print)          \
                                                                        \
static char print_fmt_##call[] = print;                                 \
                                                                        \
static struct trace_event_call __used event_##call = {                  \
        .class                  = &event_class_##template,              \
        {                                                               \
                .tp                     = &__tracepoint_##call,         \
        },                                                              \
        .event.funcs            = &trace_event_type_funcs_##call,       \
        .print_fmt              = print_fmt_##call,                     \
        .flags                  = TRACE_EVENT_FL_TRACEPOINT,            \
};                                                                      \
static struct trace_event_call __used                                   \
__attribute__((section("_ftrace_events"))) *__event_##call = &event_##call

#include TRACE_INCLUDE(TRACE_INCLUDE_FILE)