2 * trace_events_hist - trace event hist triggers
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
17 #include <linux/module.h>
18 #include <linux/kallsyms.h>
19 #include <linux/mutex.h>
20 #include <linux/slab.h>
21 #include <linux/stacktrace.h>
23 #include "tracing_map.h"
/*
 * NOTE(review): this listing is elided — original source line numbers are
 * embedded in each line and many intervening lines are missing.  Comments
 * describe apparent intent; confirm against the complete file.
 */
/* Accessor type: extract a u64 value for one hist field from a trace record. */
28 typedef u64 (*hist_field_fn_t) (struct hist_field *field, void *event);
/* Fragment of struct hist_field — the underlying event field it reads. */
31 struct ftrace_event_field *field;
/* No-op accessor for keys with no per-event scalar (e.g. stacktrace). Body elided. */
38 static u64 hist_field_none(struct hist_field *field, void *event)
/* Accessor for the implicit hitcount value. Body elided. */
43 static u64 hist_field_counter(struct hist_field *field, void *event)
/* Static (fixed-size, in-record) string: return address of the chars. */
48 static u64 hist_field_string(struct hist_field *hist_field, void *event)
50 char *addr = (char *)(event + hist_field->field->offset);
52 return (u64)(unsigned long)addr;
/* Dynamic string: the u32 at the field offset packs location (low 16 bits)
 * and length (high 16 bits); resolve to the string's in-record address. */
55 static u64 hist_field_dynstring(struct hist_field *hist_field, void *event)
57 u32 str_item = *(u32 *)(event + hist_field->field->offset);
58 int str_loc = str_item & 0xffff;
59 char *addr = (char *)(event + str_loc);
61 return (u64)(unsigned long)addr;
/* Pointer-to-string field: dereference the stored char * and return it. */
64 static u64 hist_field_pstring(struct hist_field *hist_field, void *event)
66 char **addr = (char **)(event + hist_field->field->offset);
68 return (u64)(unsigned long)*addr;
/*
 * Generate a typed accessor hist_field_<type>() that loads a <type> at the
 * field's offset in the record and widens it to u64.  Instantiated below
 * for every signed/unsigned integer width.
 */
71 #define DEFINE_HIST_FIELD_FN(type) \
72 static u64 hist_field_##type(struct hist_field *hist_field, void *event)\
74 type *addr = (type *)(event + hist_field->field->offset); \
76 return (u64)(unsigned long)*addr; \
79 DEFINE_HIST_FIELD_FN(s64);
80 DEFINE_HIST_FIELD_FN(u64);
81 DEFINE_HIST_FIELD_FN(s32);
82 DEFINE_HIST_FIELD_FN(u32);
83 DEFINE_HIST_FIELD_FN(s16);
84 DEFINE_HIST_FIELD_FN(u16);
85 DEFINE_HIST_FIELD_FN(s8);
86 DEFINE_HIST_FIELD_FN(u8);
/* Iterate over all fields (values first, then keys). */
88 #define for_each_hist_field(i, hist_data) \
89 for ((i) = 0; (i) < (hist_data)->n_fields; (i)++)
/* Iterate over value fields only: indices [0, n_vals). */
91 #define for_each_hist_val_field(i, hist_data) \
92 for ((i) = 0; (i) < (hist_data)->n_vals; (i)++)
/* Iterate over key fields only: indices [n_vals, n_fields). */
94 #define for_each_hist_key_field(i, hist_data) \
95 for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)
/* Stacktrace keys: depth of 16 frames, skipping 5 tracing-internal frames. */
97 #define HIST_STACKTRACE_DEPTH 16
98 #define HIST_STACKTRACE_SIZE (HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
99 #define HIST_STACKTRACE_SKIP 5
/* Slot 0 of the value array is always the implicit hitcount. */
101 #define HITCOUNT_IDX 0
/* Upper bound on total compound-key size. */
102 #define HIST_KEY_SIZE_MAX (MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)
/* Per-field flags: role (hitcount/key) and display/interpretation modifiers. */
104 enum hist_field_flags {
105 HIST_FIELD_FL_HITCOUNT = 1,
106 HIST_FIELD_FL_KEY = 2,
107 HIST_FIELD_FL_STRING = 4,
108 HIST_FIELD_FL_HEX = 8,
109 HIST_FIELD_FL_SYM = 16,
110 HIST_FIELD_FL_SYM_OFFSET = 32,
111 HIST_FIELD_FL_EXECNAME = 64,
112 HIST_FIELD_FL_SYSCALL = 128,
113 HIST_FIELD_FL_STACKTRACE = 256,
/* Parsed trigger-string attributes (keys/vals/sort strings, flags, map size).
 * NOTE(review): most members are elided in this listing. */
116 struct hist_trigger_attrs {
124 unsigned int map_bits;
/* Per-trigger state: fields, sort keys, owning event file, backing map. */
127 struct hist_trigger_data {
128 struct hist_field *fields[TRACING_MAP_FIELDS_MAX];
131 unsigned int n_fields;
132 unsigned int key_size;
133 struct tracing_map_sort_key sort_keys[TRACING_MAP_SORT_KEYS_MAX];
134 unsigned int n_sort_keys;
135 struct trace_event_file *event_file;
136 struct hist_trigger_attrs *attrs;
137 struct tracing_map *map;
/*
 * Pick the typed accessor (hist_field_[su]{8,16,32,64}) matching a field's
 * size and signedness; NULL if unsupported.  Switch body elided here.
 */
140 static hist_field_fn_t select_value_fn(int field_size, int field_is_signed)
142 hist_field_fn_t fn = NULL;
144 switch (field_size) {
/*
 * Parse the "size=N" attribute: round N up to a power of two and validate
 * the resulting bit count against the tracing_map min/max.
 */
174 static int parse_map_size(char *str)
176 unsigned long size, map_bits;
185 ret = kstrtoul(str, 0, &size);
189 map_bits = ilog2(roundup_pow_of_two(size));
190 if (map_bits < TRACING_MAP_BITS_MIN ||
191 map_bits > TRACING_MAP_BITS_MAX)
/* Free an attrs struct and the strings it owns. */
199 static void destroy_hist_trigger_attrs(struct hist_trigger_attrs *attrs)
205 kfree(attrs->sort_key_str);
206 kfree(attrs->keys_str);
207 kfree(attrs->vals_str);
/*
 * Parse the colon-separated trigger string ("keys=...:vals=...:sort=...")
 * into a freshly allocated hist_trigger_attrs.  Returns ERR_PTR on failure.
 * NOTE(review): kstrdup() results are not visibly NULL-checked in this
 * elided listing — confirm against the full file.
 */
211 static struct hist_trigger_attrs *parse_hist_trigger_attrs(char *trigger_str)
213 struct hist_trigger_attrs *attrs;
216 attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
218 return ERR_PTR(-ENOMEM);
220 while (trigger_str) {
221 char *str = strsep(&trigger_str, ":");
223 if ((strncmp(str, "key=", strlen("key=")) == 0) ||
224 (strncmp(str, "keys=", strlen("keys=")) == 0))
225 attrs->keys_str = kstrdup(str, GFP_KERNEL);
226 else if ((strncmp(str, "val=", strlen("val=")) == 0) ||
227 (strncmp(str, "vals=", strlen("vals=")) == 0) ||
228 (strncmp(str, "values=", strlen("values=")) == 0))
229 attrs->vals_str = kstrdup(str, GFP_KERNEL);
230 else if (strncmp(str, "sort=", strlen("sort=")) == 0)
231 attrs->sort_key_str = kstrdup(str, GFP_KERNEL);
232 else if (strncmp(str, "name=", strlen("name=")) == 0)
233 attrs->name = kstrdup(str, GFP_KERNEL);
234 else if (strcmp(str, "pause") == 0)
236 else if ((strcmp(str, "cont") == 0) ||
237 (strcmp(str, "continue") == 0))
239 else if (strcmp(str, "clear") == 0)
241 else if (strncmp(str, "size=", strlen("size=")) == 0) {
242 int map_bits = parse_map_size(str);
248 attrs->map_bits = map_bits;
/* A keys= clause is mandatory; bail out (error path elided) otherwise. */
255 if (!attrs->keys_str) {
262 destroy_hist_trigger_attrs(attrs);
/*
 * Copy a task's comm into 'comm', substituting "<idle>" for pid 0 and a
 * placeholder for a corrupt negative pid.
 */
267 static inline void save_comm(char *comm, struct task_struct *task)
270 strcpy(comm, "<idle>");
274 if (WARN_ON_ONCE(task->pid < 0)) {
275 strcpy(comm, "<XXX>");
279 memcpy(comm, task->comm, TASK_COMM_LEN);
/* Free the per-element comm buffer. */
282 static void hist_trigger_elt_comm_free(struct tracing_map_elt *elt)
284 kfree((char *)elt->private_data);
/*
 * Allocate a comm buffer for a map element if any key field uses the
 * .execname modifier; -ENOMEM path elided here.
 */
287 static int hist_trigger_elt_comm_alloc(struct tracing_map_elt *elt)
289 struct hist_trigger_data *hist_data = elt->map->private_data;
290 struct hist_field *key_field;
293 for_each_hist_key_field(i, hist_data) {
294 key_field = hist_data->fields[i];
296 if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
297 unsigned int size = TASK_COMM_LEN + 1;
299 elt->private_data = kzalloc(size, GFP_KERNEL);
300 if (!elt->private_data)
/* Duplicate the comm when a map element is copied. */
309 static void hist_trigger_elt_comm_copy(struct tracing_map_elt *to,
310 struct tracing_map_elt *from)
312 char *comm_from = from->private_data;
313 char *comm_to = to->private_data;
316 memcpy(comm_to, comm_from, TASK_COMM_LEN + 1);
/* Record current's comm when an element is first claimed. */
319 static void hist_trigger_elt_comm_init(struct tracing_map_elt *elt)
321 char *comm = elt->private_data;
324 save_comm(comm, current);
/* tracing_map callbacks wiring the comm lifecycle above. */
327 static const struct tracing_map_ops hist_trigger_elt_comm_ops = {
328 .elt_alloc = hist_trigger_elt_comm_alloc,
329 .elt_copy = hist_trigger_elt_comm_copy,
330 .elt_free = hist_trigger_elt_comm_free,
331 .elt_init = hist_trigger_elt_comm_init,
/* Free one hist_field (body elided in this listing). */
334 static void destroy_hist_field(struct hist_field *hist_field)
/*
 * Allocate a hist_field and bind the accessor fn appropriate to its flags
 * and underlying event field type.  Function-type fields are rejected.
 * Returns NULL on failure (several returns elided here).
 */
339 static struct hist_field *create_hist_field(struct ftrace_event_field *field,
342 struct hist_field *hist_field;
344 if (field && is_function_field(field))
347 hist_field = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
351 if (flags & HIST_FIELD_FL_HITCOUNT) {
352 hist_field->fn = hist_field_counter;
356 if (flags & HIST_FIELD_FL_STACKTRACE) {
357 hist_field->fn = hist_field_none;
361 if (is_string_field(field)) {
362 flags |= HIST_FIELD_FL_STRING;
364 if (field->filter_type == FILTER_STATIC_STRING)
365 hist_field->fn = hist_field_string;
366 else if (field->filter_type == FILTER_DYN_STRING)
367 hist_field->fn = hist_field_dynstring;
369 hist_field->fn = hist_field_pstring;
/* Numeric field: pick typed accessor by size/signedness. */
371 hist_field->fn = select_value_fn(field->size,
373 if (!hist_field->fn) {
374 destroy_hist_field(hist_field);
379 hist_field->field = field;
380 hist_field->flags = flags;
/* Free and NULL every allocated field slot. */
385 static void destroy_hist_fields(struct hist_trigger_data *hist_data)
389 for (i = 0; i < TRACING_MAP_FIELDS_MAX; i++) {
390 if (hist_data->fields[i]) {
391 destroy_hist_field(hist_data->fields[i]);
392 hist_data->fields[i] = NULL;
/* Install the implicit hitcount value at HITCOUNT_IDX (slot 0). */
397 static int create_hitcount_val(struct hist_trigger_data *hist_data)
399 hist_data->fields[HITCOUNT_IDX] =
400 create_hist_field(NULL, HIST_FIELD_FL_HITCOUNT);
401 if (!hist_data->fields[HITCOUNT_IDX])
406 if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX))
/*
 * Create one value field at val_idx from a "name[.hex]" spec: look up the
 * event field and wrap it in a hist_field.  Error paths elided here.
 */
412 static int create_val_field(struct hist_trigger_data *hist_data,
413 unsigned int val_idx,
414 struct trace_event_file *file,
417 struct ftrace_event_field *field = NULL;
418 unsigned long flags = 0;
422 if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX))
/* Split "field.modifier"; only .hex is accepted for values. */
425 field_name = strsep(&field_str, ".");
427 if (strcmp(field_str, "hex") == 0)
428 flags |= HIST_FIELD_FL_HEX;
435 field = trace_find_event_field(file->event_call, field_name);
441 hist_data->fields[val_idx] = create_hist_field(field, flags);
442 if (!hist_data->fields[val_idx]) {
449 if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX))
/*
 * Parse the vals= list: always create hitcount first (slot 0), then one
 * value field per comma-separated entry; "hitcount" entries are skipped
 * since it already exists.
 */
455 static int create_val_fields(struct hist_trigger_data *hist_data,
456 struct trace_event_file *file)
458 char *fields_str, *field_str;
462 ret = create_hitcount_val(hist_data);
466 fields_str = hist_data->attrs->vals_str;
/* Skip past the "vals=" prefix. */
470 strsep(&fields_str, "=");
474 for (i = 0, j = 1; i < TRACING_MAP_VALS_MAX &&
475 j < TRACING_MAP_VALS_MAX; i++) {
476 field_str = strsep(&fields_str, ",");
479 if (strcmp(field_str, "hitcount") == 0)
481 ret = create_val_field(hist_data, j++, file, field_str);
/* Leftover input that isn't a trailing "hitcount" is an error. */
485 if (fields_str && (strcmp(fields_str, "hitcount") != 0))
/*
 * Create one key field at key_idx/key_offset.  "stacktrace" becomes a
 * kernel-stack key; otherwise the spec is "field[.modifier]" with hex,
 * sym, sym-offset, execname (common_pid only) and syscall modifiers.
 * Key sizes are u64-aligned and accumulated into hist_data->key_size.
 */
491 static int create_key_field(struct hist_trigger_data *hist_data,
492 unsigned int key_idx,
493 unsigned int key_offset,
494 struct trace_event_file *file,
497 struct ftrace_event_field *field = NULL;
498 unsigned long flags = 0;
499 unsigned int key_size;
502 if (WARN_ON(key_idx >= TRACING_MAP_FIELDS_MAX))
505 flags |= HIST_FIELD_FL_KEY;
507 if (strcmp(field_str, "stacktrace") == 0) {
508 flags |= HIST_FIELD_FL_STACKTRACE;
509 key_size = sizeof(unsigned long) * HIST_STACKTRACE_DEPTH;
511 char *field_name = strsep(&field_str, ".");
514 if (strcmp(field_str, "hex") == 0)
515 flags |= HIST_FIELD_FL_HEX;
516 else if (strcmp(field_str, "sym") == 0)
517 flags |= HIST_FIELD_FL_SYM;
518 else if (strcmp(field_str, "sym-offset") == 0)
519 flags |= HIST_FIELD_FL_SYM_OFFSET;
520 else if ((strcmp(field_str, "execname") == 0) &&
521 (strcmp(field_name, "common_pid") == 0))
522 flags |= HIST_FIELD_FL_EXECNAME;
523 else if (strcmp(field_str, "syscall") == 0)
524 flags |= HIST_FIELD_FL_SYSCALL;
531 field = trace_find_event_field(file->event_call, field_name);
/* String keys reserve the full filter-string width. */
537 if (is_string_field(field))
538 key_size = MAX_FILTER_STR_VAL;
540 key_size = field->size;
543 hist_data->fields[key_idx] = create_hist_field(field, flags);
544 if (!hist_data->fields[key_idx]) {
549 key_size = ALIGN(key_size, sizeof(u64));
550 hist_data->fields[key_idx]->size = key_size;
551 hist_data->fields[key_idx]->offset = key_offset;
552 hist_data->key_size += key_size;
553 if (hist_data->key_size > HIST_KEY_SIZE_MAX) {
560 if (WARN_ON(hist_data->n_keys > TRACING_MAP_KEYS_MAX))
/*
 * Parse the keys= list; key fields occupy indices starting at n_vals,
 * with key_offset accumulating through the compound key.
 */
568 static int create_key_fields(struct hist_trigger_data *hist_data,
569 struct trace_event_file *file)
571 unsigned int i, key_offset = 0, n_vals = hist_data->n_vals;
572 char *fields_str, *field_str;
575 fields_str = hist_data->attrs->keys_str;
579 strsep(&fields_str, "=");
583 for (i = n_vals; i < n_vals + TRACING_MAP_KEYS_MAX; i++) {
584 field_str = strsep(&fields_str, ",");
587 ret = create_key_field(hist_data, i, key_offset,
/* Top level: values first, then keys; n_fields is their sum. */
602 static int create_hist_fields(struct hist_trigger_data *hist_data,
603 struct trace_event_file *file)
607 ret = create_val_fields(hist_data, file);
611 ret = create_key_fields(hist_data, file);
615 hist_data->n_fields = hist_data->n_vals + hist_data->n_keys;
/*
 * Interpret an optional sort-direction suffix: "descending" -> 1,
 * "ascending" -> 0; other returns (NULL/invalid cases) are elided here.
 */
620 static int is_descending(const char *str)
625 if (strcmp(str, "descending") == 0)
628 if (strcmp(str, "ascending") == 0)
/*
 * Parse sort= into hist_data->sort_keys: each "field[.direction]" entry is
 * matched against hitcount or the configured value fields by name.
 */
634 static int create_sort_keys(struct hist_trigger_data *hist_data)
636 char *fields_str = hist_data->attrs->sort_key_str;
637 struct ftrace_event_field *field = NULL;
638 struct tracing_map_sort_key *sort_key;
639 int descending, ret = 0;
642 hist_data->n_sort_keys = 1; /* we always have at least one, hitcount */
647 strsep(&fields_str, "=");
653 for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) {
654 char *field_str, *field_name;
656 sort_key = &hist_data->sort_keys[i];
658 field_str = strsep(&fields_str, ",");
/* More entries than slots is an error (handling elided). */
665 if ((i == TRACING_MAP_SORT_KEYS_MAX - 1) && fields_str) {
670 field_name = strsep(&field_str, ".");
676 if (strcmp(field_name, "hitcount") == 0) {
677 descending = is_descending(field_str);
678 if (descending < 0) {
682 sort_key->descending = descending;
/* Otherwise match against non-hitcount fields by event field name. */
686 for (j = 1; j < hist_data->n_fields; j++) {
687 field = hist_data->fields[j]->field;
688 if (field && (strcmp(field_name, field->name) == 0)) {
689 sort_key->field_idx = j;
690 descending = is_descending(field_str);
691 if (descending < 0) {
695 sort_key->descending = descending;
/* No field matched the requested sort name. */
699 if (j == hist_data->n_fields) {
704 hist_data->n_sort_keys = i;
/* Tear down a trigger's attrs, fields and backing tracing_map. */
709 static void destroy_hist_data(struct hist_trigger_data *hist_data)
711 destroy_hist_trigger_attrs(hist_data->attrs);
712 destroy_hist_fields(hist_data);
713 tracing_map_destroy(hist_data->map);
/*
 * Register each hist field with the tracing_map: keys get a comparator
 * chosen by type (none for stacktraces, string compare, or sized numeric
 * compare); everything else becomes a sum field.
 */
717 static int create_tracing_map_fields(struct hist_trigger_data *hist_data)
719 struct tracing_map *map = hist_data->map;
720 struct ftrace_event_field *field;
721 struct hist_field *hist_field;
724 for_each_hist_field(i, hist_data) {
725 hist_field = hist_data->fields[i];
726 if (hist_field->flags & HIST_FIELD_FL_KEY) {
727 tracing_map_cmp_fn_t cmp_fn;
729 field = hist_field->field;
731 if (hist_field->flags & HIST_FIELD_FL_STACKTRACE)
732 cmp_fn = tracing_map_cmp_none;
733 else if (is_string_field(field))
734 cmp_fn = tracing_map_cmp_string;
736 cmp_fn = tracing_map_cmp_num(field->size,
738 idx = tracing_map_add_key_field(map,
743 idx = tracing_map_add_sum_field(map);
/* Element lifecycle ops are only needed when execname comms are tracked. */
752 static bool need_tracing_map_ops(struct hist_trigger_data *hist_data)
754 struct hist_field *key_field;
757 for_each_hist_key_field(i, hist_data) {
758 key_field = hist_data->fields[i];
760 if (key_field->flags & HIST_FIELD_FL_EXECNAME)
/*
 * Build a complete hist_trigger_data: parse fields and sort keys, create
 * and initialize the tracing_map.  Takes ownership of attrs on success;
 * on failure attrs is detached (line 813) before destroy so the caller
 * can free it — confirm against the full error path, which is elided.
 */
767 static struct hist_trigger_data *
768 create_hist_data(unsigned int map_bits,
769 struct hist_trigger_attrs *attrs,
770 struct trace_event_file *file)
772 const struct tracing_map_ops *map_ops = NULL;
773 struct hist_trigger_data *hist_data;
776 hist_data = kzalloc(sizeof(*hist_data), GFP_KERNEL);
778 return ERR_PTR(-ENOMEM);
780 hist_data->attrs = attrs;
782 ret = create_hist_fields(hist_data, file);
786 ret = create_sort_keys(hist_data);
790 if (need_tracing_map_ops(hist_data))
791 map_ops = &hist_trigger_elt_comm_ops;
793 hist_data->map = tracing_map_create(map_bits, hist_data->key_size,
795 if (IS_ERR(hist_data->map)) {
796 ret = PTR_ERR(hist_data->map);
797 hist_data->map = NULL;
801 ret = create_tracing_map_fields(hist_data);
805 ret = tracing_map_init(hist_data->map);
809 hist_data->event_file = file;
813 hist_data->attrs = NULL;
815 destroy_hist_data(hist_data);
817 hist_data = ERR_PTR(ret);
/* Accumulate every value field of 'rec' into the map element's sums. */
822 static void hist_trigger_elt_update(struct hist_trigger_data *hist_data,
823 struct tracing_map_elt *elt,
826 struct hist_field *hist_field;
830 for_each_hist_val_field(i, hist_data) {
831 hist_field = hist_data->fields[i];
832 hist_val = hist_field->fn(hist_field, rec);
833 tracing_map_update_sum(elt, i, hist_val);
/*
 * Copy one key's bytes into the compound key at its offset.  For strings
 * the copy length comes from the record (dyn-string length word, strlen
 * for ptr/static — those branches are elided here), clamped to leave room
 * for a NUL terminator.
 */
837 static inline void add_to_key(char *compound_key, void *key,
838 struct hist_field *key_field, void *rec)
840 size_t size = key_field->size;
842 if (key_field->flags & HIST_FIELD_FL_STRING) {
843 struct ftrace_event_field *field;
845 field = key_field->field;
/* Dyn-string length lives in the high 16 bits of the location word. */
846 if (field->filter_type == FILTER_DYN_STRING)
847 size = *(u32 *)(rec + field->offset) >> 16;
848 else if (field->filter_type == FILTER_PTR_STRING)
850 else if (field->filter_type == FILTER_STATIC_STRING)
853 /* ensure NULL-termination */
854 if (size > key_field->size - 1)
855 size = key_field->size - 1;
858 memcpy(compound_key + key_field->offset, key, size);
/*
 * Per-event hot path: build the (possibly compound) key from all key
 * fields — capturing a stacktrace if requested — then insert/look up the
 * map element and update its sums.
 */
861 static void event_hist_trigger(struct event_trigger_data *data, void *rec)
863 struct hist_trigger_data *hist_data = data->private_data;
864 bool use_compound_key = (hist_data->n_keys > 1);
865 unsigned long entries[HIST_STACKTRACE_DEPTH];
866 char compound_key[HIST_KEY_SIZE_MAX];
867 struct stack_trace stacktrace;
868 struct hist_field *key_field;
869 struct tracing_map_elt *elt;
874 memset(compound_key, 0, hist_data->key_size);
876 for_each_hist_key_field(i, hist_data) {
877 key_field = hist_data->fields[i];
879 if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
880 stacktrace.max_entries = HIST_STACKTRACE_DEPTH;
881 stacktrace.entries = entries;
882 stacktrace.nr_entries = 0;
883 stacktrace.skip = HIST_STACKTRACE_SKIP;
885 memset(stacktrace.entries, 0, HIST_STACKTRACE_SIZE);
886 save_stack_trace(&stacktrace);
890 field_contents = key_field->fn(key_field, rec);
/* String keys force the compound path so bytes are copied, not pointers. */
891 if (key_field->flags & HIST_FIELD_FL_STRING) {
892 key = (void *)(unsigned long)field_contents;
893 use_compound_key = true;
895 key = (void *)&field_contents;
898 if (use_compound_key)
899 add_to_key(compound_key, key, key_field, rec);
902 if (use_compound_key)
905 elt = tracing_map_insert(hist_data->map, key);
907 hist_trigger_elt_update(hist_data, elt, rec);
/* Print a saved stacktrace, one resolved symbol per indented line;
 * ULONG_MAX marks the end of valid entries. */
910 static void hist_trigger_stacktrace_print(struct seq_file *m,
911 unsigned long *stacktrace_entries,
912 unsigned int max_entries)
914 char str[KSYM_SYMBOL_LEN];
915 unsigned int spaces = 8;
918 for (i = 0; i < max_entries; i++) {
919 if (stacktrace_entries[i] == ULONG_MAX)
922 seq_printf(m, "%*c", 1 + spaces, ' ');
923 sprint_symbol(str, stacktrace_entries[i]);
924 seq_printf(m, "%s\n", str);
/*
 * Print one histogram entry: each key formatted per its modifier flags
 * (hex/sym/sym-offset/execname/syscall/stacktrace/string/plain decimal),
 * then the hitcount and remaining value sums.
 */
929 hist_trigger_entry_print(struct seq_file *m,
930 struct hist_trigger_data *hist_data, void *key,
931 struct tracing_map_elt *elt)
933 struct hist_field *key_field;
934 char str[KSYM_SYMBOL_LEN];
935 bool multiline = false;
941 for_each_hist_key_field(i, hist_data) {
942 key_field = hist_data->fields[i];
/* Separator between keys after the first (separator line elided). */
944 if (i > hist_data->n_vals)
947 if (key_field->flags & HIST_FIELD_FL_HEX) {
948 uval = *(u64 *)(key + key_field->offset);
949 seq_printf(m, "%s: %llx",
950 key_field->field->name, uval);
951 } else if (key_field->flags & HIST_FIELD_FL_SYM) {
952 uval = *(u64 *)(key + key_field->offset);
953 sprint_symbol_no_offset(str, uval);
954 seq_printf(m, "%s: [%llx] %-45s",
955 key_field->field->name, uval, str);
956 } else if (key_field->flags & HIST_FIELD_FL_SYM_OFFSET) {
957 uval = *(u64 *)(key + key_field->offset);
958 sprint_symbol(str, uval);
959 seq_printf(m, "%s: [%llx] %-55s",
960 key_field->field->name, uval, str);
961 } else if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
/* comm saved at element allocation by the comm elt ops. */
962 char *comm = elt->private_data;
964 uval = *(u64 *)(key + key_field->offset);
965 seq_printf(m, "%s: %-16s[%10llu]",
966 key_field->field->name, comm, uval);
967 } else if (key_field->flags & HIST_FIELD_FL_SYSCALL) {
968 const char *syscall_name;
970 uval = *(u64 *)(key + key_field->offset);
971 syscall_name = get_syscall_name(uval);
973 syscall_name = "unknown_syscall";
975 seq_printf(m, "%s: %-30s[%3llu]",
976 key_field->field->name, syscall_name, uval);
977 } else if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
978 seq_puts(m, "stacktrace:\n");
979 hist_trigger_stacktrace_print(m,
980 key + key_field->offset,
981 HIST_STACKTRACE_DEPTH);
983 } else if (key_field->flags & HIST_FIELD_FL_STRING) {
984 seq_printf(m, "%s: %-50s", key_field->field->name,
985 (char *)(key + key_field->offset));
987 uval = *(u64 *)(key + key_field->offset);
988 seq_printf(m, "%s: %10llu", key_field->field->name,
/* Values: hitcount first, then the remaining sums (hex or decimal). */
998 seq_printf(m, " hitcount: %10llu",
999 tracing_map_read_sum(elt, HITCOUNT_IDX));
1001 for (i = 1; i < hist_data->n_vals; i++) {
1002 if (hist_data->fields[i]->flags & HIST_FIELD_FL_HEX) {
1003 seq_printf(m, " %s: %10llx",
1004 hist_data->fields[i]->field->name,
1005 tracing_map_read_sum(elt, i));
1007 seq_printf(m, " %s: %10llu",
1008 hist_data->fields[i]->field->name,
1009 tracing_map_read_sum(elt, i));
/*
 * Sort the map's entries per the configured sort keys, print each, and
 * free the sort array.  Returns n_entries (negative-error path elided).
 */
1016 static int print_entries(struct seq_file *m,
1017 struct hist_trigger_data *hist_data)
1019 struct tracing_map_sort_entry **sort_entries = NULL;
1020 struct tracing_map *map = hist_data->map;
1021 unsigned int i, n_entries;
1023 n_entries = tracing_map_sort_entries(map, hist_data->sort_keys,
1024 hist_data->n_sort_keys,
1029 for (i = 0; i < n_entries; i++)
1030 hist_trigger_entry_print(m, hist_data,
1031 sort_entries[i]->key,
1032 sort_entries[i]->elt);
1034 tracing_map_destroy_sort_entries(sort_entries, n_entries);
/* Emit one trigger's header (its own print op), its entries, and totals. */
1039 static void hist_trigger_show(struct seq_file *m,
1040 struct event_trigger_data *data, int n)
1042 struct hist_trigger_data *hist_data;
1043 int n_entries, ret = 0;
1046 seq_puts(m, "\n\n");
1048 seq_puts(m, "# event histogram\n#\n# trigger info: ");
1049 data->ops->print(m, data->ops, data);
1050 seq_puts(m, "#\n\n");
1052 hist_data = data->private_data;
1053 n_entries = print_entries(m, hist_data);
1054 if (n_entries < 0) {
1059 seq_printf(m, "\nTotals:\n Hits: %llu\n Entries: %u\n Dropped: %llu\n",
1060 (u64)atomic64_read(&hist_data->map->hits),
1061 n_entries, (u64)atomic64_read(&hist_data->map->drops));
/*
 * seq_file show op for the event's "hist" file: under event_mutex, show
 * every ETT_EVENT_HIST trigger attached to the event.
 */
1064 static int hist_show(struct seq_file *m, void *v)
1066 struct event_trigger_data *data;
1067 struct trace_event_file *event_file;
1070 mutex_lock(&event_mutex);
1072 event_file = event_file_data(m->private);
1073 if (unlikely(!event_file)) {
1078 list_for_each_entry_rcu(data, &event_file->triggers, list) {
1079 if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
1080 hist_trigger_show(m, data, n++);
1084 mutex_unlock(&event_mutex);
/* open() wires the file to hist_show via single_open. */
1089 static int event_hist_open(struct inode *inode, struct file *file)
1091 return single_open(file, hist_show, file);
/* File operations for the per-event "hist" file. */
1094 const struct file_operations event_hist_fops = {
1095 .open = event_hist_open,
1097 .llseek = seq_lseek,
1098 .release = single_release,
/* Map a field's modifier flag back to its trigger-string suffix;
 * NULL when no display modifier is set (some strings elided here). */
1101 static const char *get_hist_field_flags(struct hist_field *hist_field)
1103 const char *flags_str = NULL;
1105 if (hist_field->flags & HIST_FIELD_FL_HEX)
1107 else if (hist_field->flags & HIST_FIELD_FL_SYM)
1109 else if (hist_field->flags & HIST_FIELD_FL_SYM_OFFSET)
1110 flags_str = "sym-offset";
1111 else if (hist_field->flags & HIST_FIELD_FL_EXECNAME)
1112 flags_str = "execname";
1113 else if (hist_field->flags & HIST_FIELD_FL_SYSCALL)
1114 flags_str = "syscall";
/* Print "name" or "name.modifier" for one field. */
1119 static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
1121 seq_printf(m, "%s", hist_field->field->name);
1122 if (hist_field->flags) {
1123 const char *flags_str = get_hist_field_flags(hist_field);
1126 seq_printf(m, ".%s", flags_str);
/*
 * Reconstruct the full trigger string ("hist:keys=...:vals=...:sort=...:
 * size=...[ if filter] [state]") for display in the hist/trigger files.
 */
1130 static int event_hist_trigger_print(struct seq_file *m,
1131 struct event_trigger_ops *ops,
1132 struct event_trigger_data *data)
1134 struct hist_trigger_data *hist_data = data->private_data;
1135 struct hist_field *key_field;
1138 seq_puts(m, "hist:");
1141 seq_printf(m, "%s:", data->name);
1143 seq_puts(m, "keys=");
1145 for_each_hist_key_field(i, hist_data) {
1146 key_field = hist_data->fields[i];
1148 if (i > hist_data->n_vals)
1151 if (key_field->flags & HIST_FIELD_FL_STACKTRACE)
1152 seq_puts(m, "stacktrace");
1154 hist_field_print(m, key_field);
1157 seq_puts(m, ":vals=");
1159 for_each_hist_val_field(i, hist_data) {
1160 if (i == HITCOUNT_IDX)
1161 seq_puts(m, "hitcount");
1164 hist_field_print(m, hist_data->fields[i]);
1168 seq_puts(m, ":sort=");
1170 for (i = 0; i < hist_data->n_sort_keys; i++) {
1171 struct tracing_map_sort_key *sort_key;
1173 sort_key = &hist_data->sort_keys[i];
1178 if (sort_key->field_idx == HITCOUNT_IDX)
1179 seq_puts(m, "hitcount");
1181 unsigned int idx = sort_key->field_idx;
1183 if (WARN_ON(idx >= TRACING_MAP_FIELDS_MAX))
1186 hist_field_print(m, hist_data->fields[idx]);
1189 if (sort_key->descending)
1190 seq_puts(m, ".descending");
1193 seq_printf(m, ":size=%u", (1 << hist_data->map->map_bits));
1195 if (data->filter_str)
1196 seq_printf(m, " if %s", data->filter_str);
1199 seq_puts(m, " [paused]");
1201 seq_puts(m, " [active]");
/* On first reference of a named trigger, register it in the named-trigger
 * list; refcounting lines are elided in this listing. */
1208 static int event_hist_trigger_init(struct event_trigger_ops *ops,
1209 struct event_trigger_data *data)
1211 struct hist_trigger_data *hist_data = data->private_data;
1213 if (!data->ref && hist_data->attrs->name)
1214 save_named_trigger(hist_data->attrs->name, data);
/* Drop a reference; on last put, unname and free trigger and hist data. */
1221 static void event_hist_trigger_free(struct event_trigger_ops *ops,
1222 struct event_trigger_data *data)
1224 struct hist_trigger_data *hist_data = data->private_data;
1226 if (WARN_ON_ONCE(data->ref <= 0))
1232 del_named_trigger(data);
1233 trigger_data_free(data);
1234 destroy_hist_data(hist_data);
/* Ops for a standalone (non-named) hist trigger. */
1238 static struct event_trigger_ops event_hist_trigger_ops = {
1239 .func = event_hist_trigger,
1240 .print = event_hist_trigger_print,
1241 .init = event_hist_trigger_init,
1242 .free = event_hist_trigger_free,
/* Named variant: init/free delegate to the canonical named_data instance. */
1245 static int event_hist_trigger_named_init(struct event_trigger_ops *ops,
1246 struct event_trigger_data *data)
1250 save_named_trigger(data->named_data->name, data);
1252 event_hist_trigger_init(ops, data->named_data);
1257 static void event_hist_trigger_named_free(struct event_trigger_ops *ops,
1258 struct event_trigger_data *data)
1260 if (WARN_ON_ONCE(data->ref <= 0))
1263 event_hist_trigger_free(ops, data->named_data);
1267 del_named_trigger(data);
1268 trigger_data_free(data);
/* Ops for a trigger that shares a named trigger's data. */
1272 static struct event_trigger_ops event_hist_trigger_named_ops = {
1273 .func = event_hist_trigger,
1274 .print = event_hist_trigger_print,
1275 .init = event_hist_trigger_named_init,
1276 .free = event_hist_trigger_named_free,
/* Command callback: hist triggers always start with the standalone ops. */
1279 static struct event_trigger_ops *event_hist_get_trigger_ops(char *cmd,
1282 return &event_hist_trigger_ops;
/* Clear a histogram: pause (named case), wait a grace period so the hot
 * path is quiesced, clear the map, then unpause. */
1285 static void hist_clear(struct event_trigger_data *data)
1287 struct hist_trigger_data *hist_data = data->private_data;
1290 pause_named_trigger(data);
1292 synchronize_sched();
1294 tracing_map_clear(hist_data->map);
1297 unpause_named_trigger(data);
/* Two event fields are compatible if identical, or equal in name, type,
 * size and signedness (lets named triggers span different events). */
1300 static bool compatible_field(struct ftrace_event_field *field,
1301 struct ftrace_event_field *test_field)
1303 if (field == test_field)
1305 if (field == NULL || test_field == NULL)
1307 if (strcmp(field->name, test_field->name) != 0)
1309 if (strcmp(field->type, test_field->type) != 0)
1311 if (field->size != test_field->size)
1313 if (field->is_signed != test_field->is_signed)
/*
 * Decide whether two hist triggers describe the same histogram: same
 * field counts, per-field flags/offsets/compatible fields, same sort
 * keys, and (unless ignore_filter) the same filter string.
 */
1319 static bool hist_trigger_match(struct event_trigger_data *data,
1320 struct event_trigger_data *data_test,
1321 struct event_trigger_data *named_data,
1324 struct tracing_map_sort_key *sort_key, *sort_key_test;
1325 struct hist_trigger_data *hist_data, *hist_data_test;
1326 struct hist_field *key_field, *key_field_test;
1329 if (named_data && (named_data != data_test) &&
1330 (named_data != data_test->named_data))
1333 if (!named_data && is_named_trigger(data_test))
1336 hist_data = data->private_data;
1337 hist_data_test = data_test->private_data;
1339 if (hist_data->n_vals != hist_data_test->n_vals ||
1340 hist_data->n_fields != hist_data_test->n_fields ||
1341 hist_data->n_sort_keys != hist_data_test->n_sort_keys)
1344 if (!ignore_filter) {
1345 if ((data->filter_str && !data_test->filter_str) ||
1346 (!data->filter_str && data_test->filter_str))
1350 for_each_hist_field(i, hist_data) {
1351 key_field = hist_data->fields[i];
1352 key_field_test = hist_data_test->fields[i];
1354 if (key_field->flags != key_field_test->flags)
1356 if (!compatible_field(key_field->field, key_field_test->field))
1358 if (key_field->offset != key_field_test->offset)
1362 for (i = 0; i < hist_data->n_sort_keys; i++) {
1363 sort_key = &hist_data->sort_keys[i];
1364 sort_key_test = &hist_data_test->sort_keys[i];
1366 if (sort_key->field_idx != sort_key_test->field_idx ||
1367 sort_key->descending != sort_key_test->descending)
1371 if (!ignore_filter && data->filter_str &&
1372 (strcmp(data->filter_str, data_test->filter_str) != 0))
/*
 * Register a hist trigger on an event.  Handles the named-trigger case
 * (must match the existing definition; shares its private_data), and the
 * pause/cont/clear attributes applied to an already-matching trigger.
 * Several error/early-return paths are elided in this listing.
 */
1378 static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
1379 struct event_trigger_data *data,
1380 struct trace_event_file *file)
1382 struct hist_trigger_data *hist_data = data->private_data;
1383 struct event_trigger_data *test, *named_data = NULL;
1386 if (hist_data->attrs->name) {
1387 named_data = find_named_trigger(hist_data->attrs->name);
1389 if (!hist_trigger_match(data, named_data, named_data,
1397 if (hist_data->attrs->name && !named_data)
/* An existing matching trigger absorbs pause/cont/clear requests. */
1400 list_for_each_entry_rcu(test, &file->triggers, list) {
1401 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
1402 if (!hist_trigger_match(data, test, named_data, false))
1404 if (hist_data->attrs->pause)
1405 test->paused = true;
1406 else if (hist_data->attrs->cont)
1407 test->paused = false;
1408 else if (hist_data->attrs->clear)
1416 if (hist_data->attrs->cont || hist_data->attrs->clear) {
/* Named trigger: drop our own data and share the canonical instance's. */
1422 destroy_hist_data(data->private_data);
1423 data->private_data = named_data->private_data;
1424 set_named_trigger_data(data, named_data);
1425 data->ops = &event_hist_trigger_named_ops;
1428 if (hist_data->attrs->pause)
1429 data->paused = true;
1431 if (data->ops->init) {
1432 ret = data->ops->init(data->ops, data);
1437 list_add_rcu(&data->list, &file->triggers);
1440 update_cond_flag(file);
/* Roll back the list add if enabling the event fails. */
1442 if (trace_event_trigger_enable_disable(file, 1) < 0) {
1443 list_del_rcu(&data->list);
1444 update_cond_flag(file);
/* Remove the trigger matching 'data' from the event's trigger list. */
1451 static void hist_unregister_trigger(char *glob, struct event_trigger_ops *ops,
1452 struct event_trigger_data *data,
1453 struct trace_event_file *file)
1455 struct hist_trigger_data *hist_data = data->private_data;
1456 struct event_trigger_data *test, *named_data = NULL;
1457 bool unregistered = false;
1459 if (hist_data->attrs->name)
1460 named_data = find_named_trigger(hist_data->attrs->name);
1462 list_for_each_entry_rcu(test, &file->triggers, list) {
1463 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
1464 if (!hist_trigger_match(data, test, named_data, false))
1466 unregistered = true;
1467 list_del_rcu(&test->list);
1468 trace_event_trigger_enable_disable(file, 0);
1469 update_cond_flag(file);
1474 if (unregistered && test->ops->free)
1475 test->ops->free(test->ops, test);
/* Remove and free every hist trigger attached to the event file. */
1478 static void hist_unreg_all(struct trace_event_file *file)
1480 struct event_trigger_data *test;
1482 list_for_each_entry_rcu(test, &file->triggers, list) {
1483 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
1484 list_del_rcu(&test->list);
1485 trace_event_trigger_enable_disable(file, 0);
1486 update_cond_flag(file);
1487 if (test->ops->free)
1488 test->ops->free(test->ops, test);
/*
 * Command handler for "hist:..." written to an event's trigger file:
 * parse attributes, build hist data, allocate trigger_data, attach an
 * optional filter, then register (or unregister when glob starts '!').
 * Error-unwind labels/gotos are elided in this listing.
 */
1493 static int event_hist_trigger_func(struct event_command *cmd_ops,
1494 struct trace_event_file *file,
1495 char *glob, char *cmd, char *param)
1497 unsigned int hist_trigger_bits = TRACING_MAP_BITS_DEFAULT;
1498 struct event_trigger_data *trigger_data;
1499 struct hist_trigger_attrs *attrs;
1500 struct event_trigger_ops *trigger_ops;
1501 struct hist_trigger_data *hist_data;
1508 /* separate the trigger from the filter (k:v [if filter]) */
1509 trigger = strsep(&param, " \t");
1513 attrs = parse_hist_trigger_attrs(trigger);
1515 return PTR_ERR(attrs);
1517 if (attrs->map_bits)
1518 hist_trigger_bits = attrs->map_bits;
1520 hist_data = create_hist_data(hist_trigger_bits, attrs, file);
1521 if (IS_ERR(hist_data)) {
1522 destroy_hist_trigger_attrs(attrs);
1523 return PTR_ERR(hist_data);
1526 trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
1529 trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
1533 trigger_data->count = -1;
1534 trigger_data->ops = trigger_ops;
1535 trigger_data->cmd_ops = cmd_ops;
1537 INIT_LIST_HEAD(&trigger_data->list);
1538 RCU_INIT_POINTER(trigger_data->filter, NULL);
1540 trigger_data->private_data = hist_data;
1542 /* if param is non-empty, it's supposed to be a filter */
1543 if (param && cmd_ops->set_filter) {
1544 ret = cmd_ops->set_filter(param, trigger_data, file);
/* "!hist:..." removes the matching trigger instead of adding one. */
1549 if (glob[0] == '!') {
1550 cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
1555 ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
1557 * The above returns on success the # of triggers registered,
1558 * but if it didn't register any it returns zero. Consider no
1559 * triggers registered a failure too.
1562 if (!(attrs->pause || attrs->cont || attrs->clear))
1567 /* Just return zero, not the number of registered triggers */
/* Error unwind: detach filter, free trigger_data and hist_data. */
1572 if (cmd_ops->set_filter)
1573 cmd_ops->set_filter(NULL, trigger_data, NULL);
1575 kfree(trigger_data);
1577 destroy_hist_data(hist_data);
/* The "hist" event command definition. */
1581 static struct event_command trigger_hist_cmd = {
1583 .trigger_type = ETT_EVENT_HIST,
1584 .flags = EVENT_CMD_FL_NEEDS_REC,
1585 .func = event_hist_trigger_func,
1586 .reg = hist_register_trigger,
1587 .unreg = hist_unregister_trigger,
1588 .unreg_all = hist_unreg_all,
1589 .get_trigger_ops = event_hist_get_trigger_ops,
1590 .set_filter = set_trigger_filter,
/* Boot-time registration of the hist command. */
1593 __init int register_trigger_hist_cmd(void)
1597 ret = register_event_command(&trigger_hist_cmd);
/*
 * enable_hist/disable_hist support: when this trigger fires, unpause or
 * pause every hist trigger on the *target* event file.
 */
1604 hist_enable_trigger(struct event_trigger_data *data, void *rec)
1606 struct enable_trigger_data *enable_data = data->private_data;
1607 struct event_trigger_data *test;
1609 list_for_each_entry_rcu(test, &enable_data->file->triggers, list) {
1610 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
1611 if (enable_data->enable)
1612 test->paused = false;
1614 test->paused = true;
/* Counted variant: acts only while the trigger's count remains; the
 * count-decrement lines are elided in this listing. */
1620 hist_enable_count_trigger(struct event_trigger_data *data, void *rec)
1625 if (data->count != -1)
1628 hist_enable_trigger(data, rec);
/* Four ops tables: {enable,disable} x {uncounted,counted}.  Enable vs
 * disable is distinguished by the command name at registration time. */
1631 static struct event_trigger_ops hist_enable_trigger_ops = {
1632 .func = hist_enable_trigger,
1633 .print = event_enable_trigger_print,
1634 .init = event_trigger_init,
1635 .free = event_enable_trigger_free,
1638 static struct event_trigger_ops hist_enable_count_trigger_ops = {
1639 .func = hist_enable_count_trigger,
1640 .print = event_enable_trigger_print,
1641 .init = event_trigger_init,
1642 .free = event_enable_trigger_free,
1645 static struct event_trigger_ops hist_disable_trigger_ops = {
1646 .func = hist_enable_trigger,
1647 .print = event_enable_trigger_print,
1648 .init = event_trigger_init,
1649 .free = event_enable_trigger_free,
1652 static struct event_trigger_ops hist_disable_count_trigger_ops = {
1653 .func = hist_enable_count_trigger,
1654 .print = event_enable_trigger_print,
1655 .init = event_trigger_init,
1656 .free = event_enable_trigger_free,
/* Pick the ops table from the command name (enable vs disable) and the
 * presence of a count parameter. */
1659 static struct event_trigger_ops *
1660 hist_enable_get_trigger_ops(char *cmd, char *param)
1662 struct event_trigger_ops *ops;
1665 enable = (strcmp(cmd, ENABLE_HIST_STR) == 0);
1668 ops = param ? &hist_enable_count_trigger_ops :
1669 &hist_enable_trigger_ops;
1671 ops = param ? &hist_disable_count_trigger_ops :
1672 &hist_disable_trigger_ops;
/* Remove and free every hist-enable trigger attached to the file. */
1677 static void hist_enable_unreg_all(struct trace_event_file *file)
1679 struct event_trigger_data *test;
1681 list_for_each_entry_rcu(test, &file->triggers, list) {
1682 if (test->cmd_ops->trigger_type == ETT_HIST_ENABLE) {
1683 list_del_rcu(&test->list);
1684 update_cond_flag(file);
1685 trace_event_trigger_enable_disable(file, 0);
1686 if (test->ops->free)
1687 test->ops->free(test->ops, test);
/* enable_hist / disable_hist command definitions (shared reg/unreg with
 * the generic event-enable machinery). */
1692 static struct event_command trigger_hist_enable_cmd = {
1693 .name = ENABLE_HIST_STR,
1694 .trigger_type = ETT_HIST_ENABLE,
1695 .func = event_enable_trigger_func,
1696 .reg = event_enable_register_trigger,
1697 .unreg = event_enable_unregister_trigger,
1698 .unreg_all = hist_enable_unreg_all,
1699 .get_trigger_ops = hist_enable_get_trigger_ops,
1700 .set_filter = set_trigger_filter,
1703 static struct event_command trigger_hist_disable_cmd = {
1704 .name = DISABLE_HIST_STR,
1705 .trigger_type = ETT_HIST_ENABLE,
1706 .func = event_enable_trigger_func,
1707 .reg = event_enable_register_trigger,
1708 .unreg = event_enable_unregister_trigger,
1709 .unreg_all = hist_enable_unreg_all,
1710 .get_trigger_ops = hist_enable_get_trigger_ops,
1711 .set_filter = set_trigger_filter,
1714 static __init void unregister_trigger_hist_enable_disable_cmds(void)
1716 unregister_event_command(&trigger_hist_enable_cmd);
1717 unregister_event_command(&trigger_hist_disable_cmd);
/* Register both commands; on partial failure, unregister both. */
1720 __init int register_trigger_hist_enable_disable_cmds(void)
1724 ret = register_event_command(&trigger_hist_enable_cmd);
1725 if (WARN_ON(ret < 0))
1727 ret = register_event_command(&trigger_hist_disable_cmd);
1728 if (WARN_ON(ret < 0))
1729 unregister_trigger_hist_enable_disable_cmds();