2 * trace_events_hist - trace event hist triggers
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
14 * Copyright (C) 2015 Tom Zanussi <tom.zanussi@linux.intel.com>
17 #include <linux/module.h>
18 #include <linux/kallsyms.h>
19 #include <linux/mutex.h>
20 #include <linux/slab.h>
21 #include <linux/stacktrace.h>
23 #include "tracing_map.h"
/*
 * Value-accessor callbacks: each hist_field carries an fn that extracts a
 * u64 from the raw trace event record.  NOTE(review): this listing is a
 * sampled extraction — braces/returns of several bodies are not shown.
 */
28 typedef u64 (*hist_field_fn_t) (struct hist_field *field, void *event);
/* (fragment of struct hist_field: the ftrace field this hist_field wraps) */
31 struct ftrace_event_field *field;
/* Placeholder accessor for keys with no per-event value (e.g. stacktrace). */
38 static u64 hist_field_none(struct hist_field *field, void *event)
/* Accessor for the implicit hitcount value. */
43 static u64 hist_field_counter(struct hist_field *field, void *event)
/* Static (fixed-size, in-record) string: return address of the chars. */
48 static u64 hist_field_string(struct hist_field *hist_field, void *event)
50 char *addr = (char *)(event + hist_field->field->offset);
52 return (u64)(unsigned long)addr;
/* Dynamic string: record holds a u32 descriptor; low 16 bits = offset. */
55 static u64 hist_field_dynstring(struct hist_field *hist_field, void *event)
57 u32 str_item = *(u32 *)(event + hist_field->field->offset);
58 int str_loc = str_item & 0xffff;
59 char *addr = (char *)(event + str_loc);
61 return (u64)(unsigned long)addr;
/* Pointer-to-string field: dereference the char * stored in the record. */
64 static u64 hist_field_pstring(struct hist_field *hist_field, void *event)
66 char **addr = (char **)(event + hist_field->field->offset);
68 return (u64)(unsigned long)*addr;
/* .log2 modifier: bucket the value by power of two (rounded up). */
71 static u64 hist_field_log2(struct hist_field *hist_field, void *event)
73 u64 val = *(u64 *)(event + hist_field->field->offset);
75 return (u64) ilog2(roundup_pow_of_two(val));
/*
 * Generate typed numeric accessors (hist_field_s64, hist_field_u32, ...)
 * that load a value of the given C type from the record and widen to u64.
 */
78 #define DEFINE_HIST_FIELD_FN(type) \
79 static u64 hist_field_##type(struct hist_field *hist_field, void *event)\
81 type *addr = (type *)(event + hist_field->field->offset); \
83 return (u64)(unsigned long)*addr; \
86 DEFINE_HIST_FIELD_FN(s64);
87 DEFINE_HIST_FIELD_FN(u64);
88 DEFINE_HIST_FIELD_FN(s32);
89 DEFINE_HIST_FIELD_FN(u32);
90 DEFINE_HIST_FIELD_FN(s16);
91 DEFINE_HIST_FIELD_FN(u16);
92 DEFINE_HIST_FIELD_FN(s8);
93 DEFINE_HIST_FIELD_FN(u8);
/*
 * Iteration helpers over hist_data->fields[]: values occupy [0, n_vals),
 * keys occupy [n_vals, n_fields).
 */
95 #define for_each_hist_field(i, hist_data) \
96 for ((i) = 0; (i) < (hist_data)->n_fields; (i)++)
98 #define for_each_hist_val_field(i, hist_data) \
99 for ((i) = 0; (i) < (hist_data)->n_vals; (i)++)
101 #define for_each_hist_key_field(i, hist_data) \
102 for ((i) = (hist_data)->n_vals; (i) < (hist_data)->n_fields; (i)++)
/* Stacktrace key: up to 16 saved return addresses, skipping the first 5
 * frames (the tracing machinery itself). */
104 #define HIST_STACKTRACE_DEPTH 16
105 #define HIST_STACKTRACE_SIZE (HIST_STACKTRACE_DEPTH * sizeof(unsigned long))
106 #define HIST_STACKTRACE_SKIP 5
/* The implicit hitcount value always lives at fields[0]. */
108 #define HITCOUNT_IDX 0
/* Upper bound on the total compound-key size. */
109 #define HIST_KEY_SIZE_MAX (MAX_FILTER_STR_VAL + HIST_STACKTRACE_SIZE)
/*
 * Per-field flags: role (hitcount/key) plus display/key modifiers parsed
 * from the trigger string (.hex, .sym, .execname, .log2, ...).
 */
111 enum hist_field_flags {
112 HIST_FIELD_FL_HITCOUNT = 1,
113 HIST_FIELD_FL_KEY = 2,
114 HIST_FIELD_FL_STRING = 4,
115 HIST_FIELD_FL_HEX = 8,
116 HIST_FIELD_FL_SYM = 16,
117 HIST_FIELD_FL_SYM_OFFSET = 32,
118 HIST_FIELD_FL_EXECNAME = 64,
119 HIST_FIELD_FL_SYSCALL = 128,
120 HIST_FIELD_FL_STACKTRACE = 256,
121 HIST_FIELD_FL_LOG2 = 512,
/*
 * Parsed attributes of a hist trigger command string (keys=, vals=, sort=,
 * name=, size=, pause/cont/clear).  NOTE(review): listing is sampled; only
 * map_bits is visible here — other members (keys_str, vals_str,
 * sort_key_str, name, flags) are referenced elsewhere in this file.
 */
124 struct hist_trigger_attrs {
132 unsigned int map_bits;
/*
 * Everything needed to run one hist trigger: the parsed fields (values
 * first, then keys), sort keys, owning event file, and the backing
 * lock-free tracing_map.
 */
135 struct hist_trigger_data {
136 struct hist_field *fields[TRACING_MAP_FIELDS_MAX];
139 unsigned int n_fields;
140 unsigned int key_size;
141 struct tracing_map_sort_key sort_keys[TRACING_MAP_SORT_KEYS_MAX];
142 unsigned int n_sort_keys;
143 struct trace_event_file *event_file;
144 struct hist_trigger_attrs *attrs;
145 struct tracing_map *map;
/*
 * Pick the typed accessor (hist_field_s8..u64) matching a numeric field's
 * size and signedness; returns NULL for unsupported sizes.  (Switch body
 * not shown in this sampled listing.)
 */
148 static hist_field_fn_t select_value_fn(int field_size, int field_is_signed)
150 hist_field_fn_t fn = NULL;
152 switch (field_size) {
/*
 * Parse the "size=" attribute: convert the decimal string to a bucket
 * count, round up to a power of two, and validate the resulting number of
 * map bits against TRACING_MAP_BITS_MIN/MAX.
 */
182 static int parse_map_size(char *str)
184 unsigned long size, map_bits;
193 ret = kstrtoul(str, 0, &size);
197 map_bits = ilog2(roundup_pow_of_two(size));
198 if (map_bits < TRACING_MAP_BITS_MIN ||
199 map_bits > TRACING_MAP_BITS_MAX)
/* Free the attribute strings duplicated by parse_hist_trigger_attrs(). */
207 static void destroy_hist_trigger_attrs(struct hist_trigger_attrs *attrs)
213 kfree(attrs->sort_key_str);
214 kfree(attrs->keys_str);
215 kfree(attrs->vals_str);
/*
 * Tokenize the colon-separated trigger string and record each recognized
 * attribute (key(s)=, val(s)=/values=, sort=, name=, size=, pause, cont/
 * continue, clear).  "keys" is mandatory.  Returns ERR_PTR on failure,
 * with all partially-allocated strings released via
 * destroy_hist_trigger_attrs().
 */
219 static struct hist_trigger_attrs *parse_hist_trigger_attrs(char *trigger_str)
221 struct hist_trigger_attrs *attrs;
224 attrs = kzalloc(sizeof(*attrs), GFP_KERNEL);
226 return ERR_PTR(-ENOMEM);
228 while (trigger_str) {
229 char *str = strsep(&trigger_str, ":");
231 if ((strncmp(str, "key=", strlen("key=")) == 0) ||
232 (strncmp(str, "keys=", strlen("keys=")) == 0))
233 attrs->keys_str = kstrdup(str, GFP_KERNEL);
234 else if ((strncmp(str, "val=", strlen("val=")) == 0) ||
235 (strncmp(str, "vals=", strlen("vals=")) == 0) ||
236 (strncmp(str, "values=", strlen("values=")) == 0))
237 attrs->vals_str = kstrdup(str, GFP_KERNEL);
238 else if (strncmp(str, "sort=", strlen("sort=")) == 0)
239 attrs->sort_key_str = kstrdup(str, GFP_KERNEL);
240 else if (strncmp(str, "name=", strlen("name=")) == 0)
241 attrs->name = kstrdup(str, GFP_KERNEL);
242 else if (strcmp(str, "pause") == 0)
244 else if ((strcmp(str, "cont") == 0) ||
245 (strcmp(str, "continue") == 0))
247 else if (strcmp(str, "clear") == 0)
249 else if (strncmp(str, "size=", strlen("size=")) == 0) {
250 int map_bits = parse_map_size(str);
256 attrs->map_bits = map_bits;
/* A hist trigger without keys= is rejected. */
263 if (!attrs->keys_str) {
270 destroy_hist_trigger_attrs(attrs);
/*
 * Snapshot a task's comm for the .execname modifier.  PID 0 is reported
 * as "<idle>"; a negative PID (should never happen) as "<XXX>".
 */
275 static inline void save_comm(char *comm, struct task_struct *task)
278 strcpy(comm, "<idle>");
282 if (WARN_ON_ONCE(task->pid < 0)) {
283 strcpy(comm, "<XXX>");
287 memcpy(comm, task->comm, TASK_COMM_LEN);
/* tracing_map element callbacks managing the per-element comm buffer. */
290 static void hist_trigger_elt_comm_free(struct tracing_map_elt *elt)
292 kfree((char *)elt->private_data);
/*
 * Allocate a comm buffer (TASK_COMM_LEN + 1 for the terminating NUL) for
 * an element iff some key field uses the .execname modifier.
 */
295 static int hist_trigger_elt_comm_alloc(struct tracing_map_elt *elt)
297 struct hist_trigger_data *hist_data = elt->map->private_data;
298 struct hist_field *key_field;
301 for_each_hist_key_field(i, hist_data) {
302 key_field = hist_data->fields[i];
304 if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
305 unsigned int size = TASK_COMM_LEN + 1;
307 elt->private_data = kzalloc(size, GFP_KERNEL);
308 if (!elt->private_data)
/* Copy the saved comm when the map duplicates an element. */
317 static void hist_trigger_elt_comm_copy(struct tracing_map_elt *to,
318 struct tracing_map_elt *from)
320 char *comm_from = from->private_data;
321 char *comm_to = to->private_data;
324 memcpy(comm_to, comm_from, TASK_COMM_LEN + 1);
/* Record current's comm when an element is first claimed. */
327 static void hist_trigger_elt_comm_init(struct tracing_map_elt *elt)
329 char *comm = elt->private_data;
332 save_comm(comm, current);
335 static const struct tracing_map_ops hist_trigger_elt_comm_ops = {
336 .elt_alloc = hist_trigger_elt_comm_alloc,
337 .elt_copy = hist_trigger_elt_comm_copy,
338 .elt_free = hist_trigger_elt_comm_free,
339 .elt_init = hist_trigger_elt_comm_init,
/* Release one hist_field.  (Body not shown in this sampled listing.) */
342 static void destroy_hist_field(struct hist_field *hist_field)
/*
 * Allocate a hist_field and bind the accessor fn appropriate for the
 * flags/field type: hitcount counter, stacktrace placeholder, log2
 * bucketing, one of the three string accessors, or a sized numeric
 * accessor via select_value_fn().  Function-type fields are rejected.
 */
347 static struct hist_field *create_hist_field(struct ftrace_event_field *field,
350 struct hist_field *hist_field;
352 if (field && is_function_field(field))
355 hist_field = kzalloc(sizeof(struct hist_field), GFP_KERNEL);
359 if (flags & HIST_FIELD_FL_HITCOUNT) {
360 hist_field->fn = hist_field_counter;
364 if (flags & HIST_FIELD_FL_STACKTRACE) {
365 hist_field->fn = hist_field_none;
369 if (flags & HIST_FIELD_FL_LOG2) {
370 hist_field->fn = hist_field_log2;
374 if (is_string_field(field)) {
375 flags |= HIST_FIELD_FL_STRING;
377 if (field->filter_type == FILTER_STATIC_STRING)
378 hist_field->fn = hist_field_string;
379 else if (field->filter_type == FILTER_DYN_STRING)
380 hist_field->fn = hist_field_dynstring;
382 hist_field->fn = hist_field_pstring;
384 hist_field->fn = select_value_fn(field->size,
/* No accessor for this size/signedness combination -> fail. */
386 if (!hist_field->fn) {
387 destroy_hist_field(hist_field);
392 hist_field->field = field;
393 hist_field->flags = flags;
/* Free and NULL every populated slot in hist_data->fields[]. */
398 static void destroy_hist_fields(struct hist_trigger_data *hist_data)
402 for (i = 0; i < TRACING_MAP_FIELDS_MAX; i++) {
403 if (hist_data->fields[i]) {
404 destroy_hist_field(hist_data->fields[i]);
405 hist_data->fields[i] = NULL;
/* Install the always-present hitcount value at fields[HITCOUNT_IDX]. */
410 static int create_hitcount_val(struct hist_trigger_data *hist_data)
412 hist_data->fields[HITCOUNT_IDX] =
413 create_hist_field(NULL, HIST_FIELD_FL_HITCOUNT);
414 if (!hist_data->fields[HITCOUNT_IDX])
419 if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX))
/*
 * Create one value field from a "name[.modifier]" token.  Only the .hex
 * modifier is meaningful for values; the named ftrace field must exist on
 * the event.
 */
425 static int create_val_field(struct hist_trigger_data *hist_data,
426 unsigned int val_idx,
427 struct trace_event_file *file,
430 struct ftrace_event_field *field = NULL;
431 unsigned long flags = 0;
435 if (WARN_ON(val_idx >= TRACING_MAP_VALS_MAX))
438 field_name = strsep(&field_str, ".");
440 if (strcmp(field_str, "hex") == 0)
441 flags |= HIST_FIELD_FL_HEX;
448 field = trace_find_event_field(file->event_call, field_name);
454 hist_data->fields[val_idx] = create_hist_field(field, flags);
455 if (!hist_data->fields[val_idx]) {
462 if (WARN_ON(hist_data->n_vals > TRACING_MAP_VALS_MAX))
/*
 * Parse the comma-separated vals= list.  hitcount is created first and
 * implicitly; an explicit "hitcount" token is skipped rather than
 * duplicated (j starts at 1 for that reason).
 */
468 static int create_val_fields(struct hist_trigger_data *hist_data,
469 struct trace_event_file *file)
471 char *fields_str, *field_str;
475 ret = create_hitcount_val(hist_data);
479 fields_str = hist_data->attrs->vals_str;
/* Skip the "vals" prefix up to and including '='. */
483 strsep(&fields_str, "=");
487 for (i = 0, j = 1; i < TRACING_MAP_VALS_MAX &&
488 j < TRACING_MAP_VALS_MAX; i++) {
489 field_str = strsep(&fields_str, ",");
492 if (strcmp(field_str, "hitcount") == 0)
494 ret = create_val_field(hist_data, j++, file, field_str);
/* Leftover tokens (other than a trailing "hitcount") mean too many vals. */
498 if (fields_str && (strcmp(fields_str, "hitcount") != 0))
/*
 * Create one key field.  "stacktrace" is special-cased (no ftrace field,
 * fixed-size array of return addresses); otherwise the token is split
 * into field name and optional modifier (.hex/.sym/.sym-offset/
 * .execname (common_pid only)/.syscall/.log2).  Key sizes are u64-aligned
 * and accumulated into hist_data->key_size, bounded by HIST_KEY_SIZE_MAX.
 */
504 static int create_key_field(struct hist_trigger_data *hist_data,
505 unsigned int key_idx,
506 unsigned int key_offset,
507 struct trace_event_file *file,
510 struct ftrace_event_field *field = NULL;
511 unsigned long flags = 0;
512 unsigned int key_size;
515 if (WARN_ON(key_idx >= TRACING_MAP_FIELDS_MAX))
518 flags |= HIST_FIELD_FL_KEY;
520 if (strcmp(field_str, "stacktrace") == 0) {
521 flags |= HIST_FIELD_FL_STACKTRACE;
522 key_size = sizeof(unsigned long) * HIST_STACKTRACE_DEPTH;
524 char *field_name = strsep(&field_str, ".");
527 if (strcmp(field_str, "hex") == 0)
528 flags |= HIST_FIELD_FL_HEX;
529 else if (strcmp(field_str, "sym") == 0)
530 flags |= HIST_FIELD_FL_SYM;
531 else if (strcmp(field_str, "sym-offset") == 0)
532 flags |= HIST_FIELD_FL_SYM_OFFSET;
533 else if ((strcmp(field_str, "execname") == 0) &&
534 (strcmp(field_name, "common_pid") == 0))
535 flags |= HIST_FIELD_FL_EXECNAME;
536 else if (strcmp(field_str, "syscall") == 0)
537 flags |= HIST_FIELD_FL_SYSCALL;
538 else if (strcmp(field_str, "log2") == 0)
539 flags |= HIST_FIELD_FL_LOG2;
546 field = trace_find_event_field(file->event_call, field_name);
/* String keys reserve the max filter-string width. */
552 if (is_string_field(field))
553 key_size = MAX_FILTER_STR_VAL;
555 key_size = field->size;
558 hist_data->fields[key_idx] = create_hist_field(field, flags);
559 if (!hist_data->fields[key_idx]) {
/* Align each key on a u64 boundary within the compound key. */
564 key_size = ALIGN(key_size, sizeof(u64));
565 hist_data->fields[key_idx]->size = key_size;
566 hist_data->fields[key_idx]->offset = key_offset;
567 hist_data->key_size += key_size;
568 if (hist_data->key_size > HIST_KEY_SIZE_MAX) {
575 if (WARN_ON(hist_data->n_keys > TRACING_MAP_KEYS_MAX))
/*
 * Parse the comma-separated keys= list; key fields are appended after
 * the value fields (indices start at n_vals).
 */
583 static int create_key_fields(struct hist_trigger_data *hist_data,
584 struct trace_event_file *file)
586 unsigned int i, key_offset = 0, n_vals = hist_data->n_vals;
587 char *fields_str, *field_str;
590 fields_str = hist_data->attrs->keys_str;
594 strsep(&fields_str, "=");
598 for (i = n_vals; i < n_vals + TRACING_MAP_KEYS_MAX; i++) {
599 field_str = strsep(&fields_str, ",");
602 ret = create_key_field(hist_data, i, key_offset,
/* Build vals then keys; n_fields is their sum. */
617 static int create_hist_fields(struct hist_trigger_data *hist_data,
618 struct trace_event_file *file)
622 ret = create_val_fields(hist_data, file);
626 ret = create_key_fields(hist_data, file);
630 hist_data->n_fields = hist_data->n_vals + hist_data->n_keys;
/*
 * Map a sort-direction token to a tristate: "descending" -> 1,
 * "ascending" -> 0, anything else -> negative error.
 */
635 static int is_descending(const char *str)
640 if (strcmp(str, "descending") == 0)
643 if (strcmp(str, "ascending") == 0)
/*
 * Parse sort= into hist_data->sort_keys[].  Defaults to sorting by
 * hitcount when no sort attribute is given.  Each token is
 * "field[.direction]"; "hitcount" maps to field_idx 0, other names are
 * looked up among the configured fields (index 1..n_fields).
 */
649 static int create_sort_keys(struct hist_trigger_data *hist_data)
651 char *fields_str = hist_data->attrs->sort_key_str;
652 struct ftrace_event_field *field = NULL;
653 struct tracing_map_sort_key *sort_key;
654 int descending, ret = 0;
657 hist_data->n_sort_keys = 1; /* we always have at least one, hitcount */
662 strsep(&fields_str, "=");
668 for (i = 0; i < TRACING_MAP_SORT_KEYS_MAX; i++) {
669 char *field_str, *field_name;
671 sort_key = &hist_data->sort_keys[i];
673 field_str = strsep(&fields_str, ",");
/* More tokens than slots -> error. */
680 if ((i == TRACING_MAP_SORT_KEYS_MAX - 1) && fields_str) {
685 field_name = strsep(&field_str, ".");
691 if (strcmp(field_name, "hitcount") == 0) {
692 descending = is_descending(field_str);
693 if (descending < 0) {
697 sort_key->descending = descending;
/* Search the non-hitcount fields for a name match. */
701 for (j = 1; j < hist_data->n_fields; j++) {
702 field = hist_data->fields[j]->field;
703 if (field && (strcmp(field_name, field->name) == 0)) {
704 sort_key->field_idx = j;
705 descending = is_descending(field_str);
706 if (descending < 0) {
710 sort_key->descending = descending;
/* Name matched no configured field. */
714 if (j == hist_data->n_fields) {
719 hist_data->n_sort_keys = i;
/* Tear down a hist trigger: attrs, fields, and the backing tracing_map. */
724 static void destroy_hist_data(struct hist_trigger_data *hist_data)
726 destroy_hist_trigger_attrs(hist_data->attrs);
727 destroy_hist_fields(hist_data);
728 tracing_map_destroy(hist_data->map);
/*
 * Register each hist_field with the tracing_map: keys get a comparator
 * chosen by type (none for stacktrace, string compare for strings,
 * sized numeric compare otherwise); values become sum fields.
 */
732 static int create_tracing_map_fields(struct hist_trigger_data *hist_data)
734 struct tracing_map *map = hist_data->map;
735 struct ftrace_event_field *field;
736 struct hist_field *hist_field;
739 for_each_hist_field(i, hist_data) {
740 hist_field = hist_data->fields[i];
741 if (hist_field->flags & HIST_FIELD_FL_KEY) {
742 tracing_map_cmp_fn_t cmp_fn;
744 field = hist_field->field;
746 if (hist_field->flags & HIST_FIELD_FL_STACKTRACE)
747 cmp_fn = tracing_map_cmp_none;
748 else if (is_string_field(field))
749 cmp_fn = tracing_map_cmp_string;
751 cmp_fn = tracing_map_cmp_num(field->size,
753 idx = tracing_map_add_key_field(map,
758 idx = tracing_map_add_sum_field(map);
/*
 * Custom element ops are only needed when some key uses .execname
 * (per-element comm buffer management).
 */
767 static bool need_tracing_map_ops(struct hist_trigger_data *hist_data)
769 struct hist_field *key_field;
772 for_each_hist_key_field(i, hist_data) {
773 key_field = hist_data->fields[i];
775 if (key_field->flags & HIST_FIELD_FL_EXECNAME)
/*
 * Build a complete hist_trigger_data: parse fields and sort keys, create
 * the tracing_map (with comm ops iff needed), register the map fields,
 * and initialize the map.  Takes ownership of attrs on success; on the
 * error path attrs is detached (set to NULL) before destroy_hist_data()
 * so the caller can free it — NOTE(review): exact error-path ordering is
 * partly hidden by the sampled listing.
 */
782 static struct hist_trigger_data *
783 create_hist_data(unsigned int map_bits,
784 struct hist_trigger_attrs *attrs,
785 struct trace_event_file *file)
787 const struct tracing_map_ops *map_ops = NULL;
788 struct hist_trigger_data *hist_data;
791 hist_data = kzalloc(sizeof(*hist_data), GFP_KERNEL);
793 return ERR_PTR(-ENOMEM);
795 hist_data->attrs = attrs;
797 ret = create_hist_fields(hist_data, file);
801 ret = create_sort_keys(hist_data);
805 if (need_tracing_map_ops(hist_data))
806 map_ops = &hist_trigger_elt_comm_ops;
808 hist_data->map = tracing_map_create(map_bits, hist_data->key_size,
810 if (IS_ERR(hist_data->map)) {
811 ret = PTR_ERR(hist_data->map);
812 hist_data->map = NULL;
816 ret = create_tracing_map_fields(hist_data);
820 ret = tracing_map_init(hist_data->map);
824 hist_data->event_file = file;
828 hist_data->attrs = NULL;
830 destroy_hist_data(hist_data);
832 hist_data = ERR_PTR(ret);
/*
 * Accumulate this event's values into a map element: extract each value
 * with its accessor and add it to the corresponding sum.
 */
837 static void hist_trigger_elt_update(struct hist_trigger_data *hist_data,
838 struct tracing_map_elt *elt,
841 struct hist_field *hist_field;
845 for_each_hist_val_field(i, hist_data) {
846 hist_field = hist_data->fields[i];
847 hist_val = hist_field->fn(hist_field, rec);
848 tracing_map_update_sum(elt, i, hist_val);
/*
 * Copy one key's bytes into the compound key at its assigned offset.
 * For string keys the copy length comes from the actual string (dynamic
 * string descriptor's high 16 bits, strlen for ptr strings, field size
 * for static strings), clamped to leave room for a NUL terminator.
 */
852 static inline void add_to_key(char *compound_key, void *key,
853 struct hist_field *key_field, void *rec)
855 size_t size = key_field->size;
857 if (key_field->flags & HIST_FIELD_FL_STRING) {
858 struct ftrace_event_field *field;
860 field = key_field->field;
861 if (field->filter_type == FILTER_DYN_STRING)
862 size = *(u32 *)(rec + field->offset) >> 16;
863 else if (field->filter_type == FILTER_PTR_STRING)
865 else if (field->filter_type == FILTER_STATIC_STRING)
868 /* ensure NULL-termination */
869 if (size > key_field->size - 1)
870 size = key_field->size - 1;
873 memcpy(compound_key + key_field->offset, key, size);
/*
 * Per-event hot path: build the lookup key from the key fields (saving a
 * stacktrace when requested), insert/find the element in the tracing_map,
 * and update its sums.  A compound key is used whenever there is more
 * than one key or any string key.
 */
876 static void event_hist_trigger(struct event_trigger_data *data, void *rec)
878 struct hist_trigger_data *hist_data = data->private_data;
879 bool use_compound_key = (hist_data->n_keys > 1);
880 unsigned long entries[HIST_STACKTRACE_DEPTH];
881 char compound_key[HIST_KEY_SIZE_MAX];
882 struct stack_trace stacktrace;
883 struct hist_field *key_field;
884 struct tracing_map_elt *elt;
889 memset(compound_key, 0, hist_data->key_size);
891 for_each_hist_key_field(i, hist_data) {
892 key_field = hist_data->fields[i];
894 if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
895 stacktrace.max_entries = HIST_STACKTRACE_DEPTH;
896 stacktrace.entries = entries;
897 stacktrace.nr_entries = 0;
898 stacktrace.skip = HIST_STACKTRACE_SKIP;
/* Zero first so unused trailing slots compare equal across events. */
900 memset(stacktrace.entries, 0, HIST_STACKTRACE_SIZE);
901 save_stack_trace(&stacktrace);
905 field_contents = key_field->fn(key_field, rec);
906 if (key_field->flags & HIST_FIELD_FL_STRING) {
907 key = (void *)(unsigned long)field_contents;
908 use_compound_key = true;
910 key = (void *)&field_contents;
913 if (use_compound_key)
914 add_to_key(compound_key, key, key_field, rec);
917 if (use_compound_key)
920 elt = tracing_map_insert(hist_data->map, key);
922 hist_trigger_elt_update(hist_data, elt, rec);
/*
 * Print a saved stacktrace key, one symbolized address per line,
 * indented; ULONG_MAX marks the end of the recorded entries.
 */
925 static void hist_trigger_stacktrace_print(struct seq_file *m,
926 unsigned long *stacktrace_entries,
927 unsigned int max_entries)
929 char str[KSYM_SYMBOL_LEN];
930 unsigned int spaces = 8;
933 for (i = 0; i < max_entries; i++) {
934 if (stacktrace_entries[i] == ULONG_MAX)
937 seq_printf(m, "%*c", 1 + spaces, ' ');
938 sprint_symbol(str, stacktrace_entries[i]);
939 seq_printf(m, "%s\n", str);
/*
 * Print one histogram entry: each key rendered per its modifier flags
 * (hex, symbol, symbol+offset, execname+pid, syscall name, stacktrace,
 * log2 bucket, string, or plain decimal), followed by the hitcount and
 * remaining value sums.
 */
944 hist_trigger_entry_print(struct seq_file *m,
945 struct hist_trigger_data *hist_data, void *key,
946 struct tracing_map_elt *elt)
948 struct hist_field *key_field;
949 char str[KSYM_SYMBOL_LEN];
950 bool multiline = false;
956 for_each_hist_key_field(i, hist_data) {
957 key_field = hist_data->fields[i];
959 if (i > hist_data->n_vals)
962 if (key_field->flags & HIST_FIELD_FL_HEX) {
963 uval = *(u64 *)(key + key_field->offset);
964 seq_printf(m, "%s: %llx",
965 key_field->field->name, uval);
966 } else if (key_field->flags & HIST_FIELD_FL_SYM) {
967 uval = *(u64 *)(key + key_field->offset);
968 sprint_symbol_no_offset(str, uval);
969 seq_printf(m, "%s: [%llx] %-45s",
970 key_field->field->name, uval, str);
971 } else if (key_field->flags & HIST_FIELD_FL_SYM_OFFSET) {
972 uval = *(u64 *)(key + key_field->offset);
973 sprint_symbol(str, uval);
974 seq_printf(m, "%s: [%llx] %-55s",
975 key_field->field->name, uval, str);
976 } else if (key_field->flags & HIST_FIELD_FL_EXECNAME) {
/* comm was captured at element-init time by the elt_comm ops. */
977 char *comm = elt->private_data;
979 uval = *(u64 *)(key + key_field->offset);
980 seq_printf(m, "%s: %-16s[%10llu]",
981 key_field->field->name, comm, uval);
982 } else if (key_field->flags & HIST_FIELD_FL_SYSCALL) {
983 const char *syscall_name;
985 uval = *(u64 *)(key + key_field->offset);
986 syscall_name = get_syscall_name(uval);
988 syscall_name = "unknown_syscall";
990 seq_printf(m, "%s: %-30s[%3llu]",
991 key_field->field->name, syscall_name, uval);
992 } else if (key_field->flags & HIST_FIELD_FL_STACKTRACE) {
993 seq_puts(m, "stacktrace:\n");
994 hist_trigger_stacktrace_print(m,
995 key + key_field->offset,
996 HIST_STACKTRACE_DEPTH);
998 } else if (key_field->flags & HIST_FIELD_FL_LOG2) {
999 seq_printf(m, "%s: ~ 2^%-2llu", key_field->field->name,
1000 *(u64 *)(key + key_field->offset));
1001 } else if (key_field->flags & HIST_FIELD_FL_STRING) {
1002 seq_printf(m, "%s: %-50s", key_field->field->name,
1003 (char *)(key + key_field->offset));
1005 uval = *(u64 *)(key + key_field->offset);
1006 seq_printf(m, "%s: %10llu", key_field->field->name,
/* Values: hitcount first, then the other sums (hex or decimal). */
1016 seq_printf(m, " hitcount: %10llu",
1017 tracing_map_read_sum(elt, HITCOUNT_IDX));
1019 for (i = 1; i < hist_data->n_vals; i++) {
1020 if (hist_data->fields[i]->flags & HIST_FIELD_FL_HEX) {
1021 seq_printf(m, " %s: %10llx",
1022 hist_data->fields[i]->field->name,
1023 tracing_map_read_sum(elt, i));
1025 seq_printf(m, " %s: %10llu",
1026 hist_data->fields[i]->field->name,
1027 tracing_map_read_sum(elt, i));
/*
 * Sort the map's entries by the configured sort keys and print each one;
 * the sorted snapshot is freed before returning.
 */
1034 static int print_entries(struct seq_file *m,
1035 struct hist_trigger_data *hist_data)
1037 struct tracing_map_sort_entry **sort_entries = NULL;
1038 struct tracing_map *map = hist_data->map;
1039 unsigned int i, n_entries;
1041 n_entries = tracing_map_sort_entries(map, hist_data->sort_keys,
1042 hist_data->n_sort_keys,
1047 for (i = 0; i < n_entries; i++)
1048 hist_trigger_entry_print(m, hist_data,
1049 sort_entries[i]->key,
1050 sort_entries[i]->elt);
1052 tracing_map_destroy_sort_entries(sort_entries, n_entries);
/*
 * Show one hist trigger: header with the trigger description, the sorted
 * entries, then totals (hits/entries/drops) from the map's counters.
 */
1057 static void hist_trigger_show(struct seq_file *m,
1058 struct event_trigger_data *data, int n)
1060 struct hist_trigger_data *hist_data;
1061 int n_entries, ret = 0;
1064 seq_puts(m, "\n\n");
1066 seq_puts(m, "# event histogram\n#\n# trigger info: ");
1067 data->ops->print(m, data->ops, data);
1068 seq_puts(m, "#\n\n");
1070 hist_data = data->private_data;
1071 n_entries = print_entries(m, hist_data);
1072 if (n_entries < 0) {
1077 seq_printf(m, "\nTotals:\n Hits: %llu\n Entries: %u\n Dropped: %llu\n",
1078 (u64)atomic64_read(&hist_data->map->hits),
1079 n_entries, (u64)atomic64_read(&hist_data->map->drops));
/*
 * seq_file show op for the per-event "hist" file: under event_mutex,
 * walk the event's trigger list and display every hist trigger.
 */
1082 static int hist_show(struct seq_file *m, void *v)
1084 struct event_trigger_data *data;
1085 struct trace_event_file *event_file;
1088 mutex_lock(&event_mutex);
1090 event_file = event_file_data(m->private);
/* The event file can disappear under us (instance removal). */
1091 if (unlikely(!event_file)) {
1096 list_for_each_entry_rcu(data, &event_file->triggers, list) {
1097 if (data->cmd_ops->trigger_type == ETT_EVENT_HIST)
1098 hist_trigger_show(m, data, n++);
1102 mutex_unlock(&event_mutex);
/* open() for the "hist" file: single_open around hist_show. */
1107 static int event_hist_open(struct inode *inode, struct file *file)
1109 return single_open(file, hist_show, file);
1112 const struct file_operations event_hist_fops = {
1113 .open = event_hist_open,
1115 .llseek = seq_lseek,
1116 .release = single_release,
/*
 * Map a field's modifier flag back to its trigger-string name for
 * display (hex/sym/sym-offset/execname/syscall/log2); NULL if none.
 */
1119 static const char *get_hist_field_flags(struct hist_field *hist_field)
1121 const char *flags_str = NULL;
1123 if (hist_field->flags & HIST_FIELD_FL_HEX)
1125 else if (hist_field->flags & HIST_FIELD_FL_SYM)
1127 else if (hist_field->flags & HIST_FIELD_FL_SYM_OFFSET)
1128 flags_str = "sym-offset";
1129 else if (hist_field->flags & HIST_FIELD_FL_EXECNAME)
1130 flags_str = "execname";
1131 else if (hist_field->flags & HIST_FIELD_FL_SYSCALL)
1132 flags_str = "syscall";
1133 else if (hist_field->flags & HIST_FIELD_FL_LOG2)
/* Print "name" or "name.modifier" for one field. */
1139 static void hist_field_print(struct seq_file *m, struct hist_field *hist_field)
1141 seq_printf(m, "%s", hist_field->field->name);
1142 if (hist_field->flags) {
1143 const char *flags_str = get_hist_field_flags(hist_field);
1146 seq_printf(m, ".%s", flags_str);
/*
 * Reconstruct the trigger command line for display:
 * hist:[name:]keys=...:vals=...:sort=...:size=... [if filter]
 * followed by the trigger's paused/active state.
 */
1150 static int event_hist_trigger_print(struct seq_file *m,
1151 struct event_trigger_ops *ops,
1152 struct event_trigger_data *data)
1154 struct hist_trigger_data *hist_data = data->private_data;
1155 struct hist_field *key_field;
1158 seq_puts(m, "hist:");
1161 seq_printf(m, "%s:", data->name);
1163 seq_puts(m, "keys=");
1165 for_each_hist_key_field(i, hist_data) {
1166 key_field = hist_data->fields[i];
1168 if (i > hist_data->n_vals)
1171 if (key_field->flags & HIST_FIELD_FL_STACKTRACE)
1172 seq_puts(m, "stacktrace");
1174 hist_field_print(m, key_field);
1177 seq_puts(m, ":vals=");
1179 for_each_hist_val_field(i, hist_data) {
1180 if (i == HITCOUNT_IDX)
1181 seq_puts(m, "hitcount");
1184 hist_field_print(m, hist_data->fields[i]);
1188 seq_puts(m, ":sort=");
1190 for (i = 0; i < hist_data->n_sort_keys; i++) {
1191 struct tracing_map_sort_key *sort_key;
1193 sort_key = &hist_data->sort_keys[i];
1198 if (sort_key->field_idx == HITCOUNT_IDX)
1199 seq_puts(m, "hitcount");
1201 unsigned int idx = sort_key->field_idx;
1203 if (WARN_ON(idx >= TRACING_MAP_FIELDS_MAX))
1206 hist_field_print(m, hist_data->fields[idx]);
1209 if (sort_key->descending)
1210 seq_puts(m, ".descending");
1213 seq_printf(m, ":size=%u", (1 << hist_data->map->map_bits));
1215 if (data->filter_str)
1216 seq_printf(m, " if %s", data->filter_str);
1219 seq_puts(m, " [paused]");
1221 seq_puts(m, " [active]");
/*
 * Trigger lifecycle callbacks.  On first reference of a named trigger,
 * register it in the named-trigger table.
 */
1228 static int event_hist_trigger_init(struct event_trigger_ops *ops,
1229 struct event_trigger_data *data)
1231 struct hist_trigger_data *hist_data = data->private_data;
1233 if (!data->ref && hist_data->attrs->name)
1234 save_named_trigger(hist_data->attrs->name, data);
/*
 * Drop a reference; when the last one goes, deregister the name (if any)
 * and free both the trigger data and the histogram data.
 */
1241 static void event_hist_trigger_free(struct event_trigger_ops *ops,
1242 struct event_trigger_data *data)
1244 struct hist_trigger_data *hist_data = data->private_data;
1246 if (WARN_ON_ONCE(data->ref <= 0))
1252 del_named_trigger(data);
1253 trigger_data_free(data);
1254 destroy_hist_data(hist_data);
1258 static struct event_trigger_ops event_hist_trigger_ops = {
1259 .func = event_hist_trigger,
1260 .print = event_hist_trigger_print,
1261 .init = event_hist_trigger_init,
1262 .free = event_hist_trigger_free,
/*
 * Variants for a trigger that shares a named trigger's hist data:
 * init/free are forwarded to the primary named_data instance.
 */
1265 static int event_hist_trigger_named_init(struct event_trigger_ops *ops,
1266 struct event_trigger_data *data)
1270 save_named_trigger(data->named_data->name, data);
1272 event_hist_trigger_init(ops, data->named_data);
1277 static void event_hist_trigger_named_free(struct event_trigger_ops *ops,
1278 struct event_trigger_data *data)
1280 if (WARN_ON_ONCE(data->ref <= 0))
1283 event_hist_trigger_free(ops, data->named_data);
1287 del_named_trigger(data);
1288 trigger_data_free(data);
1292 static struct event_trigger_ops event_hist_trigger_named_ops = {
1293 .func = event_hist_trigger,
1294 .print = event_hist_trigger_print,
1295 .init = event_hist_trigger_named_init,
1296 .free = event_hist_trigger_named_free,
/* All hist triggers use the same (non-named) ops at registration time. */
1299 static struct event_trigger_ops *event_hist_get_trigger_ops(char *cmd,
1302 return &event_hist_trigger_ops;
/*
 * "clear": pause the trigger (and named siblings), wait for in-flight
 * users via synchronize_sched(), clear the map, then unpause.
 */
1305 static void hist_clear(struct event_trigger_data *data)
1307 struct hist_trigger_data *hist_data = data->private_data;
1310 pause_named_trigger(data);
1312 synchronize_sched();
1314 tracing_map_clear(hist_data->map);
1317 unpause_named_trigger(data);
/*
 * Two ftrace fields are compatible if identical, or if they agree on
 * name, type, size and signedness (used to match triggers across events).
 */
1320 static bool compatible_field(struct ftrace_event_field *field,
1321 struct ftrace_event_field *test_field)
1323 if (field == test_field)
1325 if (field == NULL || test_field == NULL)
1327 if (strcmp(field->name, test_field->name) != 0)
1329 if (strcmp(field->type, test_field->type) != 0)
1331 if (field->size != test_field->size)
1333 if (field->is_signed != test_field->is_signed)
/*
 * Decide whether two hist triggers describe the same histogram: same
 * named-trigger association, same field/value/sort-key counts, each
 * field pairwise matching in flags/field/offset, matching sort keys,
 * and (unless ignore_filter) matching filter strings.
 */
1339 static bool hist_trigger_match(struct event_trigger_data *data,
1340 struct event_trigger_data *data_test,
1341 struct event_trigger_data *named_data,
1344 struct tracing_map_sort_key *sort_key, *sort_key_test;
1345 struct hist_trigger_data *hist_data, *hist_data_test;
1346 struct hist_field *key_field, *key_field_test;
1349 if (named_data && (named_data != data_test) &&
1350 (named_data != data_test->named_data))
1353 if (!named_data && is_named_trigger(data_test))
1356 hist_data = data->private_data;
1357 hist_data_test = data_test->private_data;
1359 if (hist_data->n_vals != hist_data_test->n_vals ||
1360 hist_data->n_fields != hist_data_test->n_fields ||
1361 hist_data->n_sort_keys != hist_data_test->n_sort_keys)
/* One has a filter, the other doesn't -> not a match. */
1364 if (!ignore_filter) {
1365 if ((data->filter_str && !data_test->filter_str) ||
1366 (!data->filter_str && data_test->filter_str))
1370 for_each_hist_field(i, hist_data) {
1371 key_field = hist_data->fields[i];
1372 key_field_test = hist_data_test->fields[i];
1374 if (key_field->flags != key_field_test->flags)
1376 if (!compatible_field(key_field->field, key_field_test->field))
1378 if (key_field->offset != key_field_test->offset)
1382 for (i = 0; i < hist_data->n_sort_keys; i++) {
1383 sort_key = &hist_data->sort_keys[i];
1384 sort_key_test = &hist_data_test->sort_keys[i];
1386 if (sort_key->field_idx != sort_key_test->field_idx ||
1387 sort_key->descending != sort_key_test->descending)
1391 if (!ignore_filter && data->filter_str &&
1392 (strcmp(data->filter_str, data_test->filter_str) != 0))
/*
 * Register a hist trigger on an event file.  A named trigger must match
 * the existing trigger of that name.  A matching existing trigger is
 * either paused/unpaused/cleared (pause/cont/clear attrs) or rejected as
 * a duplicate.  Named triggers share the primary's hist data and switch
 * to the named ops.  Returns the number of triggers registered.
 */
1398 static int hist_register_trigger(char *glob, struct event_trigger_ops *ops,
1399 struct event_trigger_data *data,
1400 struct trace_event_file *file)
1402 struct hist_trigger_data *hist_data = data->private_data;
1403 struct event_trigger_data *test, *named_data = NULL;
1406 if (hist_data->attrs->name) {
1407 named_data = find_named_trigger(hist_data->attrs->name);
1409 if (!hist_trigger_match(data, named_data, named_data,
1417 if (hist_data->attrs->name && !named_data)
1420 list_for_each_entry_rcu(test, &file->triggers, list) {
1421 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
1422 if (!hist_trigger_match(data, test, named_data, false))
1424 if (hist_data->attrs->pause)
1425 test->paused = true;
1426 else if (hist_data->attrs->cont)
1427 test->paused = false;
1428 else if (hist_data->attrs->clear)
/* cont/clear on a non-existent trigger is an error. */
1436 if (hist_data->attrs->cont || hist_data->attrs->clear) {
/* Share the named trigger's hist data; ours is discarded. */
1442 destroy_hist_data(data->private_data);
1443 data->private_data = named_data->private_data;
1444 set_named_trigger_data(data, named_data);
1445 data->ops = &event_hist_trigger_named_ops;
1448 if (hist_data->attrs->pause)
1449 data->paused = true;
1451 if (data->ops->init) {
1452 ret = data->ops->init(data->ops, data);
1457 list_add_rcu(&data->list, &file->triggers);
1460 update_cond_flag(file);
/* Roll back the list insertion if enabling the event fails. */
1462 if (trace_event_trigger_enable_disable(file, 1) < 0) {
1463 list_del_rcu(&data->list);
1464 update_cond_flag(file);
/*
 * Unregister the hist trigger matching `data` from the event file,
 * disabling the event's trigger and freeing the trigger afterwards.
 */
1471 static void hist_unregister_trigger(char *glob, struct event_trigger_ops *ops,
1472 struct event_trigger_data *data,
1473 struct trace_event_file *file)
1475 struct hist_trigger_data *hist_data = data->private_data;
1476 struct event_trigger_data *test, *named_data = NULL;
1477 bool unregistered = false;
1479 if (hist_data->attrs->name)
1480 named_data = find_named_trigger(hist_data->attrs->name);
1482 list_for_each_entry_rcu(test, &file->triggers, list) {
1483 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
1484 if (!hist_trigger_match(data, test, named_data, false))
1486 unregistered = true;
1487 list_del_rcu(&test->list);
1488 trace_event_trigger_enable_disable(file, 0);
1489 update_cond_flag(file);
/* Free only after the RCU list removal above. */
1494 if (unregistered && test->ops->free)
1495 test->ops->free(test->ops, test);
/* Remove every hist trigger from a file (file teardown path). */
1498 static void hist_unreg_all(struct trace_event_file *file)
1500 struct event_trigger_data *test;
1502 list_for_each_entry_rcu(test, &file->triggers, list) {
1503 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
1504 list_del_rcu(&test->list);
1505 trace_event_trigger_enable_disable(file, 0);
1506 update_cond_flag(file);
1507 if (test->ops->free)
1508 test->ops->free(test->ops, test);
/*
 * Top-level handler for writing a hist trigger command: split off the
 * optional filter, parse attributes, build the hist data, allocate the
 * trigger, attach the filter, then register (or unregister for '!').
 * Error paths unwind filter, trigger data and hist data in order —
 * NOTE(review): goto labels are hidden by the sampled listing.
 */
1513 static int event_hist_trigger_func(struct event_command *cmd_ops,
1514 struct trace_event_file *file,
1515 char *glob, char *cmd, char *param)
1517 unsigned int hist_trigger_bits = TRACING_MAP_BITS_DEFAULT;
1518 struct event_trigger_data *trigger_data;
1519 struct hist_trigger_attrs *attrs;
1520 struct event_trigger_ops *trigger_ops;
1521 struct hist_trigger_data *hist_data;
1528 /* separate the trigger from the filter (k:v [if filter]) */
1529 trigger = strsep(&param, " \t");
1533 attrs = parse_hist_trigger_attrs(trigger);
1535 return PTR_ERR(attrs);
1537 if (attrs->map_bits)
1538 hist_trigger_bits = attrs->map_bits;
1540 hist_data = create_hist_data(hist_trigger_bits, attrs, file);
1541 if (IS_ERR(hist_data)) {
1542 destroy_hist_trigger_attrs(attrs);
1543 return PTR_ERR(hist_data);
1546 trigger_ops = cmd_ops->get_trigger_ops(cmd, trigger);
1549 trigger_data = kzalloc(sizeof(*trigger_data), GFP_KERNEL);
1553 trigger_data->count = -1;
1554 trigger_data->ops = trigger_ops;
1555 trigger_data->cmd_ops = cmd_ops;
1557 INIT_LIST_HEAD(&trigger_data->list);
1558 RCU_INIT_POINTER(trigger_data->filter, NULL);
1560 trigger_data->private_data = hist_data;
1562 /* if param is non-empty, it's supposed to be a filter */
1563 if (param && cmd_ops->set_filter) {
1564 ret = cmd_ops->set_filter(param, trigger_data, file);
/* Leading '!' means remove the trigger instead of adding it. */
1569 if (glob[0] == '!') {
1570 cmd_ops->unreg(glob+1, trigger_ops, trigger_data, file);
1575 ret = cmd_ops->reg(glob, trigger_ops, trigger_data, file);
1577 * The above returns on success the # of triggers registered,
1578 * but if it didn't register any it returns zero. Consider no
1579 * triggers registered a failure too.
1582 if (!(attrs->pause || attrs->cont || attrs->clear))
1587 /* Just return zero, not the number of registered triggers */
1592 if (cmd_ops->set_filter)
1593 cmd_ops->set_filter(NULL, trigger_data, NULL);
1595 kfree(trigger_data);
1597 destroy_hist_data(hist_data);
/*
 * Command descriptor for the "hist" trigger: wires the parse entry point,
 * register/unregister ops and filter handling together.  Registered at
 * boot by register_trigger_hist_cmd().
 */
static struct event_command trigger_hist_cmd = {
	.trigger_type = ETT_EVENT_HIST,
	/* hist triggers need access to the record being traced */
	.flags = EVENT_CMD_FL_NEEDS_REC,
	.func = event_hist_trigger_func,
	.reg = hist_register_trigger,
	.unreg = hist_unregister_trigger,
	/* bulk removal when the trigger file is truncated */
	.unreg_all = hist_unreg_all,
	.get_trigger_ops = event_hist_get_trigger_ops,
	.set_filter = set_trigger_filter,
/*
 * Register the "hist" trigger command with the trigger core at boot.
 * Returns the result of register_event_command().
 */
__init int register_trigger_hist_cmd(void)
	ret = register_event_command(&trigger_hist_cmd);
1624 hist_enable_trigger(struct event_trigger_data *data, void *rec)
1626 struct enable_trigger_data *enable_data = data->private_data;
1627 struct event_trigger_data *test;
1629 list_for_each_entry_rcu(test, &enable_data->file->triggers, list) {
1630 if (test->cmd_ops->trigger_type == ETT_EVENT_HIST) {
1631 if (enable_data->enable)
1632 test->paused = false;
1634 test->paused = true;
/*
 * Count-limited variant of hist_enable_trigger: only fires while the
 * trigger's count budget remains.
 *
 * NOTE(review): lines elided from this view presumably check for an
 * exhausted count and decrement data->count when it is not -1 — confirm
 * against the full source.  -1 means "no count limit" (the default set
 * in event_hist_trigger_func).
 */
hist_enable_count_trigger(struct event_trigger_data *data, void *rec)
	if (data->count != -1)
	hist_enable_trigger(data, rec);
/* Ops for "enable_hist" with no :count modifier — unconditional resume. */
static struct event_trigger_ops hist_enable_trigger_ops = {
	.func = hist_enable_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
/* Ops for "enable_hist:...:count" — resume, limited by the count budget. */
static struct event_trigger_ops hist_enable_count_trigger_ops = {
	.func = hist_enable_count_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
/*
 * Ops for "disable_hist" with no :count — shares hist_enable_trigger,
 * which pauses rather than resumes when enable_data->enable is false.
 */
static struct event_trigger_ops hist_disable_trigger_ops = {
	.func = hist_enable_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
/* Ops for "disable_hist:...:count" — count-limited pause. */
static struct event_trigger_ops hist_disable_count_trigger_ops = {
	.func = hist_enable_count_trigger,
	.print = event_enable_trigger_print,
	.init = event_trigger_init,
	.free = event_enable_trigger_free,
1679 static struct event_trigger_ops *
1680 hist_enable_get_trigger_ops(char *cmd, char *param)
1682 struct event_trigger_ops *ops;
1685 enable = (strcmp(cmd, ENABLE_HIST_STR) == 0);
1688 ops = param ? &hist_enable_count_trigger_ops :
1689 &hist_enable_trigger_ops;
1691 ops = param ? &hist_disable_count_trigger_ops :
1692 &hist_disable_trigger_ops;
1697 static void hist_enable_unreg_all(struct trace_event_file *file)
1699 struct event_trigger_data *test;
1701 list_for_each_entry_rcu(test, &file->triggers, list) {
1702 if (test->cmd_ops->trigger_type == ETT_HIST_ENABLE) {
1703 list_del_rcu(&test->list);
1704 update_cond_flag(file);
1705 trace_event_trigger_enable_disable(file, 0);
1706 if (test->ops->free)
1707 test->ops->free(test->ops, test);
/* Command descriptor for "enable_hist" — resumes paused hist triggers. */
static struct event_command trigger_hist_enable_cmd = {
	.name = ENABLE_HIST_STR,
	.trigger_type = ETT_HIST_ENABLE,
	.func = event_enable_trigger_func,
	.reg = event_enable_register_trigger,
	.unreg = event_enable_unregister_trigger,
	.unreg_all = hist_enable_unreg_all,
	.get_trigger_ops = hist_enable_get_trigger_ops,
	.set_filter = set_trigger_filter,
/*
 * Command descriptor for "disable_hist" — pauses hist triggers.  Shares
 * trigger_type and all ops with the enable command; the ops distinguish
 * enable vs disable by the command name at parse time.
 */
static struct event_command trigger_hist_disable_cmd = {
	.name = DISABLE_HIST_STR,
	.trigger_type = ETT_HIST_ENABLE,
	.func = event_enable_trigger_func,
	.reg = event_enable_register_trigger,
	.unreg = event_enable_unregister_trigger,
	.unreg_all = hist_enable_unreg_all,
	.get_trigger_ops = hist_enable_get_trigger_ops,
	.set_filter = set_trigger_filter,
/*
 * Tear down both enable_hist and disable_hist commands — used to roll
 * back a partial registration at boot.
 */
static __init void unregister_trigger_hist_enable_disable_cmds(void)
	unregister_event_command(&trigger_hist_enable_cmd);
	unregister_event_command(&trigger_hist_disable_cmd);
/*
 * Register the enable_hist and disable_hist trigger commands at boot.
 * If the second registration fails, both are unregistered so no partial
 * state is left behind.  WARN_ON flags either failure loudly.
 */
__init int register_trigger_hist_enable_disable_cmds(void)
	ret = register_event_command(&trigger_hist_enable_cmd);
	if (WARN_ON(ret < 0))
	ret = register_event_command(&trigger_hist_disable_cmd);
	if (WARN_ON(ret < 0))
		/* roll back the first registration on failure */
		unregister_trigger_hist_enable_disable_cmds();