perf callchain: Per-event type selection support
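
Make the call-graph configuration selectable per event instead of only
globally: each evsel now carries a list of config terms collected at
parse time, and perf_evsel__config() applies them last so that they
override the global callchain_param.

Illustrative usage, assuming the call-graph/stack-size event terms
wired up on the parse-events side of this series:

    perf record -e 'cpu/cpu-cycles,call-graph=fp/' \
                -e 'cpu/instructions,call-graph=dwarf,stack-size=4096/' -- ./workload

The diff also carries a few related evsel changes: strdup-based filter
set/append helpers, (cpu, thread)-indexed counts accessed through
perf_counts(), context-switch record support, and refcounted cpu/thread
maps released in perf_evsel__exit().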
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 33449de..6647925 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -207,9 +207,11 @@ void perf_evsel__init(struct perf_evsel *evsel,
        evsel->unit        = "";
        evsel->scale       = 1.0;
        INIT_LIST_HEAD(&evsel->node);
+       INIT_LIST_HEAD(&evsel->config_terms);
        perf_evsel__object.init(evsel);
        evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
        perf_evsel__calc_id_pos(evsel);
+       evsel->cmdline_group_boundary = false;
 }
 
 struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
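
For reference, the entries queued on the config_terms list initialized
above are declared on the evsel.h side of this patch roughly as follows
(a sketch, not the verbatim header):

        enum {
                PERF_EVSEL__CONFIG_TERM_PERIOD,
                PERF_EVSEL__CONFIG_TERM_FREQ,
                PERF_EVSEL__CONFIG_TERM_TIME,
                PERF_EVSEL__CONFIG_TERM_CALLGRAPH,
                PERF_EVSEL__CONFIG_TERM_STACK_USER,
                PERF_EVSEL__CONFIG_TERM_MAX,
        };

        struct perf_evsel_config_term {
                struct list_head        list;
                int                     type;
                union {
                        u64     period;
                        u64     freq;
                        bool    time;
                        char    *callgraph;
                        u64     stack_user;
                } val;
        };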
@@ -543,14 +545,15 @@ int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
 
 static void
 perf_evsel__config_callgraph(struct perf_evsel *evsel,
-                            struct record_opts *opts)
+                            struct record_opts *opts,
+                            struct callchain_param *param)
 {
        bool function = perf_evsel__is_function_event(evsel);
        struct perf_event_attr *attr = &evsel->attr;
 
        perf_evsel__set_sample_bit(evsel, CALLCHAIN);
 
-       if (callchain_param.record_mode == CALLCHAIN_LBR) {
+       if (param->record_mode == CALLCHAIN_LBR) {
                if (!opts->branch_stack) {
                        if (attr->exclude_user) {
                                pr_warning("LBR callstack option is only available "
@@ -566,12 +569,12 @@ perf_evsel__config_callgraph(struct perf_evsel *evsel,
                                    "Falling back to framepointers.\n");
        }
 
-       if (callchain_param.record_mode == CALLCHAIN_DWARF) {
+       if (param->record_mode == CALLCHAIN_DWARF) {
                if (!function) {
                        perf_evsel__set_sample_bit(evsel, REGS_USER);
                        perf_evsel__set_sample_bit(evsel, STACK_USER);
                        attr->sample_regs_user = PERF_REGS_MASK;
-                       attr->sample_stack_user = callchain_param.dump_size;
+                       attr->sample_stack_user = param->dump_size;
                        attr->exclude_callchain_user = 1;
                } else {
                        pr_info("Cannot use DWARF unwind for function trace event,"
@@ -585,6 +588,92 @@ perf_evsel__config_callgraph(struct perf_evsel *evsel,
        }
 }
 
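+/*
+ * Undo the sample bits and attr fields that perf_evsel__config_callgraph()
+ * sets up for @param's record mode, so a per-event setting can start clean.
+ */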
+static void
+perf_evsel__reset_callgraph(struct perf_evsel *evsel,
+                           struct callchain_param *param)
+{
+       struct perf_event_attr *attr = &evsel->attr;
+
+       perf_evsel__reset_sample_bit(evsel, CALLCHAIN);
+       if (param->record_mode == CALLCHAIN_LBR) {
+               perf_evsel__reset_sample_bit(evsel, BRANCH_STACK);
+               attr->branch_sample_type &= ~(PERF_SAMPLE_BRANCH_USER |
+                                             PERF_SAMPLE_BRANCH_CALL_STACK);
+       }
+       if (param->record_mode == CALLCHAIN_DWARF) {
+               perf_evsel__reset_sample_bit(evsel, REGS_USER);
+               perf_evsel__reset_sample_bit(evsel, STACK_USER);
+       }
+}
+
+static void apply_config_terms(struct perf_evsel *evsel,
+                              struct record_opts *opts)
+{
+       struct perf_evsel_config_term *term;
+       struct list_head *config_terms = &evsel->config_terms;
+       struct perf_event_attr *attr = &evsel->attr;
+       /* callgraph default: inherit the global record mode */
+       struct callchain_param param = {
+               .record_mode = callchain_param.record_mode,
+       };
+       u32 dump_size = 0;
+       char *callgraph_buf = NULL;
+
+       list_for_each_entry(term, config_terms, list) {
+               switch (term->type) {
+               case PERF_EVSEL__CONFIG_TERM_PERIOD:
+                       attr->sample_period = term->val.period;
+                       attr->freq = 0;
+                       break;
+               case PERF_EVSEL__CONFIG_TERM_FREQ:
+                       attr->sample_freq = term->val.freq;
+                       attr->freq = 1;
+                       break;
+               case PERF_EVSEL__CONFIG_TERM_TIME:
+                       if (term->val.time)
+                               perf_evsel__set_sample_bit(evsel, TIME);
+                       else
+                               perf_evsel__reset_sample_bit(evsel, TIME);
+                       break;
+               case PERF_EVSEL__CONFIG_TERM_CALLGRAPH:
+                       callgraph_buf = term->val.callgraph;
+                       break;
+               case PERF_EVSEL__CONFIG_TERM_STACK_USER:
+                       dump_size = term->val.stack_user;
+                       break;
+               default:
+                       break;
+               }
+       }
+
+       /*
+        * The user explicitly set a per-event callgraph or stack size:
+        * clear the global setting and apply the per-event one instead.
+        */
+       if ((callgraph_buf != NULL) || (dump_size > 0)) {
+
+               /* parse callgraph parameters */
+               if (callgraph_buf != NULL) {
+                       param.enabled = true;
+                       if (parse_callchain_record(callgraph_buf, &param)) {
+                               pr_err("per-event callgraph setting for %s failed; "
+                                      "falling back to the global callgraph setting\n",
+                                      evsel->name);
+                               return;
+                       }
+               }
+               if (dump_size > 0) {
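+                       /* keep the dump size u64 aligned; the kernel
+                        * rejects unaligned sample_stack_user sizes */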
+                       dump_size = round_up(dump_size, sizeof(u64));
+                       param.dump_size = dump_size;
+               }
+
+               /* If global callgraph set, clear it */
+               if (callchain_param.enabled)
+                       perf_evsel__reset_callgraph(evsel, &callchain_param);
+
+               /* set the per-event callgraph */
+               if (param.enabled)
+                       perf_evsel__config_callgraph(evsel, opts, &param);
+       }
+}
+
 /*
  * The enable_on_exec/disabled value strategy:
  *
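
How terms reach the list consumed by apply_config_terms() is on the
parse-events side of this series; a minimal producer sketch (names
assumed, not part of this hunk):

        static int add_callgraph_term(struct perf_evsel *evsel, const char *str)
        {
                struct perf_evsel_config_term *term = zalloc(sizeof(*term));

                if (!term)
                        return -ENOMEM;
                term->type = PERF_EVSEL__CONFIG_TERM_CALLGRAPH;
                term->val.callgraph = (char *)str; /* string stays owned by the parser */
                list_add_tail(&term->list, &evsel->config_terms);
                return 0;
        }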
@@ -689,7 +778,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
                evsel->attr.exclude_callchain_user = 1;
 
        if (callchain_param.enabled && !evsel->no_aux_samples)
-               perf_evsel__config_callgraph(evsel, opts);
+               perf_evsel__config_callgraph(evsel, opts, &callchain_param);
 
        if (opts->sample_intr_regs) {
                attr->sample_regs_intr = PERF_REGS_MASK;
@@ -707,7 +796,8 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
         */
        if (opts->sample_time &&
            (!perf_missing_features.sample_id_all &&
-           (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu)))
+           (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu ||
+            opts->sample_time_set)))
                perf_evsel__set_sample_bit(evsel, TIME);
 
        if (opts->raw_samples && !evsel->no_aux_samples) {
@@ -736,6 +826,9 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
        attr->mmap2 = track && !perf_missing_features.mmap2;
        attr->comm  = track;
 
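+       /* ask the kernel for PERF_RECORD_SWITCH sideband, on the tracking event only */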
+       if (opts->record_switch_events)
+               attr->context_switch = track;
+
        if (opts->sample_transaction)
                perf_evsel__set_sample_bit(evsel, TRANSACTION);
 
@@ -772,6 +865,12 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
                attr->use_clockid = 1;
                attr->clockid = opts->clockid;
        }
+
+       /*
+        * Apply per-event term settings last, so that they override
+        * any global configuration set above.
+        */
+       apply_config_terms(evsel, opts);
 }
 
 static int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
@@ -815,14 +914,44 @@ static int perf_evsel__run_ioctl(struct perf_evsel *evsel, int ncpus, int nthrea
        return 0;
 }
 
-int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
-                          const char *filter)
+int perf_evsel__apply_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
+                            const char *filter)
 {
        return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
                                     PERF_EVENT_IOC_SET_FILTER,
                                     (void *)filter);
 }
 
+int perf_evsel__set_filter(struct perf_evsel *evsel, const char *filter)
+{
+       char *new_filter = strdup(filter);
+
+       if (new_filter != NULL) {
+               free(evsel->filter);
+               evsel->filter = new_filter;
+               return 0;
+       }
+
+       return -1;
+}
+
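+/*
+ * Chain a new expression onto any existing filter, parenthesizing both
+ * sides: op "&&" turns "old" and "new" into "(old) && (new)".
+ */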
+int perf_evsel__append_filter(struct perf_evsel *evsel,
+                             const char *op, const char *filter)
+{
+       char *new_filter;
+
+       if (evsel->filter == NULL)
+               return perf_evsel__set_filter(evsel, filter);
+
+       if (asprintf(&new_filter, "(%s) %s (%s)", evsel->filter, op, filter) > 0) {
+               free(evsel->filter);
+               evsel->filter = new_filter;
+               return 0;
+       }
+
+       return -1;
+}
+
 int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads)
 {
        return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
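
Together these helpers manage evsel->filter as a heap-owned string that
perf_evsel__apply_filter() later pushes to the kernel; a hypothetical
call site:

        if (perf_evsel__set_filter(evsel, "common_pid != 1") < 0 ||
            perf_evsel__append_filter(evsel, "&&", "prio > 0") < 0)
                return -1;
        /* evsel->filter is now "(common_pid != 1) && (prio > 0)" */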
@@ -865,6 +994,16 @@ static void perf_evsel__free_id(struct perf_evsel *evsel)
        zfree(&evsel->id);
 }
 
+static void perf_evsel__free_config_terms(struct perf_evsel *evsel)
+{
+       struct perf_evsel_config_term *term, *h;
+
+       list_for_each_entry_safe(term, h, &evsel->config_terms, list) {
+               list_del(&term->list);
+               free(term);
+       }
+}
+
 void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
 {
        int cpu, thread;
@@ -884,7 +1023,10 @@ void perf_evsel__exit(struct perf_evsel *evsel)
        assert(list_empty(&evsel->node));
        perf_evsel__free_fd(evsel);
        perf_evsel__free_id(evsel);
+       perf_evsel__free_config_terms(evsel);
        close_cgroup(evsel->cgrp);
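+       /* drop the references this evsel holds on its cpu and thread maps */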
+       cpu_map__put(evsel->cpus);
+       thread_map__put(evsel->threads);
        zfree(&evsel->group_name);
        zfree(&evsel->name);
        perf_evsel__object.fini(evsel);
@@ -896,7 +1038,7 @@ void perf_evsel__delete(struct perf_evsel *evsel)
        free(evsel);
 }
 
-void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu,
+void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu, int thread,
                                struct perf_counts_values *count)
 {
        struct perf_counts_values tmp;
@@ -908,8 +1050,8 @@ void perf_evsel__compute_deltas(struct perf_evsel *evsel, int cpu,
                tmp = evsel->prev_raw_counts->aggr;
                evsel->prev_raw_counts->aggr = *count;
        } else {
-               tmp = evsel->prev_raw_counts->cpu[cpu];
-               evsel->prev_raw_counts->cpu[cpu] = *count;
+               tmp = *perf_counts(evsel->prev_raw_counts, cpu, thread);
+               *perf_counts(evsel->prev_raw_counts, cpu, thread) = *count;
        }
 
        count->val = count->val - tmp.val;
@@ -937,20 +1079,18 @@ void perf_counts_values__scale(struct perf_counts_values *count,
                *pscaled = scaled;
 }
 
-int perf_evsel__read_cb(struct perf_evsel *evsel, int cpu, int thread,
-                       perf_evsel__read_cb_t cb)
+int perf_evsel__read(struct perf_evsel *evsel, int cpu, int thread,
+                    struct perf_counts_values *count)
 {
-       struct perf_counts_values count;
-
-       memset(&count, 0, sizeof(count));
+       memset(count, 0, sizeof(*count));
 
        if (FD(evsel, cpu, thread) < 0)
                return -EINVAL;
 
-       if (readn(FD(evsel, cpu, thread), &count, sizeof(count)) < 0)
+       if (readn(FD(evsel, cpu, thread), count, sizeof(*count)) < 0)
                return -errno;
 
-       return cb(evsel, cpu, thread, &count);
+       return 0;
 }
 
 int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
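
The callback-based perf_evsel__read_cb() is thus replaced by a plain
read into a caller-provided buffer; a usage sketch (call site assumed):

        struct perf_counts_values count;

        if (perf_evsel__read(evsel, cpu, thread, &count) < 0)
                return -1;
        pr_debug("val %" PRIu64 " ena %" PRIu64 " run %" PRIu64 "\n",
                 count.val, count.ena, count.run);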
@@ -962,15 +1102,15 @@ int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
        if (FD(evsel, cpu, thread) < 0)
                return -EINVAL;
 
-       if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
+       if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1, thread + 1) < 0)
                return -ENOMEM;
 
        if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
                return -errno;
 
-       perf_evsel__compute_deltas(evsel, cpu, &count);
+       perf_evsel__compute_deltas(evsel, cpu, thread, &count);
        perf_counts_values__scale(&count, scale, NULL);
-       evsel->counts->cpu[cpu] = count;
+       *perf_counts(evsel->counts, cpu, thread) = count;
        return 0;
 }
 
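For reference, perf_counts() indexes the (cpu, thread) matrix of counts;
on the counts side of this series it is expected to be a thin xyarray
accessor along these lines (a sketch):

        static inline struct perf_counts_values*
        perf_counts(struct perf_counts *counts, int cpu, int thread)
        {
                return xyarray__entry(counts->values, cpu, thread);
        }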
@@ -1095,6 +1235,7 @@ int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
        PRINT_ATTRf(mmap2, p_unsigned);
        PRINT_ATTRf(comm_exec, p_unsigned);
        PRINT_ATTRf(use_clockid, p_unsigned);
+       PRINT_ATTRf(context_switch, p_unsigned);
 
        PRINT_ATTRn("{ wakeup_events, wakeup_watermark }", wakeup_events, p_unsigned);
        PRINT_ATTRf(bp_type, p_unsigned);
@@ -1167,7 +1308,7 @@ retry_sample_id:
                        int group_fd;
 
                        if (!evsel->cgrp && !evsel->system_wide)
-                               pid = threads->map[thread];
+                               pid = thread_map__pid(threads, thread);
 
                        group_fd = get_group_fd(evsel, cpu, thread);
 retry_open:
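
thread_map__pid() is a small accessor introduced alongside this change;
roughly (a sketch of the thread_map.h side, field layout assumed):

        static inline pid_t thread_map__pid(struct thread_map *map, int thread)
        {
                return map->map[thread].pid;
        }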
@@ -2075,8 +2216,13 @@ int perf_evsel__fprintf(struct perf_evsel *evsel,
                printed += perf_event_attr__fprintf(fp, &evsel->attr,
                                                    __print_attr__fprintf, &first);
        } else if (details->freq) {
-               printed += comma_fprintf(fp, &first, " sample_freq=%" PRIu64,
-                                        (u64)evsel->attr.sample_freq);
+               const char *term = "sample_freq";
+
+               if (!evsel->attr.freq)
+                       term = "sample_period";
+
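+               /*
+                * sample_freq and sample_period share a union in perf_event_attr,
+                * so reading sample_freq covers both.
+                */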
+               printed += comma_fprintf(fp, &first, " %s=%" PRIu64,
+                                        term, (u64)evsel->attr.sample_freq);
        }
 out:
        fputc('\n', fp);