Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author	Linus Torvalds <torvalds@linux-foundation.org>
	Thu, 17 Sep 2015 17:37:46 +0000 (10:37 -0700)
committer	Linus Torvalds <torvalds@linux-foundation.org>
	Thu, 17 Sep 2015 17:37:46 +0000 (10:37 -0700)
Pull perf fixes from Ingo Molnar:
 "Mostly tooling fixes, but also two x86 PMU driver fixes"

* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  perf tests: Fix software clock events test setting maps
  perf tests: Fix task exit test setting maps
  perf evlist: Fix create_syswide_maps() not propagating maps
  perf evlist: Fix add() not propagating maps
  perf evlist: Factor out a function to propagate maps for a single evsel
  perf evlist: Make create_maps() use set_maps()
  perf evlist: Make set_maps() more resilient
  perf evsel: Add own_cpus member
  perf evlist: Fix missing thread_map__put in propagate_maps()
  perf evlist: Fix splice_list_tail() not setting evlist
  perf evlist: Add has_user_cpus member
  perf evlist: Remove redundant validation from propagate_maps()
  perf evlist: Simplify set_maps() logic
  perf evlist: Simplify propagate_maps() logic
  perf top: Fix segfault pressing -> with no hist entries
  perf header: Fixup reading of HEADER_NRCPUS feature
  perf/x86/intel: Fix constraint access
  perf/x86/intel/bts: Set event->hw.itrace_started in pmu::start to match the new logic
  perf tools: Fix use of wrong event when processing exit events
  perf tools: Fix parse_events_add_pmu caller

15 files changed:
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_bts.c
tools/perf/builtin-script.c
tools/perf/tests/sw-clock.c
tools/perf/tests/task-exit.c
tools/perf/ui/browsers/hists.c
tools/perf/util/evlist.c
tools/perf/util/evlist.h
tools/perf/util/evsel.c
tools/perf/util/evsel.h
tools/perf/util/header.c
tools/perf/util/intel-bts.c
tools/perf/util/intel-pt.c
tools/perf/util/parse-events.c
tools/perf/util/parse-events.y

diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index cd9b6d0..3fefebf 100644
@@ -2316,9 +2316,12 @@ static struct event_constraint *
 intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
                            struct perf_event *event)
 {
-       struct event_constraint *c1 = cpuc->event_constraint[idx];
+       struct event_constraint *c1 = NULL;
        struct event_constraint *c2;
 
+       if (idx >= 0) /* fake does < 0 */
+               c1 = cpuc->event_constraint[idx];
+
        /*
         * first time only
         * - static constraint: no change across incremental scheduling calls
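
The new idx >= 0 guard matters because the fake cpuc used for event validation reaches get_event_constraints() with a negative index (hence the "fake does < 0" comment), and indexing cpuc->event_constraint[] with it reads out of bounds. A standalone sketch of the same sentinel-guard pattern, with hypothetical names:

    /* Sketch only: a cached lookup where a negative index is a sentinel
     * meaning "no per-event slot exists" (the validation path), so the
     * cache must never be indexed with it. */
    #include <stdio.h>

    #define MAX_EVENTS 4

    struct constraint { int weight; };
    static struct constraint *cache[MAX_EVENTS];

    static struct constraint *get_cached(int idx)
    {
            struct constraint *c = NULL;

            if (idx >= 0)           /* the fake path passes idx == -1 */
                    c = cache[idx];
            return c;
    }

    int main(void)
    {
            printf("fake path -> %p\n", (void *)get_cached(-1));
            return 0;
    }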
diff --git a/arch/x86/kernel/cpu/perf_event_intel_bts.c b/arch/x86/kernel/cpu/perf_event_intel_bts.c
index 54690e8..d1c0f25 100644
@@ -222,6 +222,7 @@ static void __bts_event_start(struct perf_event *event)
        if (!buf || bts_buffer_is_full(buf, bts))
                return;
 
+       event->hw.itrace_started = 1;
        event->hw.state = 0;
 
        if (!buf->snapshot)
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index eb51325..284a76e 100644
@@ -768,8 +768,8 @@ static int process_exit_event(struct perf_tool *tool,
        if (!evsel->attr.sample_id_all) {
                sample->cpu = 0;
                sample->time = 0;
-               sample->tid = event->comm.tid;
-               sample->pid = event->comm.pid;
+               sample->tid = event->fork.tid;
+               sample->pid = event->fork.pid;
        }
        print_sample_start(sample, thread, evsel);
        perf_event__fprintf(event, stdout);
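
The comm -> fork change is not cosmetic: PERF_RECORD_EXIT events carry a fork_event payload, and reading them through the comm member of union perf_event picks up whatever field happens to overlap. Going by the layouts in tools/perf/util/event.h, comm_event is { header, pid, tid, comm[] } while fork_event is { header, pid, ppid, tid, ptid, time }, so event->comm.tid lands on fork.ppid. A minimal sketch of the aliasing (structs trimmed to the overlapping fields, header omitted):

    #include <stdio.h>
    #include <stdint.h>

    struct comm_event { uint32_t pid, tid; };             /* then char comm[] */
    struct fork_event { uint32_t pid, ppid, tid, ptid; }; /* then u64 time   */

    union sample_event {
            struct comm_event comm;
            struct fork_event fork;
    };

    int main(void)
    {
            union sample_event ev = { .fork = { .pid = 10, .ppid = 1,
                                                .tid = 11, .ptid = 2 } };

            /* comm.tid overlays fork.ppid, not fork.tid: */
            printf("comm.tid=%u fork.tid=%u\n",
                   (unsigned)ev.comm.tid, (unsigned)ev.fork.tid);
            return 0;
    }

The same comm -> fork correction recurs in the intel-bts.c and intel-pt.c hunks below.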
diff --git a/tools/perf/tests/sw-clock.c b/tools/perf/tests/sw-clock.c
index 1aa21c9..5b83f56 100644
@@ -34,6 +34,8 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
                .disabled = 1,
                .freq = 1,
        };
+       struct cpu_map *cpus;
+       struct thread_map *threads;
 
        attr.sample_freq = 500;
 
@@ -50,14 +52,19 @@ static int __test__sw_clock_freq(enum perf_sw_ids clock_id)
        }
        perf_evlist__add(evlist, evsel);
 
-       evlist->cpus = cpu_map__dummy_new();
-       evlist->threads = thread_map__new_by_tid(getpid());
-       if (!evlist->cpus || !evlist->threads) {
+       cpus = cpu_map__dummy_new();
+       threads = thread_map__new_by_tid(getpid());
+       if (!cpus || !threads) {
                err = -ENOMEM;
                pr_debug("Not enough memory to create thread/cpu maps\n");
-               goto out_delete_evlist;
+               goto out_free_maps;
        }
 
+       perf_evlist__set_maps(evlist, cpus, threads);
+
+       cpus    = NULL;
+       threads = NULL;
+
        if (perf_evlist__open(evlist)) {
                const char *knob = "/proc/sys/kernel/perf_event_max_sample_rate";
 
@@ -107,6 +114,9 @@ next_event:
                err = -1;
        }
 
+out_free_maps:
+       cpu_map__put(cpus);
+       thread_map__put(threads);
 out_delete_evlist:
        perf_evlist__delete(evlist);
        return err;
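
This fix and the task-exit one below share an ownership-transfer idiom: perf_evlist__set_maps() adopts the maps' initial reference, the locals are then set to NULL, and the common error path can put them unconditionally because cpu_map__put()/thread_map__put() tolerate NULL. A reduced sketch of the idiom with stand-in types (hypothetical names, not the perf API):

    #include <errno.h>
    #include <stdlib.h>

    struct map { int refcnt; };

    static struct map *map_new(void)
    {
            struct map *m = malloc(sizeof(*m));

            if (m)
                    m->refcnt = 1;
            return m;
    }

    static void map_put(struct map *m)      /* NULL-safe, like *_map__put() */
    {
            if (m && --m->refcnt == 0)
                    free(m);
    }

    static int demo(struct map **owner_slot)
    {
            struct map *m = map_new();
            int err = -ENOMEM;

            if (!m)
                    goto out_free;

            *owner_slot = m;        /* the owner adopts the reference... */
            m = NULL;               /* ...so the cleanup below is a no-op */
            err = 0;
    out_free:
            map_put(m);
            return err;
    }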
diff --git a/tools/perf/tests/task-exit.c b/tools/perf/tests/task-exit.c
index 3a8fede..add1638 100644
@@ -43,6 +43,8 @@ int test__task_exit(void)
        };
        const char *argv[] = { "true", NULL };
        char sbuf[STRERR_BUFSIZE];
+       struct cpu_map *cpus;
+       struct thread_map *threads;
 
        signal(SIGCHLD, sig_handler);
 
@@ -58,14 +60,19 @@ int test__task_exit(void)
         * perf_evlist__prepare_workload we'll fill in the only thread
         * we're monitoring, the one forked there.
         */
-       evlist->cpus = cpu_map__dummy_new();
-       evlist->threads = thread_map__new_by_tid(-1);
-       if (!evlist->cpus || !evlist->threads) {
+       cpus = cpu_map__dummy_new();
+       threads = thread_map__new_by_tid(-1);
+       if (!cpus || !threads) {
                err = -ENOMEM;
                pr_debug("Not enough memory to create thread/cpu maps\n");
-               goto out_delete_evlist;
+               goto out_free_maps;
        }
 
+       perf_evlist__set_maps(evlist, cpus, threads);
+
+       cpus    = NULL;
+       threads = NULL;
+
        err = perf_evlist__prepare_workload(evlist, &target, argv, false,
                                            workload_exec_failed_signal);
        if (err < 0) {
@@ -114,6 +121,9 @@ retry:
                err = -1;
        }
 
+out_free_maps:
+       cpu_map__put(cpus);
+       thread_map__put(threads);
 out_delete_evlist:
        perf_evlist__delete(evlist);
        return err;
diff --git a/tools/perf/ui/browsers/hists.c b/tools/perf/ui/browsers/hists.c
index cf86f2d..c04c60d 100644
@@ -1968,7 +1968,8 @@ skip_annotation:
                                          &options[nr_options], dso);
                nr_options += add_map_opt(browser, &actions[nr_options],
                                          &options[nr_options],
-                                         browser->selection->map);
+                                         browser->selection ?
+                                               browser->selection->map : NULL);
 
                /* perf script support */
                if (browser->he_selection) {
@@ -1976,6 +1977,15 @@ skip_annotation:
                                                     &actions[nr_options],
                                                     &options[nr_options],
                                                     thread, NULL);
+                       /*
+                        * Note that browser->selection != NULL
+                        * when browser->he_selection is not NULL,
+                        * so we don't need to check browser->selection
+                        * before fetching browser->selection->sym like what
+                        * we do before fetching browser->selection->map.
+                        *
+                        * See hist_browser__show_entry.
+                        */
                        nr_options += add_script_opt(browser,
                                                     &actions[nr_options],
                                                     &options[nr_options],
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index d51a520..c8fc8a2 100644
@@ -124,6 +124,33 @@ void perf_evlist__delete(struct perf_evlist *evlist)
        free(evlist);
 }
 
+static void __perf_evlist__propagate_maps(struct perf_evlist *evlist,
+                                         struct perf_evsel *evsel)
+{
+       /*
+        * We already have cpus for evsel (via PMU sysfs) so
+        * keep it, if there's no target cpu list defined.
+        */
+       if (!evsel->own_cpus || evlist->has_user_cpus) {
+               cpu_map__put(evsel->cpus);
+               evsel->cpus = cpu_map__get(evlist->cpus);
+       } else if (evsel->cpus != evsel->own_cpus) {
+               cpu_map__put(evsel->cpus);
+               evsel->cpus = cpu_map__get(evsel->own_cpus);
+       }
+
+       thread_map__put(evsel->threads);
+       evsel->threads = thread_map__get(evlist->threads);
+}
+
+static void perf_evlist__propagate_maps(struct perf_evlist *evlist)
+{
+       struct perf_evsel *evsel;
+
+       evlist__for_each(evlist, evsel)
+               __perf_evlist__propagate_maps(evlist, evsel);
+}
+
 void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
 {
        entry->evlist = evlist;
@@ -133,18 +160,19 @@ void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
 
        if (!evlist->nr_entries++)
                perf_evlist__set_id_pos(evlist);
+
+       __perf_evlist__propagate_maps(evlist, entry);
 }
 
 void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
-                                  struct list_head *list,
-                                  int nr_entries)
+                                  struct list_head *list)
 {
-       bool set_id_pos = !evlist->nr_entries;
+       struct perf_evsel *evsel, *temp;
 
-       list_splice_tail(list, &evlist->entries);
-       evlist->nr_entries += nr_entries;
-       if (set_id_pos)
-               perf_evlist__set_id_pos(evlist);
+       __evlist__for_each_safe(list, temp, evsel) {
+               list_del_init(&evsel->node);
+               perf_evlist__add(evlist, evsel);
+       }
 }
 
 void __perf_evlist__set_leader(struct list_head *list)
@@ -210,7 +238,7 @@ static int perf_evlist__add_attrs(struct perf_evlist *evlist,
                list_add_tail(&evsel->node, &head);
        }
 
-       perf_evlist__splice_list_tail(evlist, &head, nr_attrs);
+       perf_evlist__splice_list_tail(evlist, &head);
 
        return 0;
 
@@ -1103,71 +1131,56 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
        return perf_evlist__mmap_ex(evlist, pages, overwrite, 0, false);
 }
 
-static int perf_evlist__propagate_maps(struct perf_evlist *evlist,
-                                      bool has_user_cpus)
-{
-       struct perf_evsel *evsel;
-
-       evlist__for_each(evlist, evsel) {
-               /*
-                * We already have cpus for evsel (via PMU sysfs) so
-                * keep it, if there's no target cpu list defined.
-                */
-               if (evsel->cpus && has_user_cpus)
-                       cpu_map__put(evsel->cpus);
-
-               if (!evsel->cpus || has_user_cpus)
-                       evsel->cpus = cpu_map__get(evlist->cpus);
-
-               evsel->threads = thread_map__get(evlist->threads);
-
-               if ((evlist->cpus && !evsel->cpus) ||
-                   (evlist->threads && !evsel->threads))
-                       return -ENOMEM;
-       }
-
-       return 0;
-}
-
 int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
 {
-       evlist->threads = thread_map__new_str(target->pid, target->tid,
-                                             target->uid);
+       struct cpu_map *cpus;
+       struct thread_map *threads;
+
+       threads = thread_map__new_str(target->pid, target->tid, target->uid);
 
-       if (evlist->threads == NULL)
+       if (!threads)
                return -1;
 
        if (target__uses_dummy_map(target))
-               evlist->cpus = cpu_map__dummy_new();
+               cpus = cpu_map__dummy_new();
        else
-               evlist->cpus = cpu_map__new(target->cpu_list);
+               cpus = cpu_map__new(target->cpu_list);
 
-       if (evlist->cpus == NULL)
+       if (!cpus)
                goto out_delete_threads;
 
-       return perf_evlist__propagate_maps(evlist, !!target->cpu_list);
+       evlist->has_user_cpus = !!target->cpu_list;
+
+       perf_evlist__set_maps(evlist, cpus, threads);
+
+       return 0;
 
 out_delete_threads:
-       thread_map__put(evlist->threads);
-       evlist->threads = NULL;
+       thread_map__put(threads);
        return -1;
 }
 
-int perf_evlist__set_maps(struct perf_evlist *evlist,
-                         struct cpu_map *cpus,
-                         struct thread_map *threads)
+void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
+                          struct thread_map *threads)
 {
-       if (evlist->cpus)
+       /*
+        * Allow for the possibility that one or another of the maps isn't being
+        * changed i.e. don't put it.  Note we are assuming the maps that are
+        * being applied are brand new and evlist is taking ownership of the
+        * original reference count of 1.  If that is not the case it is up to
+        * the caller to increase the reference count.
+        */
+       if (cpus != evlist->cpus) {
                cpu_map__put(evlist->cpus);
+               evlist->cpus = cpus;
+       }
 
-       evlist->cpus = cpus;
-
-       if (evlist->threads)
+       if (threads != evlist->threads) {
                thread_map__put(evlist->threads);
+               evlist->threads = threads;
+       }
 
-       evlist->threads = threads;
-
-       return perf_evlist__propagate_maps(evlist, false);
+       perf_evlist__propagate_maps(evlist);
 }
 
 int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel)
@@ -1387,6 +1400,8 @@ void perf_evlist__close(struct perf_evlist *evlist)
 
 static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist)
 {
+       struct cpu_map    *cpus;
+       struct thread_map *threads;
        int err = -ENOMEM;
 
        /*
@@ -1398,20 +1413,19 @@ static int perf_evlist__create_syswide_maps(struct perf_evlist *evlist)
         * error, and we may not want to do that fallback to a
         * default cpu identity map :-\
         */
-       evlist->cpus = cpu_map__new(NULL);
-       if (evlist->cpus == NULL)
+       cpus = cpu_map__new(NULL);
+       if (!cpus)
                goto out;
 
-       evlist->threads = thread_map__new_dummy();
-       if (evlist->threads == NULL)
-               goto out_free_cpus;
+       threads = thread_map__new_dummy();
+       if (!threads)
+               goto out_put;
 
-       err = 0;
+       perf_evlist__set_maps(evlist, cpus, threads);
 out:
        return err;
-out_free_cpus:
-       cpu_map__put(evlist->cpus);
-       evlist->cpus = NULL;
+out_put:
+       cpu_map__put(cpus);
        goto out;
 }
 
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index b39a619..115d8b5 100644
@@ -42,6 +42,7 @@ struct perf_evlist {
        int              nr_mmaps;
        bool             overwrite;
        bool             enabled;
+       bool             has_user_cpus;
        size_t           mmap_len;
        int              id_pos;
        int              is_pos;
@@ -155,9 +156,8 @@ int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
 void perf_evlist__set_selected(struct perf_evlist *evlist,
                               struct perf_evsel *evsel);
 
-int perf_evlist__set_maps(struct perf_evlist *evlist,
-                         struct cpu_map *cpus,
-                         struct thread_map *threads);
+void perf_evlist__set_maps(struct perf_evlist *evlist, struct cpu_map *cpus,
+                          struct thread_map *threads);
 int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target);
 int perf_evlist__apply_filters(struct perf_evlist *evlist, struct perf_evsel **err_evsel);
 
@@ -179,8 +179,7 @@ bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist);
 bool perf_evlist__valid_read_format(struct perf_evlist *evlist);
 
 void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
-                                  struct list_head *list,
-                                  int nr_entries);
+                                  struct list_head *list);
 
 static inline struct perf_evsel *perf_evlist__first(struct perf_evlist *evlist)
 {
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index c53f791..5410483 100644
@@ -1033,6 +1033,7 @@ void perf_evsel__exit(struct perf_evsel *evsel)
        perf_evsel__free_config_terms(evsel);
        close_cgroup(evsel->cgrp);
        cpu_map__put(evsel->cpus);
+       cpu_map__put(evsel->own_cpus);
        thread_map__put(evsel->threads);
        zfree(&evsel->group_name);
        zfree(&evsel->name);
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 298e6bb..ef8925f 100644
@@ -98,6 +98,7 @@ struct perf_evsel {
        struct cgroup_sel       *cgrp;
        void                    *handler;
        struct cpu_map          *cpus;
+       struct cpu_map          *own_cpus;
        struct thread_map       *threads;
        unsigned int            sample_size;
        int                     id_pos;
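
The cpus/own_cpus split gives propagation a stable source of truth: evsel->cpus is the effective map and is rewritten on every propagation pass, while evsel->own_cpus pins the map the PMU advertised via sysfs so it can be restored whenever no user cpu list overrides it. The rule in __perf_evlist__propagate_maps() reduces to this decision (sketch with a hypothetical helper):

    #include <stdbool.h>

    struct cpu_map;

    /* Which cpu map should an evsel end up with after propagation?  A
     * user-supplied cpu list always wins; otherwise an evsel keeps the
     * map its PMU advertised. */
    static struct cpu_map *effective_cpus(struct cpu_map *evlist_cpus,
                                          struct cpu_map *own_cpus,
                                          bool has_user_cpus)
    {
            if (!own_cpus || has_user_cpus)
                    return evlist_cpus;
            return own_cpus;
    }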
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 4181454..fce6634 100644
@@ -1438,7 +1438,7 @@ static int process_nrcpus(struct perf_file_section *section __maybe_unused,
        if (ph->needs_swap)
                nr = bswap_32(nr);
 
-       ph->env.nr_cpus_online = nr;
+       ph->env.nr_cpus_avail = nr;
 
        ret = readn(fd, &nr, sizeof(nr));
        if (ret != sizeof(nr))
@@ -1447,7 +1447,7 @@ static int process_nrcpus(struct perf_file_section *section __maybe_unused,
        if (ph->needs_swap)
                nr = bswap_32(nr);
 
-       ph->env.nr_cpus_avail = nr;
+       ph->env.nr_cpus_online = nr;
        return 0;
 }
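
The two assignments were simply crossed: HEADER_NRCPUS stores the available-CPU count first and the online count second (the writer, if memory serves, does sysconf(_SC_NPROCESSORS_CONF) before _SC_NPROCESSORS_ONLN), so the reader must consume the fields in that order. A minimal sketch of keeping reader and writer locked to one field order (hypothetical serializer; the real code goes through do_write()/readn() with byte-swap handling):

    #include <stdio.h>
    #include <stdint.h>

    struct nrcpus { uint32_t nr_avail, nr_online; };

    /* avail first, online second -- both sides must agree */
    static int nrcpus_write(FILE *f, const struct nrcpus *n)
    {
            return (fwrite(&n->nr_avail,  sizeof(n->nr_avail),  1, f) == 1 &&
                    fwrite(&n->nr_online, sizeof(n->nr_online), 1, f) == 1) ? 0 : -1;
    }

    static int nrcpus_read(FILE *f, struct nrcpus *n)
    {
            return (fread(&n->nr_avail,  sizeof(n->nr_avail),  1, f) == 1 &&
                    fread(&n->nr_online, sizeof(n->nr_online), 1, f) == 1) ? 0 : -1;
    }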
 
diff --git a/tools/perf/util/intel-bts.c b/tools/perf/util/intel-bts.c
index ea76862..eb0e7f8 100644
@@ -623,7 +623,7 @@ static int intel_bts_process_event(struct perf_session *session,
        if (err)
                return err;
        if (event->header.type == PERF_RECORD_EXIT) {
-               err = intel_bts_process_tid_exit(bts, event->comm.tid);
+               err = intel_bts_process_tid_exit(bts, event->fork.tid);
                if (err)
                        return err;
        }
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index bb41c20..535d86f 100644
@@ -1494,7 +1494,7 @@ static int intel_pt_process_event(struct perf_session *session,
        if (pt->timeless_decoding) {
                if (event->header.type == PERF_RECORD_EXIT) {
                        err = intel_pt_process_timeless_queues(pt,
-                                                              event->comm.tid,
+                                                              event->fork.tid,
                                                               sample->time);
                }
        } else if (timestamp) {
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index d826e6f..21ed6ee 100644
@@ -287,8 +287,8 @@ __add_event(struct list_head *list, int *idx,
        if (!evsel)
                return NULL;
 
-       if (cpus)
-               evsel->cpus = cpu_map__get(cpus);
+       evsel->cpus     = cpu_map__get(cpus);
+       evsel->own_cpus = cpu_map__get(cpus);
 
        if (name)
                evsel->name = strdup(name);
@@ -1140,10 +1140,9 @@ int parse_events(struct perf_evlist *evlist, const char *str,
        ret = parse_events__scanner(str, &data, PE_START_EVENTS);
        perf_pmu__parse_cleanup();
        if (!ret) {
-               int entries = data.idx - evlist->nr_entries;
                struct perf_evsel *last;
 
-               perf_evlist__splice_list_tail(evlist, &data.list, entries);
+               perf_evlist__splice_list_tail(evlist, &data.list);
                evlist->nr_groups += data.nr_groups;
                last = perf_evlist__last(evlist);
                last->cmdline_group_boundary = true;
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index 591905a..9cd7081 100644
@@ -255,7 +255,7 @@ PE_PMU_EVENT_PRE '-' PE_PMU_EVENT_SUF sep_dc
        list_add_tail(&term->list, head);
 
        ALLOC_LIST(list);
-       ABORT_ON(parse_events_add_pmu(list, &data->idx, "cpu", head));
+       ABORT_ON(parse_events_add_pmu(data, list, "cpu", head));
        parse_events__free_terms(head);
        $$ = list;
 }