Merge branch 'perf/urgent' into perf/core, to pick up dependencies
author    Ingo Molnar <mingo@kernel.org>
Thu, 18 Aug 2016 08:36:21 +0000 (10:36 +0200)
committer Ingo Molnar <mingo@kernel.org>
Thu, 18 Aug 2016 08:36:21 +0000 (10:36 +0200)
Signed-off-by: Ingo Molnar <mingo@kernel.org>
kernel/events/core.c

diff --combined kernel/events/core.c
@@@ -242,18 -242,6 +242,6 @@@ unlock
        return ret;
  }
  
- static void event_function_local(struct perf_event *event, event_f func, void *data)
- {
-       struct event_function_struct efs = {
-               .event = event,
-               .func = func,
-               .data = data,
-       };
-       int ret = event_function(&efs);
-       WARN_ON_ONCE(ret);
- }
  static void event_function_call(struct perf_event *event, event_f func, void *data)
  {
        struct perf_event_context *ctx = event->ctx;
@@@ -303,6 -291,54 +291,54 @@@ again
        raw_spin_unlock_irq(&ctx->lock);
  }
  
+ /*
+  * Similar to event_function_call() + event_function(), but hard assumes IRQs
+  * are already disabled and we're on the right CPU.
+  */
+ static void event_function_local(struct perf_event *event, event_f func, void *data)
+ {
+       struct perf_event_context *ctx = event->ctx;
+       struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
+       struct task_struct *task = READ_ONCE(ctx->task);
+       struct perf_event_context *task_ctx = NULL;
+       WARN_ON_ONCE(!irqs_disabled());
+       if (task) {
+               if (task == TASK_TOMBSTONE)
+                       return;
+               task_ctx = ctx;
+       }
+       perf_ctx_lock(cpuctx, task_ctx);
+       task = ctx->task;
+       if (task == TASK_TOMBSTONE)
+               goto unlock;
+       if (task) {
+               /*
+                * We must be either inactive or active and the right task,
+                * otherwise we're screwed, since we cannot IPI to somewhere
+                * else.
+                */
+               if (ctx->is_active) {
+                       if (WARN_ON_ONCE(task != current))
+                               goto unlock;
+                       if (WARN_ON_ONCE(cpuctx->task_ctx != ctx))
+                               goto unlock;
+               }
+       } else {
+               WARN_ON_ONCE(&cpuctx->ctx != ctx);
+       }
+       func(event, cpuctx, ctx, data);
+ unlock:
+       perf_ctx_unlock(cpuctx, task_ctx);
+ }
  #define PERF_FLAG_ALL (PERF_FLAG_FD_NO_GROUP |\
                       PERF_FLAG_FD_OUTPUT  |\
                       PERF_FLAG_PID_CGROUP |\
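
A minimal sketch of how a caller inside kernel/events/core.c might use event_function_local(), assuming it already runs on the event's CPU with IRQs disabled (for example from an interrupt handler). my_disable_event_in_irq() is a made-up name; __perf_event_disable() is the existing event_f-typed callback that is normally invoked via event_function_call():

    /* Hypothetical caller, not part of this commit. */
    static void my_disable_event_in_irq(struct perf_event *event)
    {
            /*
             * Preconditions of event_function_local(): IRQs are off and we
             * are on the CPU the event belongs to, so no IPI is needed.
             */
            WARN_ON_ONCE(!irqs_disabled());

            event_function_local(event, __perf_event_disable, NULL);
    }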
@@@ -1796,8 -1832,6 +1832,8 @@@ group_sched_out(struct perf_event *grou
        struct perf_event *event;
        int state = group_event->state;
  
 +      perf_pmu_disable(ctx->pmu);
 +
        event_sched_out(group_event, cpuctx, ctx);
  
        /*
         * Schedule out siblings (if any):
         */
        list_for_each_entry(event, &group_event->sibling_list, group_entry)
                event_sched_out(event, cpuctx, ctx);
  
 +      perf_pmu_enable(ctx->pmu);
 +
        if (state == PERF_EVENT_STATE_ACTIVE && group_event->attr.exclusive)
                cpuctx->exclusive = 0;
  }
@@@ -2805,36 -2837,19 +2841,36 @@@ unlock
        }
  }
  
 +static DEFINE_PER_CPU(struct list_head, sched_cb_list);
 +
  void perf_sched_cb_dec(struct pmu *pmu)
  {
 +      struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
 +
        this_cpu_dec(perf_sched_cb_usages);
 +
 +      if (!--cpuctx->sched_cb_usage)
 +              list_del(&cpuctx->sched_cb_entry);
  }
  
 +
  void perf_sched_cb_inc(struct pmu *pmu)
  {
 +      struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
 +
 +      if (!cpuctx->sched_cb_usage++)
 +              list_add(&cpuctx->sched_cb_entry, this_cpu_ptr(&sched_cb_list));
 +
        this_cpu_inc(perf_sched_cb_usages);
  }
  
  /*
   * This function provides the context switch callback to the lower code
   * layer. It is invoked ONLY when the context switch callback is enabled.
 + *
 + * This callback is relevant even to per-cpu events; for example multi event
 + * PEBS requires this to provide PID/TID information. This requires we flush
 + * all queued PEBS records before we context switch to a new task.
   */
  static void perf_pmu_sched_task(struct task_struct *prev,
                                  struct task_struct *next,
                                  bool sched_in)
  {
        struct perf_cpu_context *cpuctx;
        struct pmu *pmu;
 -      unsigned long flags;
  
        if (prev == next)
                return;
  
 -      local_irq_save(flags);
 +      list_for_each_entry(cpuctx, this_cpu_ptr(&sched_cb_list), sched_cb_entry) {
 +              pmu = cpuctx->unique_pmu; /* software PMUs will not have sched_task */
  
 -      rcu_read_lock();
 -
 -      list_for_each_entry_rcu(pmu, &pmus, entry) {
 -              if (pmu->sched_task) {
 -                      cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
 -
 -                      perf_ctx_lock(cpuctx, cpuctx->task_ctx);
 -
 -                      perf_pmu_disable(pmu);
 +              if (WARN_ON_ONCE(!pmu->sched_task))
 +                      continue;
  
 -                      pmu->sched_task(cpuctx->task_ctx, sched_in);
 +              perf_ctx_lock(cpuctx, cpuctx->task_ctx);
 +              perf_pmu_disable(pmu);
  
 -                      perf_pmu_enable(pmu);
 +              pmu->sched_task(cpuctx->task_ctx, sched_in);
  
 -                      perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
 -              }
 +              perf_pmu_enable(pmu);
 +              perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
        }
 -
 -      rcu_read_unlock();
 -
 -      local_irq_restore(flags);
  }
  
  static void perf_event_switch(struct task_struct *task,
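
The list-based perf_pmu_sched_task() above only visits PMUs that asked for the callback. A rough sketch of how a PMU driver might participate, assuming it keeps per-task hardware buffers that must be flushed on a context switch; the names my_pmu_sched_task(), my_pmu_add(), my_pmu_del(), needs_sched_cb() and my_flush_buffers() are illustrative and not taken from this commit:

    /* Installed as .sched_task in the driver's struct pmu. */
    static void my_pmu_sched_task(struct perf_event_context *ctx, bool sched_in)
    {
            if (!sched_in)
                    my_flush_buffers();     /* drain records queued for the outgoing task */
    }

    static int my_pmu_add(struct perf_event *event, int flags)
    {
            if (needs_sched_cb(event))
                    perf_sched_cb_inc(event->ctx->pmu);
            /* ... program the counter ... */
            return 0;
    }

    static void my_pmu_del(struct perf_event *event, int flags)
    {
            /* ... stop the counter ... */
            if (needs_sched_cb(event))
                    perf_sched_cb_dec(event->ctx->pmu);
    }

This mirrors the per-CPU refcounting that perf_sched_cb_inc()/perf_sched_cb_dec() now do: the first event that needs the callback adds the cpuctx to sched_cb_list, the last one removes it again.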
@@@ -3524,9 -3549,10 +3560,10 @@@ static int perf_event_read(struct perf_
                        .group = group,
                        .ret = 0,
                };
-               smp_call_function_single(event->oncpu,
-                                        __perf_event_read, &data, 1);
-               ret = data.ret;
+               ret = smp_call_function_single(event->oncpu, __perf_event_read, &data, 1);
+               /* The event must have been read from an online CPU: */
+               WARN_ON_ONCE(ret);
+               ret = ret ? : data.ret;
        } else if (event->state == PERF_EVENT_STATE_INACTIVE) {
                struct perf_event_context *ctx = event->ctx;
                unsigned long flags;
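
The `ret ? : data.ret` form is the GNU C conditional with an omitted middle operand, equivalent to `ret ? ret : data.ret`: a non-zero error from smp_call_function_single() (e.g. the target CPU being offline) takes precedence over whatever __perf_event_read() stored in data.ret.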
@@@ -6594,15 -6620,6 +6631,6 @@@ got_name
        kfree(buf);
  }
  
- /*
-  * Whether this @filter depends on a dynamic object which is not loaded
-  * yet or its load addresses are not known.
-  */
- static bool perf_addr_filter_needs_mmap(struct perf_addr_filter *filter)
- {
-       return filter->filter && filter->inode;
- }
  /*
   * Check whether inode and address range match filter criteria.
   */
@@@ -6664,6 -6681,13 +6692,13 @@@ static void perf_addr_filters_adjust(st
        struct perf_event_context *ctx;
        int ctxn;
  
+       /*
+        * Data tracing isn't supported yet and as such there is no need
+        * to keep track of anything that isn't related to executable code:
+        */
+       if (!(vma->vm_flags & VM_EXEC))
+               return;
        rcu_read_lock();
        for_each_task_context_nr(ctxn) {
                ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
@@@ -7816,7 -7840,11 +7851,11 @@@ static void perf_event_addr_filters_app
        list_for_each_entry(filter, &ifh->list, entry) {
                event->addr_filters_offs[count] = 0;
  
-               if (perf_addr_filter_needs_mmap(filter))
+               /*
+                * Adjust base offset if the filter is associated to a binary
+                * that needs to be mapped:
+                */
+               if (filter->inode)
                        event->addr_filters_offs[count] =
                                perf_addr_filter_apply(filter, mm);
  
@@@ -7947,8 -7975,10 +7986,10 @@@ perf_event_parse_addr_filter(struct per
                                        goto fail;
                        }
  
-                       if (token == IF_SRC_FILE) {
-                               filename = match_strdup(&args[2]);
+                       if (token == IF_SRC_FILE || token == IF_SRC_FILEADDR) {
+                               int fpos = filter->range ? 2 : 1;
+                               filename = match_strdup(&args[fpos]);
                                if (!filename) {
                                        ret = -ENOMEM;
                                        goto fail;
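
The two object-file tokens differ only in whether a size is given, which is why the match_strdup() position of the filename now depends on filter->range. Illustrative filter strings (paths and addresses made up), as passed to a PMU that supports address filters:

    filter 0x1000/0x200@/usr/bin/myprog    <- range form (IF_SRC_FILE): filename is args[2]
    start 0x1000@/usr/bin/myprog           <- single address (IF_SRC_FILEADDR): filename is args[1]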
@@@ -10396,8 -10426,6 +10437,8 @@@ static void __init perf_event_init_all_
  
                INIT_LIST_HEAD(&per_cpu(pmu_sb_events.list, cpu));
                raw_spin_lock_init(&per_cpu(pmu_sb_events.lock, cpu));
 +
 +              INIT_LIST_HEAD(&per_cpu(sched_cb_list, cpu));
        }
  }