Merge branch 'for-3.15' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 6dd7149..f83a71a 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -231,11 +231,29 @@ int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
 #define NR_ACCUMULATED_SAMPLES 128
 static DEFINE_PER_CPU(u64, running_sample_length);
 
-void perf_sample_event_took(u64 sample_len_ns)
+static void perf_duration_warn(struct irq_work *w)
 {
+       u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns);
        u64 avg_local_sample_len;
        u64 local_samples_len;
+
+       local_samples_len = __get_cpu_var(running_sample_length);
+       avg_local_sample_len = local_samples_len/NR_ACCUMULATED_SAMPLES;
+
+       printk_ratelimited(KERN_WARNING
+                       "perf interrupt took too long (%lld > %lld), lowering "
+                       "kernel.perf_event_max_sample_rate to %d\n",
+                       avg_local_sample_len, allowed_ns >> 1,
+                       sysctl_perf_event_sample_rate);
+}
+
+static DEFINE_IRQ_WORK(perf_duration_work, perf_duration_warn);
+
+void perf_sample_event_took(u64 sample_len_ns)
+{
        u64 allowed_ns = ACCESS_ONCE(perf_sample_allowed_ns);
+       u64 avg_local_sample_len;
+       u64 local_samples_len;
 
        if (allowed_ns == 0)
                return;
@@ -263,13 +281,14 @@ void perf_sample_event_took(u64 sample_len_ns)
        sysctl_perf_event_sample_rate = max_samples_per_tick * HZ;
        perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
 
-       printk_ratelimited(KERN_WARNING
-                       "perf samples too long (%lld > %lld), lowering "
-                       "kernel.perf_event_max_sample_rate to %d\n",
-                       avg_local_sample_len, allowed_ns,
-                       sysctl_perf_event_sample_rate);
-
        update_perf_cpu_limits();
+
+       if (!irq_work_queue(&perf_duration_work)) {
+               early_printk("perf interrupt took too long (%lld > %lld), lowering "
+                            "kernel.perf_event_max_sample_rate to %d\n",
+                            avg_local_sample_len, allowed_ns >> 1,
+                            sysctl_perf_event_sample_rate);
+       }
 }
 
 static atomic64_t perf_event_id;
@@ -1699,7 +1718,7 @@ group_sched_in(struct perf_event *group_event,
               struct perf_event_context *ctx)
 {
        struct perf_event *event, *partial_group = NULL;
-       struct pmu *pmu = group_event->pmu;
+       struct pmu *pmu = ctx->pmu;
        u64 now = ctx->time;
        bool simulate = false;
 
@@ -2548,8 +2567,6 @@ static void perf_branch_stack_sched_in(struct task_struct *prev,
                if (cpuctx->ctx.nr_branch_stack > 0
                    && pmu->flush_branch_stack) {
 
-                       pmu = cpuctx->ctx.pmu;
-
                        perf_ctx_lock(cpuctx, cpuctx->task_ctx);
 
                        perf_pmu_disable(pmu);
@@ -6279,7 +6296,7 @@ static int perf_event_idx_default(struct perf_event *event)
  * Ensures all contexts with the same task_ctx_nr have the same
  * pmu_cpu_context too.
  */
-static void *find_pmu_context(int ctxn)
+static struct perf_cpu_context __percpu *find_pmu_context(int ctxn)
 {
        struct pmu *pmu;
 
@@ -7841,14 +7858,14 @@ static void perf_pmu_rotate_stop(struct pmu *pmu)
 static void __perf_event_exit_context(void *__info)
 {
        struct perf_event_context *ctx = __info;
-       struct perf_event *event, *tmp;
+       struct perf_event *event;
 
        perf_pmu_rotate_stop(ctx->pmu);
 
-       list_for_each_entry_safe(event, tmp, &ctx->pinned_groups, group_entry)
-               __perf_remove_from_context(event);
-       list_for_each_entry_safe(event, tmp, &ctx->flexible_groups, group_entry)
+       rcu_read_lock();
+       list_for_each_entry_rcu(event, &ctx->event_list, event_entry)
                __perf_remove_from_context(event);
+       rcu_read_unlock();
 }
 
 static void perf_event_exit_cpu_context(int cpu)
@@ -7872,11 +7889,11 @@ static void perf_event_exit_cpu(int cpu)
 {
        struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
 
+       perf_event_exit_cpu_context(cpu);
+
        mutex_lock(&swhash->hlist_mutex);
        swevent_hlist_release(swhash);
        mutex_unlock(&swhash->hlist_mutex);
-
-       perf_event_exit_cpu_context(cpu);
 }
 #else
 static inline void perf_event_exit_cpu(int cpu) { }
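
For context, the first hunk above moves the printk_ratelimited() call out of perf_sample_event_took(), which can run from NMI context, into an irq_work callback that prints later from IRQ context, with early_printk() as a fallback when the work item is already pending. Below is a minimal sketch of that deferral pattern, not part of the patch; the demo_* names are made up, and it assumes built-in kernel code of this era where DEFINE_IRQ_WORK(), irq_work_queue() and early_printk() are available as used in the hunk.

/*
 * Sketch only -- illustrates the printk-deferral pattern used by
 * perf_duration_warn()/perf_sample_event_took() above.
 */
#include <linux/irq_work.h>
#include <linux/kernel.h>

/* Runs later from IRQ context, where printk() is safe. */
static void demo_duration_warn(struct irq_work *w)
{
	printk(KERN_WARNING "demo: slow path detected\n");
}

static DEFINE_IRQ_WORK(demo_duration_work, demo_duration_warn);

/* Called from the NMI/hot path, where printk() must be avoided. */
static void demo_report_slow_path(void)
{
	if (!irq_work_queue(&demo_duration_work)) {
		/*
		 * The work item is still pending from an earlier report and
		 * cannot be queued again; fall back to the NMI-safe
		 * early_printk() instead of silently dropping the message.
		 */
		early_printk("demo: slow path detected (irq_work pending)\n");
	}
}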