Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dtor/input
[cascardo/linux.git] / kernel / events / core.c
index cfc227c..bf82441 100644
@@ -126,6 +126,37 @@ static int cpu_function_call(int cpu, remote_function_f func, void *info)
        return data.ret;
 }
 
+static void event_function_call(struct perf_event *event,
+                               int (*active)(void *),
+                               void (*inactive)(void *),
+                               void *data)
+{
+       struct perf_event_context *ctx = event->ctx;
+       struct task_struct *task = ctx->task;
+
+       if (!task) {
+               cpu_function_call(event->cpu, active, data);
+               return;
+       }
+
+again:
+       if (!task_function_call(task, active, data))
+               return;
+
+       raw_spin_lock_irq(&ctx->lock);
+       if (ctx->is_active) {
+               /*
+                * Reload the task pointer, it might have been changed by
+                * a concurrent perf_event_context_sched_out().
+                */
+               task = ctx->task;
+               raw_spin_unlock_irq(&ctx->lock);
+               goto again;
+       }
+       inactive(data);
+       raw_spin_unlock_irq(&ctx->lock);
+}
+
 #define EVENT_OWNER_KERNEL ((void *) -1)
 
 static bool is_kernel_event(struct perf_event *event)
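
The helper added above factors out the pattern this patch then deletes from every call site below: for a per-CPU event (ctx->task == NULL) the active callback is sent to event->cpu with cpu_function_call(); for a per-task event, task_function_call() tries to run it on the CPU where the task is running, and if the task turns out not to be running, ctx->lock is taken and the inactive callback runs locally, retrying whenever the context became active again in the meantime. A minimal userspace sketch of that retry shape follows, for orientation only; every name in it (fake_ctx, active_cb, inactive_cb, function_call) is a stand-in, not kernel API.

/*
 * Minimal userspace sketch of the retry shape, for orientation only.
 * Every name here is a stand-in, not kernel API.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_ctx {
        bool is_active;                 /* stands in for ctx->is_active */
};

/* "active" callback: pretend cross-CPU call; non-zero means it did not run */
static int active_cb(void *data)
{
        struct fake_ctx *ctx = data;

        return ctx->is_active ? 0 : -1;
}

/* "inactive" callback: runs locally, conceptually with ctx->lock held */
static void inactive_cb(void *data)
{
        (void)data;
        printf("task not running: modified the context directly\n");
}

static void function_call(struct fake_ctx *ctx,
                          int (*active)(void *), void (*inactive)(void *))
{
        for (;;) {
                if (!active(ctx))       /* the cross-call ran remotely, done */
                        return;
                /* take ctx->lock here */
                if (!ctx->is_active) {
                        inactive(ctx);  /* safe: context cannot be scheduled in */
                        return;         /* drop ctx->lock */
                }
                /* context became active again: drop the lock and retry */
        }
}

int main(void)
{
        struct fake_ctx ctx = { .is_active = false };

        function_call(&ctx, active_cb, inactive_cb);
        return 0;
}

Throughout the rest of the patch the existing __foo() functions keep their role as the cross-CPU callbacks (returning int), while the new ___foo() variants are the under-lock, task-not-running fallbacks (returning void), matching event_function_call()'s two function-pointer parameters.
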
@@ -1629,6 +1660,17 @@ struct remove_event {
        bool detach_group;
 };
 
+static void ___perf_remove_from_context(void *info)
+{
+       struct remove_event *re = info;
+       struct perf_event *event = re->event;
+       struct perf_event_context *ctx = event->ctx;
+
+       if (re->detach_group)
+               perf_group_detach(event);
+       list_del_event(event, ctx);
+}
+
 /*
  * Cross CPU call to remove a performance event
  *
@@ -1656,7 +1698,6 @@ static int __perf_remove_from_context(void *info)
        return 0;
 }
 
-
 /*
  * Remove the event from a task's (or a CPU's) list of events.
  *
@@ -1673,7 +1714,6 @@ static int __perf_remove_from_context(void *info)
 static void perf_remove_from_context(struct perf_event *event, bool detach_group)
 {
        struct perf_event_context *ctx = event->ctx;
-       struct task_struct *task = ctx->task;
        struct remove_event re = {
                .event = event,
                .detach_group = detach_group,
@@ -1681,44 +1721,8 @@ static void perf_remove_from_context(struct perf_event *event, bool detach_group
 
        lockdep_assert_held(&ctx->mutex);
 
-       if (!task) {
-               /*
-                * Per cpu events are removed via an smp call. The removal can
-                * fail if the CPU is currently offline, but in that case we
-                * already called __perf_remove_from_context from
-                * perf_event_exit_cpu.
-                */
-               cpu_function_call(event->cpu, __perf_remove_from_context, &re);
-               return;
-       }
-
-retry:
-       if (!task_function_call(task, __perf_remove_from_context, &re))
-               return;
-
-       raw_spin_lock_irq(&ctx->lock);
-       /*
-        * If we failed to find a running task, but find the context active now
-        * that we've acquired the ctx->lock, retry.
-        */
-       if (ctx->is_active) {
-               raw_spin_unlock_irq(&ctx->lock);
-               /*
-                * Reload the task pointer, it might have been changed by
-                * a concurrent perf_event_context_sched_out().
-                */
-               task = ctx->task;
-               goto retry;
-       }
-
-       /*
-        * Since the task isn't running, its safe to remove the event, us
-        * holding the ctx->lock ensures the task won't get scheduled in.
-        */
-       if (detach_group)
-               perf_group_detach(event);
-       list_del_event(event, ctx);
-       raw_spin_unlock_irq(&ctx->lock);
+       event_function_call(event, __perf_remove_from_context,
+                           ___perf_remove_from_context, &re);
 }
 
 /*
@@ -1762,6 +1766,20 @@ int __perf_event_disable(void *info)
        return 0;
 }
 
+void ___perf_event_disable(void *info)
+{
+       struct perf_event *event = info;
+
+       /*
+        * Since we have the lock this context can't be scheduled
+        * in, so we can change the state safely.
+        */
+       if (event->state == PERF_EVENT_STATE_INACTIVE) {
+               update_group_times(event);
+               event->state = PERF_EVENT_STATE_OFF;
+       }
+}
+
 /*
  * Disable a event.
  *
@@ -1778,43 +1796,16 @@ int __perf_event_disable(void *info)
 static void _perf_event_disable(struct perf_event *event)
 {
        struct perf_event_context *ctx = event->ctx;
-       struct task_struct *task = ctx->task;
-
-       if (!task) {
-               /*
-                * Disable the event on the cpu that it's on
-                */
-               cpu_function_call(event->cpu, __perf_event_disable, event);
-               return;
-       }
-
-retry:
-       if (!task_function_call(task, __perf_event_disable, event))
-               return;
 
        raw_spin_lock_irq(&ctx->lock);
-       /*
-        * If the event is still active, we need to retry the cross-call.
-        */
-       if (event->state == PERF_EVENT_STATE_ACTIVE) {
+       if (event->state <= PERF_EVENT_STATE_OFF) {
                raw_spin_unlock_irq(&ctx->lock);
-               /*
-                * Reload the task pointer, it might have been changed by
-                * a concurrent perf_event_context_sched_out().
-                */
-               task = ctx->task;
-               goto retry;
-       }
-
-       /*
-        * Since we have the lock this context can't be scheduled
-        * in, so we can change the state safely.
-        */
-       if (event->state == PERF_EVENT_STATE_INACTIVE) {
-               update_group_times(event);
-               event->state = PERF_EVENT_STATE_OFF;
+               return;
        }
        raw_spin_unlock_irq(&ctx->lock);
+
+       event_function_call(event, __perf_event_disable,
+                           ___perf_event_disable, event);
 }
 
 /*
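
The disable path above now takes ctx->lock once, bails out early for events that are already off, and only then hands the real work to event_function_call(). The early-out test relies on the ordering of the event state constants; a rough excerpt of that ordering from include/linux/perf_event.h, shown for orientation only (the real enum has further states):

enum perf_event_active_state {
        PERF_EVENT_STATE_ERROR          = -2,
        PERF_EVENT_STATE_OFF            = -1,
        PERF_EVENT_STATE_INACTIVE       =  0,
        PERF_EVENT_STATE_ACTIVE         =  1,
};

So event->state <= PERF_EVENT_STATE_OFF covers events that are already off or stuck in an error state, and only genuinely enabled events pay for a cross-CPU call.
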
@@ -2067,6 +2058,18 @@ static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
                ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
 }
 
+static void ___perf_install_in_context(void *info)
+{
+       struct perf_event *event = info;
+       struct perf_event_context *ctx = event->ctx;
+
+       /*
+        * Since the task isn't running, it's safe to add the event; our
+        * holding the ctx->lock ensures the task won't get scheduled in.
+        */
+       add_event_to_ctx(event, ctx);
+}
+
 /*
  * Cross CPU call to install and enable a performance event
  *
@@ -2143,48 +2146,14 @@ perf_install_in_context(struct perf_event_context *ctx,
                        struct perf_event *event,
                        int cpu)
 {
-       struct task_struct *task = ctx->task;
-
        lockdep_assert_held(&ctx->mutex);
 
        event->ctx = ctx;
        if (event->cpu != -1)
                event->cpu = cpu;
 
-       if (!task) {
-               /*
-                * Per cpu events are installed via an smp call and
-                * the install is always successful.
-                */
-               cpu_function_call(cpu, __perf_install_in_context, event);
-               return;
-       }
-
-retry:
-       if (!task_function_call(task, __perf_install_in_context, event))
-               return;
-
-       raw_spin_lock_irq(&ctx->lock);
-       /*
-        * If we failed to find a running task, but find the context active now
-        * that we've acquired the ctx->lock, retry.
-        */
-       if (ctx->is_active) {
-               raw_spin_unlock_irq(&ctx->lock);
-               /*
-                * Reload the task pointer, it might have been changed by
-                * a concurrent perf_event_context_sched_out().
-                */
-               task = ctx->task;
-               goto retry;
-       }
-
-       /*
-        * Since the task isn't running, its safe to add the event, us holding
-        * the ctx->lock ensures the task won't get scheduled in.
-        */
-       add_event_to_ctx(event, ctx);
-       raw_spin_unlock_irq(&ctx->lock);
+       event_function_call(event, __perf_install_in_context,
+                           ___perf_install_in_context, event);
 }
 
 /*
@@ -2287,6 +2256,11 @@ unlock:
        return 0;
 }
 
+void ___perf_event_enable(void *info)
+{
+       __perf_event_mark_enabled((struct perf_event *)info);
+}
+
 /*
  * Enable a event.
  *
@@ -2299,58 +2273,26 @@ unlock:
 static void _perf_event_enable(struct perf_event *event)
 {
        struct perf_event_context *ctx = event->ctx;
-       struct task_struct *task = ctx->task;
 
-       if (!task) {
-               /*
-                * Enable the event on the cpu that it's on
-                */
-               cpu_function_call(event->cpu, __perf_event_enable, event);
+       raw_spin_lock_irq(&ctx->lock);
+       if (event->state >= PERF_EVENT_STATE_INACTIVE) {
+               raw_spin_unlock_irq(&ctx->lock);
                return;
        }
 
-       raw_spin_lock_irq(&ctx->lock);
-       if (event->state >= PERF_EVENT_STATE_INACTIVE)
-               goto out;
-
        /*
         * If the event is in error state, clear that first.
-        * That way, if we see the event in error state below, we
-        * know that it has gone back into error state, as distinct
-        * from the task having been scheduled away before the
-        * cross-call arrived.
+        *
+        * That way, if we see the event in error state below, we know that it
+        * has gone back into error state, as distinct from the task having
+        * been scheduled away before the cross-call arrived.
         */
        if (event->state == PERF_EVENT_STATE_ERROR)
                event->state = PERF_EVENT_STATE_OFF;
-
-retry:
-       if (!ctx->is_active) {
-               __perf_event_mark_enabled(event);
-               goto out;
-       }
-
        raw_spin_unlock_irq(&ctx->lock);
 
-       if (!task_function_call(task, __perf_event_enable, event))
-               return;
-
-       raw_spin_lock_irq(&ctx->lock);
-
-       /*
-        * If the context is active and the event is still off,
-        * we need to retry the cross-call.
-        */
-       if (ctx->is_active && event->state == PERF_EVENT_STATE_OFF) {
-               /*
-                * task could have been flipped by a concurrent
-                * perf_event_context_sched_out()
-                */
-               task = ctx->task;
-               goto retry;
-       }
-
-out:
-       raw_spin_unlock_irq(&ctx->lock);
+       event_function_call(event, __perf_event_enable,
+                           ___perf_event_enable, event);
 }
 
 /*
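
_perf_event_enable() and _perf_event_disable(), rewritten above, are what the PERF_EVENT_IOC_ENABLE and PERF_EVENT_IOC_DISABLE ioctls ultimately call into, so this refactoring is meant to be invisible from userspace. For context, a small self-contained example that drives both ioctls around a busy loop (error handling kept minimal; the loop is just something to count):

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        struct perf_event_attr attr;
        uint64_t count;
        volatile int i;
        int fd;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_HARDWARE;
        attr.config = PERF_COUNT_HW_INSTRUCTIONS;
        attr.disabled = 1;              /* start off, enable explicitly below */
        attr.exclude_kernel = 1;
        attr.exclude_hv = 1;

        /* perf_event_open() has no glibc wrapper */
        fd = syscall(__NR_perf_event_open, &attr, 0 /* this task */,
                     -1 /* any cpu */, -1 /* no group */, 0);
        if (fd < 0) {
                perror("perf_event_open");
                return 1;
        }

        ioctl(fd, PERF_EVENT_IOC_RESET, 0);
        ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);    /* reaches _perf_event_enable() */

        for (i = 0; i < 1000000; i++)
                ;

        ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);   /* reaches _perf_event_disable() */

        if (read(fd, &count, sizeof(count)) == sizeof(count))
                printf("instructions: %llu\n", (unsigned long long)count);

        close(fd);
        return 0;
}
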
@@ -4149,6 +4091,22 @@ struct period_event {
        u64 value;
 };
 
+static void ___perf_event_period(void *info)
+{
+       struct period_event *pe = info;
+       struct perf_event *event = pe->event;
+       u64 value = pe->value;
+
+       if (event->attr.freq) {
+               event->attr.sample_freq = value;
+       } else {
+               event->attr.sample_period = value;
+               event->hw.sample_period = value;
+       }
+
+       local64_set(&event->hw.period_left, 0);
+}
+
 static int __perf_event_period(void *info)
 {
        struct period_event *pe = info;
@@ -4185,8 +4143,6 @@ static int __perf_event_period(void *info)
 static int perf_event_period(struct perf_event *event, u64 __user *arg)
 {
        struct period_event pe = { .event = event, };
-       struct perf_event_context *ctx = event->ctx;
-       struct task_struct *task;
        u64 value;
 
        if (!is_sampling_event(event))
@@ -4201,34 +4157,10 @@ static int perf_event_period(struct perf_event *event, u64 __user *arg)
        if (event->attr.freq && value > sysctl_perf_event_sample_rate)
                return -EINVAL;
 
-       task = ctx->task;
        pe.value = value;
 
-       if (!task) {
-               cpu_function_call(event->cpu, __perf_event_period, &pe);
-               return 0;
-       }
-
-retry:
-       if (!task_function_call(task, __perf_event_period, &pe))
-               return 0;
-
-       raw_spin_lock_irq(&ctx->lock);
-       if (ctx->is_active) {
-               raw_spin_unlock_irq(&ctx->lock);
-               task = ctx->task;
-               goto retry;
-       }
-
-       if (event->attr.freq) {
-               event->attr.sample_freq = value;
-       } else {
-               event->attr.sample_period = value;
-               event->hw.sample_period = value;
-       }
-
-       local64_set(&event->hw.period_left, 0);
-       raw_spin_unlock_irq(&ctx->lock);
+       event_function_call(event, __perf_event_period,
+                           ___perf_event_period, &pe);
 
        return 0;
 }
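
perf_event_period() is the handler behind the PERF_EVENT_IOC_PERIOD ioctl, which takes a pointer to a u64 carrying the new period, or the new frequency when the event was created with attr.freq set. A minimal sketch of adjusting the period of an already-open sampling event from userspace follows; the fd is assumed to come from perf_event_open() with a sampling configuration, and set_sample_period() is an illustrative helper name, not an existing API.

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Change the sampling period of an existing perf event fd.  If the event was
 * opened with attr.freq = 1, the kernel reinterprets the value as a sampling
 * frequency instead of a period.
 */
static int set_sample_period(int fd, uint64_t period)
{
        if (ioctl(fd, PERF_EVENT_IOC_PERIOD, &period) < 0) {
                perror("PERF_EVENT_IOC_PERIOD");
                return -1;
        }
        return 0;
}
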