Merge branch 'perf/fast' into perf/core
[cascardo/linux.git] / kernel/events/core.c
diff --git a/kernel/events/core.c b/kernel/events/core.c
index a8f4ac0..de859fb 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -815,7 +815,7 @@ static void update_event_times(struct perf_event *event)
	 * here.
	 */
	if (is_cgroup_event(event))
-		run_end = perf_event_time(event);
+		run_end = perf_cgroup_event_time(event);
	else if (ctx->is_active)
		run_end = ctx->time;
	else
@@ -3208,10 +3208,6 @@ int perf_event_task_disable(void)
	return 0;
 }
 
-#ifndef PERF_EVENT_INDEX_OFFSET
-# define PERF_EVENT_INDEX_OFFSET 0
-#endif
-
 static int perf_event_index(struct perf_event *event)
 {
	if (event->hw.state & PERF_HES_STOPPED)
@@ -3220,21 +3216,26 @@ static int perf_event_index(struct perf_event *event)
	if (event->state != PERF_EVENT_STATE_ACTIVE)
		return 0;
 
-	return event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET;
+	return event->pmu->event_idx(event);
 }
 
 static void calc_timer_values(struct perf_event *event,
+				u64 *now,
				u64 *enabled,
				u64 *running)
 {
-	u64 now, ctx_time;
+	u64 ctx_time;
 
-	now = perf_clock();
-	ctx_time = event->shadow_ctx_time + now;
+	*now = perf_clock();
+	ctx_time = event->shadow_ctx_time + *now;
 
	*enabled = ctx_time - event->tstamp_enabled;
	*running = ctx_time - event->tstamp_running;
 }
 
+void __weak perf_update_user_clock(struct perf_event_mmap_page *userpg, u64 now)
+{
+}
+
 /*
  * Callers need to ensure there can be no nesting of this function, otherwise
  * the seqlock logic goes bad. We can not serialize this because the arch
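
This hunk replaces the compile-time PERF_EVENT_INDEX_OFFSET with a per-pmu
->event_idx() callback, and adds a __weak hook that an architecture can
override to publish its clock to the mmap control page. A minimal sketch of
such an override, assuming an illustrative time_offset field (what the real
arch code fills in is not shown by this diff):

	/* Hypothetical arch override: because the default above is declared
	 * __weak, the linker prefers this strong definition when the
	 * architecture supplies one. */
	void perf_update_user_clock(struct perf_event_mmap_page *userpg, u64 now)
	{
		/* Illustrative only: publish whatever conversion data lets
		 * user space turn raw counter readings into the same time
		 * base as 'now'. */
		userpg->time_offset = now;
	}
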
@@ -3244,7 +3245,7 @@ void perf_event_update_userpage(struct perf_event *event)
 {
	struct perf_event_mmap_page *userpg;
	struct ring_buffer *rb;
-	u64 enabled, running;
+	u64 enabled, running, now;
 
	rcu_read_lock();
	/*
@@ -3256,7 +3257,7 @@ void perf_event_update_userpage(struct perf_event *event)
	 * because of locking issue as we can be called in
	 * NMI context
	 */
-	calc_timer_values(event, &enabled, &running);
+	calc_timer_values(event, &now, &enabled, &running);
	rb = rcu_dereference(event->rb);
	if (!rb)
		goto unlock;
@@ -3272,7 +3273,7 @@ void perf_event_update_userpage(struct perf_event *event)
	barrier();
	userpg->index = perf_event_index(event);
	userpg->offset = perf_event_count(event);
-	if (event->state == PERF_EVENT_STATE_ACTIVE)
+	if (userpg->index)
		userpg->offset -= local64_read(&event->hw.prev_count);
 
	userpg->time_enabled = enabled +
@@ -3281,6 +3282,8 @@ void perf_event_update_userpage(struct perf_event *event)
	userpg->time_running = running +
			atomic64_read(&event->child_total_time_running);
 
+	perf_update_user_clock(userpg, now);
+
	barrier();
	++userpg->lock;
	preempt_enable();
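
The ++userpg->lock increments before and after the update (paired with the
barrier() calls) form a seqcount: ->lock is odd while an update is in flight.
A user-space reader of the mmap'ed control page retries until it observes the
same value on both sides of its reads, along these lines (a sketch; rdpmc()
stands in for the arch-specific counter-read primitive):

	u32 seq;
	u64 index, count;

	do {
		seq = pg->lock;
		barrier();
		index = pg->index;	/* 0 means: no user-readable counter */
		count = pg->offset;
		if (index)
			count += rdpmc(index - 1);
		barrier();
	} while (pg->lock != seq);
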
@@ -3538,6 +3541,8 @@ static int perf_mmap(struct file *file, struct vm_area_struct *vma)
	event->mmap_user = get_current_user();
	vma->vm_mm->pinned_vm += event->mmap_locked;
 
+	perf_event_update_userpage(event);
+
 unlock:
	if (!ret)
		atomic_inc(&event->mmap_count);
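
Updating the page here means the control page carries valid index, offset and
time fields from the moment the mapping exists, rather than staying zeroed
until the event is next scheduled. On the user side, the control page alone
needs a single shared page (a sketch; perf_fd is an fd from
perf_event_open(), error handling elided):

	#include <linux/perf_event.h>
	#include <sys/mman.h>
	#include <unistd.h>

	static struct perf_event_mmap_page *map_control_page(int perf_fd)
	{
		/* One page covers just struct perf_event_mmap_page; a ring
		 * buffer mapping would use 1 + 2^n pages instead. */
		void *p = mmap(NULL, sysconf(_SC_PAGESIZE), PROT_READ,
			       MAP_SHARED, perf_fd, 0);
		return p == MAP_FAILED ? NULL : p;
	}
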
@@ -3769,7 +3774,7 @@ static void perf_output_read_group(struct perf_output_handle *handle,
 static void perf_output_read(struct perf_output_handle *handle,
			     struct perf_event *event)
 {
-	u64 enabled = 0, running = 0;
+	u64 enabled = 0, running = 0, now;
	u64 read_format = event->attr.read_format;
 
	/*
@@ -3782,7 +3787,7 @@ static void perf_output_read(struct perf_output_handle *handle,
	 * NMI context
	 */
	if (read_format & PERF_FORMAT_TOTAL_TIMES)
-		calc_timer_values(event, &enabled, &running);
+		calc_timer_values(event, &now, &enabled, &running);
 
	if (event->attr.read_format & PERF_FORMAT_GROUP)
		perf_output_read_group(handle, event, enabled, running);
@@ -4994,6 +4999,11 @@ static int perf_swevent_init(struct perf_event *event)
	return 0;
 }
 
+static int perf_swevent_event_idx(struct perf_event *event)
+{
+	return 0;
+}
+
 static struct pmu perf_swevent = {
	.task_ctx_nr	= perf_sw_context,
@@ -5003,6 +5013,8 @@ static struct pmu perf_swevent = {
	.start		= perf_swevent_start,
	.stop		= perf_swevent_stop,
	.read		= perf_swevent_read,
+
+	.event_idx	= perf_swevent_event_idx,
 };
 
 #ifdef CONFIG_EVENT_TRACING
@@ -5089,6 +5101,8 @@ static struct pmu perf_tracepoint = {
	.start		= perf_swevent_start,
	.stop		= perf_swevent_stop,
	.read		= perf_swevent_read,
+
+	.event_idx	= perf_swevent_event_idx,
 };
 
 static inline void perf_tp_register(void)
@@ -5308,6 +5322,8 @@ static struct pmu perf_cpu_clock = {
	.start		= cpu_clock_event_start,
	.stop		= cpu_clock_event_stop,
	.read		= cpu_clock_event_read,
+
+	.event_idx	= perf_swevent_event_idx,
 };
 
 /*
@@ -5380,6 +5396,8 @@ static struct pmu perf_task_clock = {
	.start		= task_clock_event_start,
	.stop		= task_clock_event_stop,
	.read		= task_clock_event_read,
+
+	.event_idx	= perf_swevent_event_idx,
 };
 
 static void perf_pmu_nop_void(struct pmu *pmu)
@@ -5407,6 +5425,11 @@ static void perf_pmu_cancel_txn(struct pmu *pmu)
	perf_pmu_enable(pmu);
 }
 
+static int perf_event_idx_default(struct perf_event *event)
+{
+	return event->hw.idx + 1;
+}
+
 /*
  * Ensures all contexts with the same task_ctx_nr have the same
  * pmu_cpu_context too.
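
This default preserves the old numbering for every pmu that does not supply
its own callback: the code removed above computed hw.idx + 1 -
PERF_EVENT_INDEX_OFFSET, and the macro defaulted to 0, so the two agree:

	/* old: event->hw.idx + 1 - PERF_EVENT_INDEX_OFFSET   (offset was 0)
	 * new: event->pmu->event_idx(event)
	 *      == event->hw.idx + 1 via perf_event_idx_default() */
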
@@ -5493,6 +5516,7 @@ static int pmu_dev_alloc(struct pmu *pmu)
	if (!pmu->dev)
		goto out;
 
+	pmu->dev->groups = pmu->attr_groups;
	device_initialize(pmu->dev);
	ret = dev_set_name(pmu->dev, "%s", pmu->name);
	if (ret)
@@ -5596,6 +5620,9 @@ got_cpu_context:
		pmu->pmu_disable = perf_pmu_nop_void;
	}
 
+	if (!pmu->event_idx)
+		pmu->event_idx = perf_event_idx_default;
+
	list_add_rcu(&pmu->entry, &pmus);
	ret = 0;
 unlock:
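
The NULL check mirrors how perf_pmu_register() fills in the other optional
callbacks just above it, so ->event_idx only needs to be set when the default
numbering is wrong. A hypothetical pmu whose counters cannot be read from
user space would do what the software pmus in this patch do (all other
callbacks here are assumed to exist elsewhere):

	static int example_event_idx(struct perf_event *event)
	{
		return 0;	/* tell user space not to bypass read() */
	}

	static struct pmu example_pmu = {
		.task_ctx_nr	= perf_sw_context,
		.event_init	= example_event_init,
		.add		= example_add,
		.del		= example_del,
		.start		= example_start,
		.stop		= example_stop,
		.read		= example_read,
		.event_idx	= example_event_idx,
	};
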