perf: Reduce perf_disable() usage
Since the current perf_disable() usage is only an optimization, remove
it for now. This eases the removal of the __weak hw_perf_enable()
interface.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Will Deacon <will.deacon@arm.com>
Cc: Paul Mundt <lethal@linux-sh.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Lin Ming <ming.m.lin@intel.com>
Cc: Yanmin <yanmin_zhang@linux.intel.com>
Cc: Deng-Cheng Zhu <dengcheng.zhu@gmail.com>
Cc: David Miller <davem@davemloft.net>
Cc: Michael Cree <mcree@orcon.net.nz>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
parent 9ed6060d28
commit 24cd7f54a0
8 changed files with 48 additions and 59 deletions
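For context, the interface the changelog refers to looked roughly like the sketch below in the kernel/perf_event.c of this era (reconstructed from kernel sources of the same period for illustration; it is not part of this diff). perf_disable() and perf_enable() keep a per-CPU nesting count around the __weak hw_perf_disable()/hw_perf_enable() hooks, which architecture code overrides to stop and restart the whole PMU. Per the changelog, the pairs deleted below were only an optimization, which is why the generic code can simply drop them.

/*
 * Sketch of the interface being phased out (reconstructed, not in this diff).
 * Architectures override the __weak hooks; the generic wrappers refcount
 * nesting per CPU so only the outermost pair touches the hardware.
 */
void __weak hw_perf_disable(void)	{ barrier(); }
void __weak hw_perf_enable(void)	{ barrier(); }

static DEFINE_PER_CPU(int, perf_disable_count);

void perf_disable(void)
{
	if (!__get_cpu_var(perf_disable_count)++)
		hw_perf_disable();	/* first disable on this CPU stops the PMU */
}

void perf_enable(void)
{
	if (!--__get_cpu_var(perf_disable_count))
		hw_perf_enable();	/* last enable restarts it */
}

Once every such pair is gone from the generic code, hw_perf_enable()/hw_perf_disable() have no callers left outside these wrappers, which is what eases the removal of the __weak interface mentioned in the changelog.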
kernel/perf_event.c
@@ -478,11 +478,6 @@ static void __perf_event_remove_from_context(void *info)
 		return;
 
 	raw_spin_lock(&ctx->lock);
-	/*
-	 * Protect the list operation against NMI by disabling the
-	 * events on a global level.
-	 */
-	perf_disable();
 
 	event_sched_out(event, cpuctx, ctx);
 
@@ -498,7 +493,6 @@ static void __perf_event_remove_from_context(void *info)
 			  perf_max_events - perf_reserved_percpu);
 	}
 
-	perf_enable();
 	raw_spin_unlock(&ctx->lock);
 }
 
@@ -803,12 +797,6 @@ static void __perf_install_in_context(void *info)
 	ctx->is_active = 1;
 	update_context_time(ctx);
 
-	/*
-	 * Protect the list operation against NMI by disabling the
-	 * events on a global level. NOP for non NMI based events.
-	 */
-	perf_disable();
-
 	add_event_to_ctx(event, ctx);
 
 	if (event->cpu != -1 && event->cpu != smp_processor_id())
@@ -850,8 +838,6 @@ static void __perf_install_in_context(void *info)
 		cpuctx->max_pertask--;
 
  unlock:
-	perf_enable();
-
 	raw_spin_unlock(&ctx->lock);
 }
 
@@ -972,12 +958,10 @@ static void __perf_event_enable(void *info)
 	if (!group_can_go_on(event, cpuctx, 1)) {
 		err = -EEXIST;
 	} else {
-		perf_disable();
 		if (event == leader)
 			err = group_sched_in(event, cpuctx, ctx);
 		else
 			err = event_sched_in(event, cpuctx, ctx);
-		perf_enable();
 	}
 
 	if (err) {
@@ -1090,9 +1074,8 @@ static void ctx_sched_out(struct perf_event_context *ctx,
 		goto out;
 	update_context_time(ctx);
 
-	perf_disable();
 	if (!ctx->nr_active)
-		goto out_enable;
+		goto out;
 
 	if (event_type & EVENT_PINNED) {
 		list_for_each_entry(event, &ctx->pinned_groups, group_entry)
@@ -1103,9 +1086,6 @@ static void ctx_sched_out(struct perf_event_context *ctx,
 		list_for_each_entry(event, &ctx->flexible_groups, group_entry)
 			group_sched_out(event, cpuctx, ctx);
 	}
-
- out_enable:
-	perf_enable();
  out:
 	raw_spin_unlock(&ctx->lock);
 }
@@ -1364,8 +1344,6 @@ ctx_sched_in(struct perf_event_context *ctx,
 
 	ctx->timestamp = perf_clock();
 
-	perf_disable();
-
 	/*
 	 * First go through the list and put on any pinned groups
 	 * in order to give them the best chance of going on.
@@ -1377,7 +1355,6 @@ ctx_sched_in(struct perf_event_context *ctx,
 	if (event_type & EVENT_FLEXIBLE)
 		ctx_flexible_sched_in(ctx, cpuctx);
 
-	perf_enable();
  out:
 	raw_spin_unlock(&ctx->lock);
 }
@@ -1425,8 +1402,6 @@ void perf_event_task_sched_in(struct task_struct *task)
 	if (cpuctx->task_ctx == ctx)
 		return;
 
-	perf_disable();
-
 	/*
 	 * We want to keep the following priority order:
 	 * cpu pinned (that don't need to move), task pinned,
@@ -1439,8 +1414,6 @@ void perf_event_task_sched_in(struct task_struct *task)
 	ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);
 
 	cpuctx->task_ctx = ctx;
-
-	perf_enable();
 }
 
 #define MAX_INTERRUPTS (~0ULL)
@@ -1555,11 +1528,9 @@ static void perf_adjust_period(struct perf_event *event, u64 nsec, u64 count)
 	hwc->sample_period = sample_period;
 
 	if (local64_read(&hwc->period_left) > 8*sample_period) {
-		perf_disable();
 		perf_event_stop(event);
 		local64_set(&hwc->period_left, 0);
 		perf_event_start(event);
-		perf_enable();
 	}
 }
 
@@ -1588,15 +1559,12 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
 		 */
 		if (interrupts == MAX_INTERRUPTS) {
 			perf_log_throttle(event, 1);
-			perf_disable();
 			event->pmu->unthrottle(event);
-			perf_enable();
 		}
 
 		if (!event->attr.freq || !event->attr.sample_freq)
 			continue;
 
-		perf_disable();
 		event->pmu->read(event);
 		now = local64_read(&event->count);
 		delta = now - hwc->freq_count_stamp;
@@ -1604,7 +1572,6 @@ static void perf_ctx_adjust_freq(struct perf_event_context *ctx)
 
 		if (delta > 0)
 			perf_adjust_period(event, TICK_NSEC, delta);
-		perf_enable();
 	}
 	raw_spin_unlock(&ctx->lock);
 }
@@ -1647,7 +1614,6 @@ void perf_event_task_tick(struct task_struct *curr)
 	if (!rotate)
 		return;
 
-	perf_disable();
 	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
 	if (ctx)
 		task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
@@ -1659,7 +1625,6 @@ void perf_event_task_tick(struct task_struct *curr)
 	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
 	if (ctx)
 		task_ctx_sched_in(curr, EVENT_FLEXIBLE);
-	perf_enable();
 }
 
 static int event_enable_on_exec(struct perf_event *event,