From 6e37738a2fac964583debe91099bc3248554f6e5 Mon Sep 17 00:00:00 2001
From: Peter Zijlstra
Date: Thu, 11 Feb 2010 13:21:58 +0100
Subject: perf_events: Simplify code by removing cpu argument to hw_perf_group_sched_in()

Since the cpu argument to hw_perf_group_sched_in() is always
smp_processor_id(), simplify the code a little by removing this argument
and using the current cpu where needed.

Signed-off-by: Peter Zijlstra
Cc: David Miller
Cc: Paul Mackerras
Cc: Benjamin Herrenschmidt
Cc: Frederic Weisbecker
LKML-Reference: <1265890918.5396.3.camel@laptop>
Signed-off-by: Ingo Molnar
---
 arch/powerpc/kernel/perf_event.c | 10 +++++-----
 arch/sparc/kernel/perf_event.c   | 10 +++++-----
 arch/x86/kernel/cpu/perf_event.c | 18 +++++++++---------
 3 files changed, 19 insertions(+), 19 deletions(-)

(limited to 'arch')

diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c
index 1eb85fbf53a5..b6cf8f1f4d35 100644
--- a/arch/powerpc/kernel/perf_event.c
+++ b/arch/powerpc/kernel/perf_event.c
@@ -718,10 +718,10 @@ static int collect_events(struct perf_event *group, int max_count,
 	return n;
 }
 
-static void event_sched_in(struct perf_event *event, int cpu)
+static void event_sched_in(struct perf_event *event)
 {
 	event->state = PERF_EVENT_STATE_ACTIVE;
-	event->oncpu = cpu;
+	event->oncpu = smp_processor_id();
 	event->tstamp_running += event->ctx->time - event->tstamp_stopped;
 	if (is_software_event(event))
 		event->pmu->enable(event);
@@ -735,7 +735,7 @@ static void event_sched_in(struct perf_event *event, int cpu)
  */
 int hw_perf_group_sched_in(struct perf_event *group_leader,
 	       struct perf_cpu_context *cpuctx,
-	       struct perf_event_context *ctx, int cpu)
+	       struct perf_event_context *ctx)
 {
 	struct cpu_hw_events *cpuhw;
 	long i, n, n0;
@@ -766,10 +766,10 @@ int hw_perf_group_sched_in(struct perf_event *group_leader,
 		cpuhw->event[i]->hw.config = cpuhw->events[i];
 	cpuctx->active_oncpu += n;
 	n = 1;
-	event_sched_in(group_leader, cpu);
+	event_sched_in(group_leader);
 	list_for_each_entry(sub, &group_leader->sibling_list, group_entry) {
 		if (sub->state != PERF_EVENT_STATE_OFF) {
-			event_sched_in(sub, cpu);
+			event_sched_in(sub);
 			++n;
 		}
 	}
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index e856456ec02f..9f2b2bac8b2b 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -980,10 +980,10 @@ static int collect_events(struct perf_event *group, int max_count,
 	return n;
 }
 
-static void event_sched_in(struct perf_event *event, int cpu)
+static void event_sched_in(struct perf_event *event)
 {
 	event->state = PERF_EVENT_STATE_ACTIVE;
-	event->oncpu = cpu;
+	event->oncpu = smp_processor_id();
 	event->tstamp_running += event->ctx->time - event->tstamp_stopped;
 	if (is_software_event(event))
 		event->pmu->enable(event);
@@ -991,7 +991,7 @@ static void event_sched_in(struct perf_event *event, int cpu)
 
 int hw_perf_group_sched_in(struct perf_event *group_leader,
 			   struct perf_cpu_context *cpuctx,
-			   struct perf_event_context *ctx, int cpu)
+			   struct perf_event_context *ctx)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct perf_event *sub;
@@ -1015,10 +1015,10 @@ int hw_perf_group_sched_in(struct perf_event *group_leader,
 
 	cpuctx->active_oncpu += n;
 	n = 1;
-	event_sched_in(group_leader, cpu);
+	event_sched_in(group_leader);
 	list_for_each_entry(sub, &group_leader->sibling_list, group_entry) {
 		if (sub->state != PERF_EVENT_STATE_OFF) {
-			event_sched_in(sub, cpu);
+			event_sched_in(sub);
 			n++;
 		}
 	}
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index aa12f36e4711..ad096562d694 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -2403,12 +2403,12 @@ done:
 }
 
 static int x86_event_sched_in(struct perf_event *event,
-			  struct perf_cpu_context *cpuctx, int cpu)
+			  struct perf_cpu_context *cpuctx)
 {
 	int ret = 0;
 
 	event->state = PERF_EVENT_STATE_ACTIVE;
-	event->oncpu = cpu;
+	event->oncpu = smp_processor_id();
 	event->tstamp_running += event->ctx->time - event->tstamp_stopped;
 
 	if (!is_x86_event(event))
@@ -2424,7 +2424,7 @@ static int x86_event_sched_in(struct perf_event *event,
 }
 
 static void x86_event_sched_out(struct perf_event *event,
-			    struct perf_cpu_context *cpuctx, int cpu)
+			    struct perf_cpu_context *cpuctx)
 {
 	event->state = PERF_EVENT_STATE_INACTIVE;
 	event->oncpu = -1;
@@ -2452,9 +2452,9 @@ static void x86_event_sched_out(struct perf_event *event,
  */
 int hw_perf_group_sched_in(struct perf_event *leader,
 	       struct perf_cpu_context *cpuctx,
-	       struct perf_event_context *ctx, int cpu)
+	       struct perf_event_context *ctx)
 {
-	struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct perf_event *sub;
 	int assign[X86_PMC_IDX_MAX];
 	int n0, n1, ret;
@@ -2468,14 +2468,14 @@ int hw_perf_group_sched_in(struct perf_event *leader,
 	if (ret)
 		return ret;
 
-	ret = x86_event_sched_in(leader, cpuctx, cpu);
+	ret = x86_event_sched_in(leader, cpuctx);
 	if (ret)
 		return ret;
 
 	n1 = 1;
 	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
 		if (sub->state > PERF_EVENT_STATE_OFF) {
-			ret = x86_event_sched_in(sub, cpuctx, cpu);
+			ret = x86_event_sched_in(sub, cpuctx);
 			if (ret)
 				goto undo;
 			++n1;
@@ -2500,11 +2500,11 @@ int hw_perf_group_sched_in(struct perf_event *leader,
 	 */
 	return 1;
 undo:
-	x86_event_sched_out(leader, cpuctx, cpu);
+	x86_event_sched_out(leader, cpuctx);
 	n0 = 1;
 	list_for_each_entry(sub, &leader->sibling_list, group_entry) {
 		if (sub->state == PERF_EVENT_STATE_ACTIVE) {
-			x86_event_sched_out(sub, cpuctx, cpu);
+			x86_event_sched_out(sub, cpuctx);
 			if (++n0 == n1)
 				break;
 		}
--
cgit v1.2.3
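The change is the same mechanical transformation in all three architectures:
a parameter that every caller computed as smp_processor_id() is dropped, and
the callee derives the current CPU itself. Below is a minimal userspace
sketch of that pattern, not kernel code: sched_getcpu() stands in for the
kernel's smp_processor_id(), and the names (struct event, event_sched_in_old,
event_sched_in_new) are illustrative, not the kernel's.

/*
 * Illustrative userspace analogue of the refactor in this patch; not
 * kernel code. sched_getcpu() stands in for smp_processor_id().
 */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

struct event {
	int oncpu;	/* CPU the event is active on, -1 if inactive */
};

/* Before: every caller had to thread the current cpu through. */
static void event_sched_in_old(struct event *event, int cpu)
{
	event->oncpu = cpu;
}

/* After: the callee derives the current CPU at the point of use. */
static void event_sched_in_new(struct event *event)
{
	event->oncpu = sched_getcpu();
}

int main(void)
{
	struct event e = { .oncpu = -1 };

	/* Old call site: the argument was always the current CPU anyway. */
	event_sched_in_old(&e, sched_getcpu());

	/* New call site: the redundant argument is gone. */
	event_sched_in_new(&e);
	printf("event active on cpu %d\n", e.oncpu);
	return 0;
}

The x86 hunk changing &per_cpu(cpu_hw_events, cpu) to
&__get_cpu_var(cpu_hw_events) follows the same logic: once the function is
known to run on the CPU it operates on, the this-CPU accessor replaces the
indexed per-CPU lookup.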