From d6d5cfaf4551aa7713ca6ab73bb77e832602204b Mon Sep 17 00:00:00 2001
From: Nick Piggin
Date: Sat, 10 Sep 2005 00:26:16 -0700
Subject: [PATCH] sched: less newidle locking

Similarly to the earlier change in load_balance(), only lock the runqueue
in load_balance_newidle() if the busiest queue found has nr_running > 1.
This will reduce the frequency of expensive remote runqueue lock
acquisitions in the schedule() path on some workloads.

Signed-off-by: Nick Piggin
Acked-by: Ingo Molnar
Signed-off-by: Andrew Morton
Signed-off-by: Linus Torvalds
---
 kernel/sched.c | 17 ++++++++++-------
 1 file changed, 10 insertions(+), 7 deletions(-)

diff --git a/kernel/sched.c b/kernel/sched.c
index c61ee3451a04..930189540f3b 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2104,8 +2104,7 @@ static int load_balance(int this_cpu, runqueue_t *this_rq,
 		 */
 		double_lock_balance(this_rq, busiest);
 		nr_moved = move_tasks(this_rq, this_cpu, busiest,
-					imbalance, sd, idle,
-					&all_pinned);
+					imbalance, sd, idle, &all_pinned);
 		spin_unlock(&busiest->lock);
 
 		/* All tasks on this runqueue were pinned by CPU affinity */
@@ -2200,18 +2199,22 @@ static int load_balance_newidle(int this_cpu, runqueue_t *this_rq,
 
 	BUG_ON(busiest == this_rq);
 
-	/* Attempt to move tasks */
-	double_lock_balance(this_rq, busiest);
-
 	schedstat_add(sd, lb_imbalance[NEWLY_IDLE], imbalance);
-	nr_moved = move_tasks(this_rq, this_cpu, busiest,
+
+	nr_moved = 0;
+	if (busiest->nr_running > 1) {
+		/* Attempt to move tasks */
+		double_lock_balance(this_rq, busiest);
+		nr_moved = move_tasks(this_rq, this_cpu, busiest,
 					imbalance, sd, NEWLY_IDLE, NULL);
+		spin_unlock(&busiest->lock);
+	}
+
 	if (!nr_moved)
 		schedstat_inc(sd, lb_failed[NEWLY_IDLE]);
 	else
 		sd->nr_balance_failed = 0;
 
-	spin_unlock(&busiest->lock);
 	return nr_moved;
 
 out_balanced:
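
The idea behind the patch can be modeled outside the kernel: read nr_running
without the remote lock, and only pay for the double lock when a pull can
actually succeed. Below is a minimal user-space sketch of that pattern, not
the kernel code itself: struct rq, double_lock(), balance_newidle() and the
one-task move_tasks() are simplified stand-ins, pthread mutexes stand in for
runqueue spinlocks, and unlike the real load_balance_newidle() the sketch
takes and drops both locks itself rather than entering with this_rq already
locked.

#include <pthread.h>
#include <stdio.h>

/* Simplified stand-in for the kernel's runqueue_t; illustrative only. */
struct rq {
	pthread_mutex_t lock;
	int nr_running;
};

/* Toy move_tasks(): pull one task from busiest to this_rq.
 * Both locks must already be held by the caller. */
static int move_tasks(struct rq *this_rq, struct rq *busiest)
{
	if (busiest->nr_running < 2)	/* re-check under the lock */
		return 0;
	busiest->nr_running--;
	this_rq->nr_running++;
	return 1;
}

/* Lock both runqueues in a fixed order to avoid deadlock, playing the
 * role that double_lock_balance() plays in the kernel. */
static void double_lock(struct rq *a, struct rq *b)
{
	if (a < b) {
		pthread_mutex_lock(&a->lock);
		pthread_mutex_lock(&b->lock);
	} else {
		pthread_mutex_lock(&b->lock);
		pthread_mutex_lock(&a->lock);
	}
}

/* The pattern the patch introduces: peek at nr_running without the
 * remote lock, and only take both locks when there is work to steal. */
static int balance_newidle(struct rq *this_rq, struct rq *busiest)
{
	int nr_moved = 0;

	if (busiest->nr_running > 1) {	/* racy, unlocked read */
		double_lock(this_rq, busiest);
		nr_moved = move_tasks(this_rq, busiest);
		pthread_mutex_unlock(&busiest->lock);
		pthread_mutex_unlock(&this_rq->lock);
	}
	return nr_moved;
}

int main(void)
{
	struct rq a = { PTHREAD_MUTEX_INITIALIZER, 0 };
	struct rq b = { PTHREAD_MUTEX_INITIALIZER, 3 };

	printf("moved %d task(s)\n", balance_newidle(&a, &b));
	return 0;
}

The unlocked nr_running check is deliberately racy; the trade-off, as the
changelog implies, is that a stale read only costs a missed or wasted pull
attempt, while the common no-work case avoids touching the remote runqueue
lock at all in the schedule() path.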