sched, rcu: Fix rcu_dereference() for RCU-lockdep
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index 93fccbadde239616039720ebc34b2f2fbc48347c..5a5ea2cd924fa8494abfa21f8203f919f40ff1ca 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -1053,7 +1053,8 @@ static inline void hrtick_update(struct rq *rq)
  * increased. Here we update the fair scheduling stats and
  * then put the task into the rbtree:
  */
-static void enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup)
+static void
+enqueue_task_fair(struct rq *rq, struct task_struct *p, int wakeup, bool head)
 {
        struct cfs_rq *cfs_rq;
        struct sched_entity *se = &p->se;
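
The new "head" flag here comes from the head-queueing extension of the
sched_class enqueue hook: CFS itself ignores it (the rbtree position is
decided by vruntime), it only has an effect for FIFO-ordered classes such
as sched_rt. A minimal sketch of the generic caller in kernel/sched.c at
this point in the series (reviewer's reconstruction, not part of this
hunk):

	static void
	enqueue_task(struct rq *rq, struct task_struct *p, int wakeup, bool head)
	{
		if (wakeup)
			p->se.start_runtime = p->se.sum_exec_runtime;

		sched_info_queued(p);
		/* the new flag is simply threaded through to the class hook */
		p->sched_class->enqueue_task(rq, p, wakeup, head);
		p->se.on_rq = 1;
	}
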
@@ -1508,7 +1509,7 @@ static int select_task_rq_fair(struct task_struct *p, int sd_flag, int wake_flags)
                         * If there's an idle sibling in this domain, make that
                         * the wake_affine target instead of the current cpu.
                         */
-                       if (tmp->flags & SD_PREFER_SIBLING)
+                       if (tmp->flags & SD_SHARE_PKG_RESOURCES)
                                target = select_idle_sibling(p, tmp, target);
 
                        if (target >= 0) {
@@ -2065,6 +2066,10 @@ static int move_tasks(struct rq *this_rq, int this_cpu, struct rq *busiest,
                 */
                if (idle == CPU_NEWLY_IDLE && this_rq->nr_running)
                        break;
+
+               if (raw_spin_is_contended(&this_rq->lock) ||
+                               raw_spin_is_contended(&busiest->lock))
+                       break;
 #endif
        } while (load_moved && max_load_move > total_load_moved);
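
The new early break bounds how long move_tasks() keeps both runqueue
locks held once another CPU starts spinning on one of them. Note that
raw_spin_is_contended() is best-effort: on architectures without a
contention-aware (e.g. ticket) spinlock implementation the generic
fallback compiles to 0 and the break never fires. Roughly, from
include/linux/spinlock.h of this era (reviewer's sketch, check your
tree):

	#ifndef arch_spin_is_contended
	/* no cheap contention signal without ticket locks */
	#define arch_spin_is_contended(lock)	(((void)(lock), 0))
	#endif

	#define raw_spin_is_contended(lock) \
		arch_spin_is_contended(&(lock)->raw_lock)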
 
@@ -2092,6 +2097,7 @@ struct sd_lb_stats {
        unsigned long max_load;
        unsigned long busiest_load_per_task;
        unsigned long busiest_nr_running;
+       unsigned long busiest_group_capacity;
 
        int group_imb; /* Is there imbalance in this sd */
 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
@@ -2411,17 +2417,12 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
        unsigned long load, max_cpu_load, min_cpu_load;
        int i;
        unsigned int balance_cpu = -1, first_idle_cpu = 0;
-       unsigned long sum_avg_load_per_task;
-       unsigned long avg_load_per_task;
+       unsigned long avg_load_per_task = 0;
 
-       if (local_group) {
+       if (local_group)
                balance_cpu = group_first_cpu(group);
-               if (balance_cpu == this_cpu)
-                       update_group_power(sd, this_cpu);
-       }
 
        /* Tally up the load of all CPUs in the group */
-       sum_avg_load_per_task = avg_load_per_task = 0;
        max_cpu_load = 0;
        min_cpu_load = ~0UL;
 
@@ -2451,7 +2452,6 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
                sgs->sum_nr_running += rq->nr_running;
                sgs->sum_weighted_load += weighted_cpuload(i);
 
-               sum_avg_load_per_task += cpu_avg_load_per_task(i);
        }
 
        /*
@@ -2461,15 +2461,16 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
         * to do the newly idle load balance.
         */
        if (idle != CPU_NEWLY_IDLE && local_group &&
-           balance_cpu != this_cpu && balance) {
+           balance_cpu != this_cpu) {
                *balance = 0;
                return;
        }
 
+       update_group_power(sd, this_cpu);
+
        /* Adjust by relative CPU power of the group */
        sgs->avg_load = (sgs->group_load * SCHED_LOAD_SCALE) / group->cpu_power;
 
-
        /*
         * Consider the group unbalanced when the imbalance is larger
         * than the average weight of two tasks.
@@ -2479,8 +2480,8 @@ static inline void update_sg_lb_stats(struct sched_domain *sd,
         *      normalized nr_running number somewhere that negates
         *      the hierarchy?
         */
-       avg_load_per_task = (sum_avg_load_per_task * SCHED_LOAD_SCALE) /
-               group->cpu_power;
+       if (sgs->sum_nr_running)
+               avg_load_per_task = sgs->sum_weighted_load / sgs->sum_nr_running;
 
        if ((max_cpu_load - min_cpu_load) > 2*avg_load_per_task)
                sgs->group_imb = 1;
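
avg_load_per_task is now derived from the group's own sums rather than
from a power-scaled total of cpu_avg_load_per_task(), which keeps it in
the same unscaled units as max_cpu_load and min_cpu_load. A worked
example of the trigger (reviewer's illustration, assuming NICE_0_LOAD ==
1024):

	/*
	 * Three nice-0 tasks stacked on one CPU of the group while a
	 * sibling CPU sits idle:
	 *
	 *	max_cpu_load = 3072, min_cpu_load = 0
	 *	avg_load_per_task = 3072 / 3 = 1024
	 *	(3072 - 0) > 2 * 1024	=>	sgs->group_imb = 1
	 */
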
@@ -2524,7 +2525,7 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
                update_sg_lb_stats(sd, group, this_cpu, idle, load_idx, sd_idle,
                                local_group, cpus, balance, &sgs);
 
-               if (local_group && balance && !(*balance))
+               if (local_group && !(*balance))
                        return;
 
                sds->total_load += sgs.group_load;
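
The "balance && " test can be dropped here, as in update_sg_lb_stats()
above and find_busiest_group() below, because the only caller that ever
passed balance == NULL was the newly idle path, and with
load_balance_newidle() removed that path now reaches this code through
plain load_balance() with a real pointer (see the idle_balance() hunk
near the end of this patch):

	int balance = 1;

	pulled_task = load_balance(this_cpu, this_rq,
				   sd, CPU_NEWLY_IDLE, &balance);
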
@@ -2549,6 +2550,7 @@ static inline void update_sd_lb_stats(struct sched_domain *sd, int this_cpu,
                        sds->max_load = sgs.avg_load;
                        sds->busiest = group;
                        sds->busiest_nr_running = sgs.sum_nr_running;
+                       sds->busiest_group_capacity = sgs.group_capacity;
                        sds->busiest_load_per_task = sgs.sum_weighted_load;
                        sds->group_imb = sgs.group_imb;
                }
@@ -2571,6 +2573,7 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
 {
        unsigned long tmp, pwr_now = 0, pwr_move = 0;
        unsigned int imbn = 2;
+       unsigned long scaled_busy_load_per_task;
 
        if (sds->this_nr_running) {
                sds->this_load_per_task /= sds->this_nr_running;
@@ -2581,8 +2584,12 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
                sds->this_load_per_task =
                        cpu_avg_load_per_task(this_cpu);
 
-       if (sds->max_load - sds->this_load + sds->busiest_load_per_task >=
-                       sds->busiest_load_per_task * imbn) {
+       scaled_busy_load_per_task = sds->busiest_load_per_task
+                                                * SCHED_LOAD_SCALE;
+       scaled_busy_load_per_task /= sds->busiest->cpu_power;
+
+       if (sds->max_load - sds->this_load + scaled_busy_load_per_task >=
+                       (scaled_busy_load_per_task * imbn)) {
                *imbalance = sds->busiest_load_per_task;
                return;
        }
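
sds->max_load and sds->this_load are already normalized by cpu_power
(see the avg_load computation in update_sg_lb_stats()), while
busiest_load_per_task is raw weighted load, so the old comparison mixed
units. A worked example of the new scaling (reviewer's illustration,
assuming SCHED_LOAD_SCALE == 1024):

	/*
	 * One nice-0 task (weight 1024) in a busiest group whose
	 * cpu_power is 2048:
	 *
	 *	scaled_busy_load_per_task = 1024 * 1024 / 2048 = 512
	 *
	 * i.e. the task amounts to half of one SCHED_LOAD_SCALE unit
	 * once group power is factored in, matching the scale that
	 * max_load and this_load are expressed in.
	 */
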
@@ -2633,7 +2640,14 @@ static inline void fix_small_imbalance(struct sd_lb_stats *sds,
 static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
                unsigned long *imbalance)
 {
-       unsigned long max_pull;
+       unsigned long max_pull, load_above_capacity = ~0UL;
+
+       sds->busiest_load_per_task /= sds->busiest_nr_running;
+       if (sds->group_imb) {
+               sds->busiest_load_per_task =
+                       min(sds->busiest_load_per_task, sds->avg_load);
+       }
+
        /*
         * In the presence of smp nice balancing, certain scenarios can have
         * max load less than avg load (as we skip the groups at or below
@@ -2644,9 +2658,29 @@ static inline void calculate_imbalance(struct sd_lb_stats *sds, int this_cpu,
                return fix_small_imbalance(sds, this_cpu, imbalance);
        }
 
-       /* Don't want to pull so many tasks that a group would go idle */
-       max_pull = min(sds->max_load - sds->avg_load,
-                       sds->max_load - sds->busiest_load_per_task);
+       if (!sds->group_imb) {
+               /*
+                * Don't want to pull so many tasks that a group would go idle.
+                */
+               load_above_capacity = (sds->busiest_nr_running -
+                                               sds->busiest_group_capacity);
+
+               load_above_capacity *= (SCHED_LOAD_SCALE * SCHED_LOAD_SCALE);
+
+               load_above_capacity /= sds->busiest->cpu_power;
+       }
+
+       /*
+        * We're trying to get all the cpus to the average_load, so we don't
+        * want to push ourselves above the average load, nor do we wish to
+        * reduce the max loaded cpu below the average load. At the same time,
+        * we also don't want to reduce the group load below the group capacity
+        * (so that we can implement power-savings policies etc). Thus we look
+        * for the minimum possible imbalance.
+        * Be careful of negative numbers as they'll appear as very large values
+        * with unsigned longs.
+        */
+       max_pull = min(sds->max_load - sds->avg_load, load_above_capacity);
 
        /* How much load to actually move to equalise the imbalance */
        *imbalance = min(max_pull * sds->busiest->cpu_power,
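
load_above_capacity converts "tasks beyond the group's capacity" into
the same power-scaled units as max_load and avg_load: one
SCHED_LOAD_SCALE per excess task, normalized by cpu_power. A worked
example (reviewer's illustration, SCHED_LOAD_SCALE == 1024):

	/*
	 * busiest_nr_running = 3, busiest_group_capacity = 2,
	 * busiest->cpu_power = 2048:
	 *
	 *	load_above_capacity = (3 - 2) * 1024 * 1024 / 2048 = 512
	 *	max_pull = min(max_load - avg_load, 512)
	 *
	 * In the group_imb case load_above_capacity is left at ~0UL,
	 * so the min() degenerates to max_load - avg_load.
	 */
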
@@ -2714,9 +2748,8 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
         * 4) This group is busier than the avg busyness at this
         *    sched_domain.
         * 5) The imbalance is within the specified limit.
-        * 6) Any rebalance would lead to ping-pong
         */
-       if (balance && !(*balance))
+       if (!(*balance))
                goto ret;
 
        if (!sds.busiest || sds.busiest_nr_running == 0)
@@ -2733,25 +2766,6 @@ find_busiest_group(struct sched_domain *sd, int this_cpu,
        if (100 * sds.max_load <= sd->imbalance_pct * sds.this_load)
                goto out_balanced;
 
-       sds.busiest_load_per_task /= sds.busiest_nr_running;
-       if (sds.group_imb)
-               sds.busiest_load_per_task =
-                       min(sds.busiest_load_per_task, sds.avg_load);
-
-       /*
-        * We're trying to get all the cpus to the average_load, so we don't
-        * want to push ourselves above the average load, nor do we wish to
-        * reduce the max loaded cpu below the average load, as either of these
-        * actions would just result in more rebalancing later, and ping-pong
-        * tasks around. Thus we look for the minimum possible imbalance.
-        * Negative imbalances (*we* are more loaded than anyone else) will
-        * be counted as no imbalance for these purposes -- we can't fix that
-        * by pulling tasks to us. Be careful of negative numbers as they'll
-        * appear as very large values with unsigned longs.
-        */
-       if (sds.max_load <= sds.busiest_load_per_task)
-               goto out_balanced;
-
        /* Looks like there is an imbalance. Compute it */
        calculate_imbalance(&sds, this_cpu, imbalance);
        return sds.busiest;
@@ -2788,12 +2802,23 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
                        continue;
 
                rq = cpu_rq(i);
-               wl = weighted_cpuload(i) * SCHED_LOAD_SCALE;
-               wl /= power;
+               wl = weighted_cpuload(i);
 
+               /*
+                * When comparing with imbalance, use weighted_cpuload()
+                * which is not scaled with the cpu power.
+                */
                if (capacity && rq->nr_running == 1 && wl > imbalance)
                        continue;
 
+               /*
+                * For the load comparisons with the other cpus, consider
+                * the weighted_cpuload() scaled with the cpu power, so that
+                * the load can be moved away from the cpu that is potentially
+                * running at a lower capacity.
+                */
+               wl = (wl * SCHED_LOAD_SCALE) / power;
+
                if (wl > max_load) {
                        max_load = wl;
                        busiest = rq;
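
The two comparisons now deliberately use different units. A worked
example (reviewer's illustration, SCHED_LOAD_SCALE == 1024):

	/*
	 * A single nice-0 task (wl = 1024) on a CPU whose cpu_power
	 * has been scaled down to 512 (e.g. by RT/irq time):
	 *
	 *	unscaled wl = 1024			vs. imbalance
	 *	scaled wl   = 1024 * 1024 / 512 = 2048	vs. other CPUs
	 *
	 * The unscaled check stops us from pulling the lone task when
	 * moving its full weight would overshoot the imbalance, while
	 * the scaled value still makes the low-capacity CPU look
	 * busiest, so load is preferentially moved away from it.
	 */
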
@@ -2812,6 +2837,39 @@ find_busiest_queue(struct sched_group *group, enum cpu_idle_type idle,
 /* Working cpumask for load_balance and load_balance_newidle. */
 static DEFINE_PER_CPU(cpumask_var_t, load_balance_tmpmask);
 
+static int need_active_balance(struct sched_domain *sd, int sd_idle, int idle)
+{
+       if (idle == CPU_NEWLY_IDLE) {
+               /*
+                * The only task running in a non-idle cpu can be moved to this
+                * cpu in an attempt to completely free up the other CPU
+                * package.
+                *
+                * The package power saving logic comes from
+                * find_busiest_group(). If there is no imbalance, then
+                * f_b_g() will return NULL. However when sched_mc={1,2} then
+                * f_b_g() will select a group from which a running task may be
+                * pulled to this cpu in order to make the other package idle.
+                * If there is no opportunity to make a package idle and if
+                * there is no imbalance, then f_b_g() will return NULL and no
+                * action will be taken in the newly idle balance path.
+                *
+                * Under normal task pull operation due to imbalance, there
+                * will be more than one task in the source run queue and
+                * move_tasks() will succeed.  ld_moved will be true and this
+                * active balance code will not be triggered.
+                */
+               if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
+                   !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
+                       return 0;
+
+               if (sched_mc_power_savings < POWERSAVINGS_BALANCE_WAKEUP)
+                       return 0;
+       }
+
+       return unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2);
+}
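
This helper folds the power-savings escape hatches that
load_balance_newidle() (deleted below) used to open-code into the common
fail-count damping. For a feel of the threshold (illustrative values
only; cache_nice_tries varies by domain level):

	/*
	 * With sd->cache_nice_tries == 1, active balancing fires only
	 * once nr_balance_failed > 3, i.e. on the 4th consecutive
	 * failed attempt.  A newly idle CPU additionally bails out
	 * early unless sched_mc >= POWERSAVINGS_BALANCE_WAKEUP asked
	 * it to pull a lone running task over to empty a package.
	 */
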
+
 /*
  * Check this_cpu to ensure it is balanced within domain. Attempt to move
  * tasks if there is an imbalance.
@@ -2898,8 +2956,7 @@ redo:
                schedstat_inc(sd, lb_failed[idle]);
                sd->nr_balance_failed++;
 
-               if (unlikely(sd->nr_balance_failed > sd->cache_nice_tries+2)) {
-
+               if (need_active_balance(sd, sd_idle, idle)) {
                        raw_spin_lock_irqsave(&busiest->lock, flags);
 
                        /* don't kick the migration_thread, if the curr
@@ -2973,154 +3030,6 @@ out:
        return ld_moved;
 }
 
-/*
- * Check this_cpu to ensure it is balanced within domain. Attempt to move
- * tasks if there is an imbalance.
- *
- * Called from schedule when this_rq is about to become idle (CPU_NEWLY_IDLE).
- * this_rq is locked.
- */
-static int
-load_balance_newidle(int this_cpu, struct rq *this_rq, struct sched_domain *sd)
-{
-       struct sched_group *group;
-       struct rq *busiest = NULL;
-       unsigned long imbalance;
-       int ld_moved = 0;
-       int sd_idle = 0;
-       int all_pinned = 0;
-       struct cpumask *cpus = __get_cpu_var(load_balance_tmpmask);
-
-       cpumask_copy(cpus, cpu_active_mask);
-
-       /*
-        * When power savings policy is enabled for the parent domain, idle
-        * sibling can pick up load irrespective of busy siblings. In this case,
-        * let the state of idle sibling percolate up as IDLE, instead of
-        * portraying it as CPU_NOT_IDLE.
-        */
-       if (sd->flags & SD_SHARE_CPUPOWER &&
-           !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
-               sd_idle = 1;
-
-       schedstat_inc(sd, lb_count[CPU_NEWLY_IDLE]);
-redo:
-       update_shares_locked(this_rq, sd);
-       group = find_busiest_group(sd, this_cpu, &imbalance, CPU_NEWLY_IDLE,
-                                  &sd_idle, cpus, NULL);
-       if (!group) {
-               schedstat_inc(sd, lb_nobusyg[CPU_NEWLY_IDLE]);
-               goto out_balanced;
-       }
-
-       busiest = find_busiest_queue(group, CPU_NEWLY_IDLE, imbalance, cpus);
-       if (!busiest) {
-               schedstat_inc(sd, lb_nobusyq[CPU_NEWLY_IDLE]);
-               goto out_balanced;
-       }
-
-       BUG_ON(busiest == this_rq);
-
-       schedstat_add(sd, lb_imbalance[CPU_NEWLY_IDLE], imbalance);
-
-       ld_moved = 0;
-       if (busiest->nr_running > 1) {
-               /* Attempt to move tasks */
-               double_lock_balance(this_rq, busiest);
-               /* this_rq->clock is already updated */
-               update_rq_clock(busiest);
-               ld_moved = move_tasks(this_rq, this_cpu, busiest,
-                                       imbalance, sd, CPU_NEWLY_IDLE,
-                                       &all_pinned);
-               double_unlock_balance(this_rq, busiest);
-
-               if (unlikely(all_pinned)) {
-                       cpumask_clear_cpu(cpu_of(busiest), cpus);
-                       if (!cpumask_empty(cpus))
-                               goto redo;
-               }
-       }
-
-       if (!ld_moved) {
-               int active_balance = 0;
-
-               schedstat_inc(sd, lb_failed[CPU_NEWLY_IDLE]);
-               if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
-                   !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
-                       return -1;
-
-               if (sched_mc_power_savings < POWERSAVINGS_BALANCE_WAKEUP)
-                       return -1;
-
-               if (sd->nr_balance_failed++ < 2)
-                       return -1;
-
-               /*
-                * The only task running in a non-idle cpu can be moved to this
-                * cpu in an attempt to completely freeup the other CPU
-                * package. The same method used to move task in load_balance()
-                * have been extended for load_balance_newidle() to speedup
-                * consolidation at sched_mc=POWERSAVINGS_BALANCE_WAKEUP (2)
-                *
-                * The package power saving logic comes from
-                * find_busiest_group().  If there are no imbalance, then
-                * f_b_g() will return NULL.  However when sched_mc={1,2} then
-                * f_b_g() will select a group from which a running task may be
-                * pulled to this cpu in order to make the other package idle.
-                * If there is no opportunity to make a package idle and if
-                * there are no imbalance, then f_b_g() will return NULL and no
-                * action will be taken in load_balance_newidle().
-                *
-                * Under normal task pull operation due to imbalance, there
-                * will be more than one task in the source run queue and
-                * move_tasks() will succeed.  ld_moved will be true and this
-                * active balance code will not be triggered.
-                */
-
-               /* Lock busiest in correct order while this_rq is held */
-               double_lock_balance(this_rq, busiest);
-
-               /*
-                * don't kick the migration_thread, if the curr
-                * task on busiest cpu can't be moved to this_cpu
-                */
-               if (!cpumask_test_cpu(this_cpu, &busiest->curr->cpus_allowed)) {
-                       double_unlock_balance(this_rq, busiest);
-                       all_pinned = 1;
-                       return ld_moved;
-               }
-
-               if (!busiest->active_balance) {
-                       busiest->active_balance = 1;
-                       busiest->push_cpu = this_cpu;
-                       active_balance = 1;
-               }
-
-               double_unlock_balance(this_rq, busiest);
-               /*
-                * Should not call ttwu while holding a rq->lock
-                */
-               raw_spin_unlock(&this_rq->lock);
-               if (active_balance)
-                       wake_up_process(busiest->migration_thread);
-               raw_spin_lock(&this_rq->lock);
-
-       } else
-               sd->nr_balance_failed = 0;
-
-       update_shares_locked(this_rq, sd);
-       return ld_moved;
-
-out_balanced:
-       schedstat_inc(sd, lb_balanced[CPU_NEWLY_IDLE]);
-       if (!sd_idle && sd->flags & SD_SHARE_CPUPOWER &&
-           !test_sd_parent(sd, SD_POWERSAVINGS_BALANCE))
-               return -1;
-       sd->nr_balance_failed = 0;
-
-       return 0;
-}
-
 /*
  * idle_balance is called by schedule() if this_cpu is about to become
  * idle. Attempts to pull tasks from other CPUs.
@@ -3136,16 +3045,23 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
        if (this_rq->avg_idle < sysctl_sched_migration_cost)
                return;
 
+       /*
+        * Drop the rq->lock, but keep IRQ/preempt disabled.
+        */
+       raw_spin_unlock(&this_rq->lock);
+
        for_each_domain(this_cpu, sd) {
                unsigned long interval;
+               int balance = 1;
 
                if (!(sd->flags & SD_LOAD_BALANCE))
                        continue;
 
-               if (sd->flags & SD_BALANCE_NEWIDLE)
+               if (sd->flags & SD_BALANCE_NEWIDLE) {
                        /* If we've pulled tasks over, stop searching: */
-                       pulled_task = load_balance_newidle(this_cpu, this_rq,
-                                                          sd);
+                       pulled_task = load_balance(this_cpu, this_rq,
+                                                  sd, CPU_NEWLY_IDLE, &balance);
+               }
 
                interval = msecs_to_jiffies(sd->balance_interval);
                if (time_after(next_balance, sd->last_balance + interval))
@@ -3155,6 +3071,9 @@ static void idle_balance(int this_cpu, struct rq *this_rq)
                        break;
                }
        }
+
+       raw_spin_lock(&this_rq->lock);
+
        if (pulled_task || time_after(jiffies, this_rq->next_balance)) {
                /*
                 * We are going idle. next_balance may be set based on
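
idle_balance() is entered from schedule() with this_rq->lock held,
whereas load_balance() expects to take the runqueue locks itself, so the
lock is dropped around the domain walk; interrupts and preemption stay
disabled, so the CPU cannot change underneath us. This is what allows
the dedicated load_balance_newidle() above to be deleted. The resulting
pattern, condensed:

	raw_spin_unlock(&this_rq->lock);	/* IRQs still off */
	for_each_domain(this_cpu, sd) {
		int balance = 1;
		/* ... */
		pulled_task = load_balance(this_cpu, this_rq, sd,
					   CPU_NEWLY_IDLE, &balance);
	}
	raw_spin_lock(&this_rq->lock);	/* retaken before schedule() continues */
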
@@ -3557,7 +3476,7 @@ static void run_rebalance_domains(struct softirq_action *h)
 
 static inline int on_null_domain(int cpu)
 {
-       return !rcu_dereference(cpu_rq(cpu)->sd);
+       return !rcu_dereference_sched(cpu_rq(cpu)->sd);
 }
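
This hunk is the change the subject line refers to. on_null_domain()
runs with preemption disabled (from the scheduler tick / nohz path)
rather than under rcu_read_lock(), so with CONFIG_PROVE_RCU a plain
rcu_dereference() would trip a false RCU-lockdep splat. The sched domain
tree is updated under synchronize_sched(), and rcu_dereference_sched()
asserts exactly that flavour of protection. Its lockdep-enabled
definition at the time was roughly (reviewer's sketch, check
include/linux/rcupdate.h):

	#define rcu_dereference_sched(p) \
		rcu_dereference_check((p), rcu_read_lock_sched_held())

	/*
	 * rcu_read_lock_sched_held() is satisfied by preempt_disable(),
	 * disabled interrupts, or an explicit rcu_read_lock_sched().
	 */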
 
 /*