Merge branch 'linus' into sched/urgent
author Ingo Molnar <mingo@elte.hu>
Mon, 5 Jan 2009 12:53:39 +0000 (13:53 +0100)
committer Ingo Molnar <mingo@elte.hu>
Mon, 5 Jan 2009 12:53:39 +0000 (13:53 +0100)
kernel/sched_fair.c

diff --combined kernel/sched_fair.c
index b808563f4f19af061b687abdd86de60f96ee4ab7,56c0efe902a79bca1c578aa7fcd5e7d5f0df3144..e0c0b4bc3f08e822d976fe2d6c222300a61c6352
@@@ -385,6 -385,20 +385,6 @@@ int sched_nr_latency_handler(struct ctl
  }
  #endif
  
 -/*
 - * delta *= P[w / rw]
 - */
 -static inline unsigned long
 -calc_delta_weight(unsigned long delta, struct sched_entity *se)
 -{
 -      for_each_sched_entity(se) {
 -              delta = calc_delta_mine(delta,
 -                              se->load.weight, &cfs_rq_of(se)->load);
 -      }
 -
 -      return delta;
 -}
 -
  /*
   * delta /= w
   */
@@@ -426,20 -440,12 +426,20 @@@ static u64 __sched_period(unsigned lon
   */
  static u64 sched_slice(struct cfs_rq *cfs_rq, struct sched_entity *se)
  {
 -      unsigned long nr_running = cfs_rq->nr_running;
 +      u64 slice = __sched_period(cfs_rq->nr_running + !se->on_rq);
  
 -      if (unlikely(!se->on_rq))
 -              nr_running++;
 +      for_each_sched_entity(se) {
 +              struct load_weight *load = &cfs_rq->load;
  
 -      return calc_delta_weight(__sched_period(nr_running), se);
 +              if (unlikely(!se->on_rq)) {
 +                      struct load_weight lw = cfs_rq->load;
 +
 +                      update_load_add(&lw, se->load.weight);
 +                      load = &lw;
 +              }
 +              slice = calc_delta_mine(slice, se->load.weight, load);
 +      }
 +      return slice;
  }
  
  /*
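The two hunks above fold the removed calc_delta_weight() helper into sched_slice(): the period for nr_running tasks (counting the waking entity if it is not yet on the runqueue) is scaled at each level of the group hierarchy by that entity's share of its runqueue load, i.e. delta *= w / rw repeated up the chain. Below is a minimal user-space sketch of that scaling, with made-up weights and loads; scale_delta() is a plain-division stand-in for the kernel's calc_delta_mine(), which performs the same computation with fixed-point inverse weights.

/*
 * User-space sketch, not kernel code: the hierarchical slice scaling
 * that sched_slice() now performs inline.  The (weight, runqueue load)
 * pairs and the 12ms period are made-up illustration values.
 */
#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

struct level {
	uint64_t weight;  /* this entity's load weight (w)   */
	uint64_t rq_load; /* total load of its runqueue (rw) */
};

/* delta *= w / rw: the effect of one calc_delta_mine() call */
static uint64_t scale_delta(uint64_t delta, uint64_t w, uint64_t rw)
{
	return delta * w / rw;
}

int main(void)
{
	/* a task of weight 1024 inside a group of weight 2048 */
	struct level chain[] = {
		{ 1024, 3072 }, /* task vs. its group's cfs_rq */
		{ 2048, 4096 }, /* group vs. the root cfs_rq   */
	};
	uint64_t slice = 12000000ULL; /* 12ms period, in nanoseconds */
	size_t i;

	for (i = 0; i < sizeof(chain) / sizeof(chain[0]); i++)
		slice = scale_delta(slice, chain[i].weight, chain[i].rq_load);

	/* prints 2000000: 12ms * (1024/3072) * (2048/4096) */
	printf("slice = %llu ns\n", (unsigned long long)slice);
	return 0;
}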
@@@ -1013,16 -1019,33 +1013,33 @@@ static void yield_task_fair(struct rq *
   * search starts with cpus closest then further out as needed,
   * so we always favor a closer, idle cpu.
   * Domains may include CPUs that are not usable for migration,
-  * hence we need to mask them out (cpu_active_map)
+  * hence we need to mask them out (cpu_active_mask)
   *
   * Returns the CPU we should wake onto.
   */
  #if defined(ARCH_HAS_SCHED_WAKE_IDLE)
  static int wake_idle(int cpu, struct task_struct *p)
  {
-       cpumask_t tmp;
        struct sched_domain *sd;
        int i;
+       unsigned int chosen_wakeup_cpu;
+       int this_cpu;
+       /*
+        * At POWERSAVINGS_BALANCE_WAKEUP level, if both this_cpu and prev_cpu
+        * are idle and this is not a kernel thread and this task's affinity
+        * allows it to be moved to preferred cpu, then just move!
+        */
+       this_cpu = smp_processor_id();
+       chosen_wakeup_cpu =
+               cpu_rq(this_cpu)->rd->sched_mc_preferred_wakeup_cpu;
+       if (sched_mc_power_savings >= POWERSAVINGS_BALANCE_WAKEUP &&
+               idle_cpu(cpu) && idle_cpu(this_cpu) &&
+               p->mm && !(p->flags & PF_KTHREAD) &&
+               cpu_isset(chosen_wakeup_cpu, p->cpus_allowed))
+               return chosen_wakeup_cpu;
  
        /*
         * If it is idle, then it is the best cpu to run this task.
         *
         * This cpu is also the best, if it has more than one task already.
         * Siblings must be also busy(in most cases) as they didn't already
         * pickup the extra load from this cpu and hence we need not check
         * sibling runqueue info. This will avoid the checks and cache miss
         * penalties associated with that.
         */
        if (idle_cpu(cpu) || cpu_rq(cpu)->cfs.nr_running > 1)
                return cpu;
  
        for_each_domain(cpu, sd) {
                if ((sd->flags & SD_WAKE_IDLE)
                    || ((sd->flags & SD_WAKE_IDLE_FAR)
                        && !task_hot(p, task_rq(p)->clock, sd))) {
-                       cpus_and(tmp, sd->span, p->cpus_allowed);
-                       cpus_and(tmp, tmp, cpu_active_map);
-                       for_each_cpu_mask_nr(i, tmp) {
-                               if (idle_cpu(i)) {
+                       for_each_cpu_and(i, sched_domain_span(sd),
+                                        &p->cpus_allowed) {
+                               if (cpu_active(i) && idle_cpu(i)) {
                                        if (i != task_cpu(p)) {
                                                schedstat_inc(p,
                                                       se.nr_wakeups_idle);
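The wake_idle() hunk above does two independent things. The new power-savings branch short-circuits to the domain's preferred wakeup CPU when both this_cpu and the task's CPU are idle, and the scan itself drops the on-stack cpumask_t temporary: instead of building tmp = span & cpus_allowed & cpu_active_map with two cpus_and() calls, it walks the intersection directly with for_each_cpu_and() and tests cpu_active(i) per iteration, which matters once NR_CPUS is large enough that a full cpumask no longer belongs on the stack. A user-space sketch of that iteration pattern follows, with uint64_t words standing in for struct cpumask and made-up bit values.

/*
 * User-space sketch, not kernel code: walking the intersection of
 * bitmasks directly, the way for_each_cpu_and() does, instead of
 * materializing a temporary mask first.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t domain_span  = 0x0f; /* CPUs 0-3 in this domain      */
	uint64_t cpus_allowed = 0x0a; /* task may run on CPUs 1 and 3 */
	uint64_t cpu_active   = 0x0b; /* CPUs 0, 1 and 3 are active   */
	int i;

	/*
	 * Old shape: tmp = span & allowed; tmp &= active; walk tmp.
	 * New shape: walk (span & allowed), test "active" per bit;
	 * same result, no temporary mask.
	 */
	for (i = 0; i < 64; i++) {
		if (!(((domain_span & cpus_allowed) >> i) & 1))
			continue;
		if (!((cpu_active >> i) & 1))
			continue;
		printf("candidate CPU: %d\n", i); /* prints 1 and 3 */
	}
	return 0;
}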
@@@ -1236,13 -1258,13 +1252,13 @@@ static int select_task_rq_fair(struct t
         * this_cpu and prev_cpu are present in:
         */
        for_each_domain(this_cpu, sd) {
-               if (cpu_isset(prev_cpu, sd->span)) {
+               if (cpumask_test_cpu(prev_cpu, sched_domain_span(sd))) {
                        this_sd = sd;
                        break;
                }
        }
  
-       if (unlikely(!cpu_isset(this_cpu, p->cpus_allowed)))
+       if (unlikely(!cpumask_test_cpu(this_cpu, &p->cpus_allowed)))
                goto out;
  
        /*
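The select_task_rq_fair() hunk is part of the same cpumask conversion: cpu_isset() on a cpumask_t value gives way to cpumask_test_cpu() and sched_domain_span(), which traffic in struct cpumask pointers so that large masks are never copied by value. A small user-space sketch of the pointer-based test, with one 64-bit word standing in for the real, possibly multi-word, struct cpumask and made-up mask contents:

/*
 * User-space sketch, not kernel code: a by-pointer mask test shaped
 * like cpumask_test_cpu(cpu, maskp); no mask copy is involved.
 */
#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

struct mask_sketch {
	uint64_t bits;
};

static bool mask_test_cpu(int cpu, const struct mask_sketch *m)
{
	return (m->bits >> cpu) & 1;
}

int main(void)
{
	struct mask_sketch allowed = { .bits = 0x5 }; /* CPUs 0 and 2 */
	int cpu;

	for (cpu = 0; cpu < 4; cpu++)
		printf("cpu %d allowed: %d\n",
		       cpu, (int)mask_test_cpu(cpu, &allowed));
	return 0;
}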