Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched-devel
author    Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 21 Mar 2008 17:05:19 +0000 (10:05 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Fri, 21 Mar 2008 17:05:19 +0000 (10:05 -0700)
* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mingo/linux-2.6-sched-devel:
  sched: add arch_update_cpu_topology hook.
  sched: add exported arch_reinit_sched_domains() to header file.
  sched: remove double unlikely from schedule()
  sched: cleanup old and rarely used 'debug' features.
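
The first two items wire up a topology-refresh hook: kernel/sched.c gains an
empty arch_update_cpu_topology() marked weak, architectures may override it
with a strong definition, and arch_reinit_sched_domains() is exported through
the header so arch code can trigger a domain rebuild. A minimal userspace
sketch of the weak-symbol pattern involved (gcc/clang specific; only the hook
name comes from the patch, the demo harness is ours):

    /* weak_hook_demo.c -- build: gcc weak_hook_demo.c */
    #include <stdio.h>

    /* Weak default, mirroring the empty stub added to kernel/sched.c:
     * used only when no strong definition of the symbol is linked in. */
    void __attribute__((weak)) arch_update_cpu_topology(void)
    {
            puts("weak default: nothing to update");
    }

    int main(void)
    {
            /* Linking in another object that provides a strong
             * arch_update_cpu_topology() silently replaces this body. */
            arch_update_cpu_topology();
            return 0;
    }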

include/linux/sched.h
include/linux/topology.h
kernel/sched.c
kernel/sched_fair.c

diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3625fcaf5d0f2e3f08ff928a567793aa8f890309..fed07d03364e869df06d7f1405d23d75850fe2b7 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -790,6 +790,7 @@ struct sched_domain {
 };
 
 extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new);
+extern int arch_reinit_sched_domains(void);
 
 #endif /* CONFIG_SMP */
 
diff --git a/include/linux/topology.h b/include/linux/topology.h
index 2d8dac8799cf3191084eea9a185ce6ac66051c1c..bd14f8b30f0998a874ecc33a220a673da5aae9c8 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -50,6 +50,8 @@
        for_each_online_node(node)                                              \
                if (nr_cpus_node(node))
 
+void arch_update_cpu_topology(void);
+
 /* Conform to ACPI 2.0 SLIT distance definitions */
 #define LOCAL_DISTANCE         10
 #define REMOTE_DISTANCE                20
diff --git a/kernel/sched.c b/kernel/sched.c
index 3f7c5eb254e205c24bc95bf5ac264e28d918d9b8..28c73f07efb2f7506f70be5ceb7a44c134770afa 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -594,18 +594,14 @@ enum {
        SCHED_FEAT_NEW_FAIR_SLEEPERS    = 1,
        SCHED_FEAT_WAKEUP_PREEMPT       = 2,
        SCHED_FEAT_START_DEBIT          = 4,
-       SCHED_FEAT_TREE_AVG             = 8,
-       SCHED_FEAT_APPROX_AVG           = 16,
-       SCHED_FEAT_HRTICK               = 32,
-       SCHED_FEAT_DOUBLE_TICK          = 64,
+       SCHED_FEAT_HRTICK               = 8,
+       SCHED_FEAT_DOUBLE_TICK          = 16,
 };
 
 const_debug unsigned int sysctl_sched_features =
                SCHED_FEAT_NEW_FAIR_SLEEPERS    * 1 |
                SCHED_FEAT_WAKEUP_PREEMPT       * 1 |
                SCHED_FEAT_START_DEBIT          * 1 |
-               SCHED_FEAT_TREE_AVG             * 0 |
-               SCHED_FEAT_APPROX_AVG           * 0 |
                SCHED_FEAT_HRTICK               * 1 |
                SCHED_FEAT_DOUBLE_TICK          * 0;
 
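For context, these flags are only ever tested through a token-pasting macro,
so dropping TREE_AVG and APPROX_AVG and renumbering the remaining bits is safe
as long as every user refers to them symbolically. A self-contained sketch
(the sched_feat() macro matches kernel/sched.c of this era; the main() harness
is ours):

    #include <stdio.h>

    enum {
            SCHED_FEAT_NEW_FAIR_SLEEPERS    = 1,
            SCHED_FEAT_WAKEUP_PREEMPT       = 2,
            SCHED_FEAT_START_DEBIT          = 4,
            SCHED_FEAT_HRTICK               = 8,
            SCHED_FEAT_DOUBLE_TICK          = 16,
    };

    /* The "FEATURE * 0|1" idiom keeps every flag listed while the
     * multiplier toggles it on or off in the packed word. */
    static unsigned int sysctl_sched_features =
                    SCHED_FEAT_NEW_FAIR_SLEEPERS    * 1 |
                    SCHED_FEAT_WAKEUP_PREEMPT       * 1 |
                    SCHED_FEAT_START_DEBIT          * 1 |
                    SCHED_FEAT_HRTICK               * 1 |
                    SCHED_FEAT_DOUBLE_TICK          * 0;

    /* sched_feat(x) masks the packed word with SCHED_FEAT_##x. */
    #define sched_feat(x) (sysctl_sched_features & SCHED_FEAT_##x)

    int main(void)
    {
            printf("HRTICK: %d\n", !!sched_feat(HRTICK));
            printf("DOUBLE_TICK: %d\n", !!sched_feat(DOUBLE_TICK));
            return 0;
    }
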
@@ -3886,7 +3882,7 @@ need_resched_nonpreemptible:
 
        if (prev->state && !(preempt_count() & PREEMPT_ACTIVE)) {
                if (unlikely((prev->state & TASK_INTERRUPTIBLE) &&
-                               unlikely(signal_pending(prev)))) {
+                               signal_pending(prev))) {
                        prev->state = TASK_RUNNING;
                } else {
                        deactivate_task(rq, prev, 1);
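
The schedule() hunk above drops a nested hint: unlikely(x) expands to
__builtin_expect(!!(x), 0), so the outer annotation already covers the whole
condition and a second one around signal_pending() adds nothing. A compilable
illustration (the stub stands in for the kernel helper):

    /* nested_hint.c -- why one unlikely() suffices (gcc/clang builtin). */
    #define unlikely(x) __builtin_expect(!!(x), 0)

    static int signal_pending_stub(int sig)   /* stand-in, not the kernel API */
    {
            return sig != 0;
    }

    int wants_wakeup(int state, int sig)
    {
            /* One hint on the combined condition, as in the patched code. */
            if (unlikely((state & 1) && signal_pending_stub(sig)))
                    return 1;   /* rare path */
            return 0;
    }
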
@@ -6811,6 +6807,10 @@ static int ndoms_cur;            /* number of sched domains in 'doms_cur' */
  */
 static cpumask_t fallback_doms;
 
+void __attribute__((weak)) arch_update_cpu_topology(void)
+{
+}
+
 /*
  * Set up scheduler domains and groups. Callers must hold the hotplug lock.
  * For now this just excludes isolated cpus, but could be used to
@@ -6820,6 +6820,7 @@ static int arch_init_sched_domains(const cpumask_t *cpu_map)
 {
        int err;
 
+       arch_update_cpu_topology();
        ndoms_cur = 1;
        doms_cur = kmalloc(sizeof(cpumask_t), GFP_KERNEL);
        if (!doms_cur)
@@ -6924,7 +6925,7 @@ match2:
 }
 
 #if defined(CONFIG_SCHED_MC) || defined(CONFIG_SCHED_SMT)
-static int arch_reinit_sched_domains(void)
+int arch_reinit_sched_domains(void)
 {
        int err;
 
diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c
index b85cac4b5e25627d057773316f534116d3b95905..86a93376282c2e43773dba5d6a11762e111a5eb6 100644
--- a/kernel/sched_fair.c
+++ b/kernel/sched_fair.c
@@ -302,11 +302,6 @@ static u64 __sched_vslice(unsigned long rq_weight, unsigned long nr_running)
        return vslice;
 }
 
-static u64 sched_vslice(struct cfs_rq *cfs_rq)
-{
-       return __sched_vslice(cfs_rq->load.weight, cfs_rq->nr_running);
-}
-
 static u64 sched_vslice_add(struct cfs_rq *cfs_rq, struct sched_entity *se)
 {
        return __sched_vslice(cfs_rq->load.weight + se->load.weight,
@@ -504,15 +499,6 @@ place_entity(struct cfs_rq *cfs_rq, struct sched_entity *se, int initial)
        } else
                vruntime = cfs_rq->min_vruntime;
 
-       if (sched_feat(TREE_AVG)) {
-               struct sched_entity *last = __pick_last_entity(cfs_rq);
-               if (last) {
-                       vruntime += last->vruntime;
-                       vruntime >>= 1;
-               }
-       } else if (sched_feat(APPROX_AVG) && cfs_rq->nr_running)
-               vruntime += sched_vslice(cfs_rq)/2;
-
        /*
         * The 'current' period is already promised to the current tasks,
         * however the extra weight of the new task will slow them down a
         * little, place the new task so that it fits in the slot that
         * stays open at the end.
         */
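
With the TREE_AVG and APPROX_AVG branches gone, placement reduces to starting
from min_vruntime plus the optional start debit. A condensed, self-contained
sketch of that remaining shape (the types and helpers below are stubs, not the
kernel's):

    #include <stdint.h>

    typedef uint64_t u64;

    struct cfs_rq_stub {
            u64 min_vruntime;
            u64 vslice;          /* stand-in for the __sched_vslice() result */
    };

    static u64 place_vruntime(const struct cfs_rq_stub *cfs_rq, int initial,
                              int start_debit_enabled)
    {
            u64 vruntime = cfs_rq->min_vruntime;

            /* Brand-new tasks are pushed one weighted slice further out so
             * they fit the slot left open at the end of the current period. */
            if (initial && start_debit_enabled)
                    vruntime += cfs_rq->vslice;

            return vruntime;
    }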