sched, powerpc: Create a dedicated topology table
author Vincent Guittot <vincent.guittot@linaro.org>
Fri, 11 Apr 2014 09:44:39 +0000 (11:44 +0200)
committer Ingo Molnar <mingo@kernel.org>
Wed, 7 May 2014 11:33:51 +0000 (13:33 +0200)
Create a dedicated topology table to handle the asymmetric SMT feature of powerpc.
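
The architecture now registers its own sched_domain_topology_level table via
set_sched_topology() and decides SD_ASYM_PACKING in a per-level flags callback,
so the generic arch_sd_sibling_asym_packing() hook can be removed. Purely as an
illustration of the pattern (the my_arch_* names below are placeholders, not
part of this patch), an architecture-specific table looks like:

    #include <linux/sched.h>
    #include <linux/init.h>

    #ifdef CONFIG_SCHED_SMT
    /* Per-level flags callback: SMT siblings share capacity and cache */
    static const int my_arch_smt_flags(void)
    {
            return SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES;
    }
    #endif

    /* Architecture-private topology table, from SMT siblings up to the die */
    static struct sched_domain_topology_level my_arch_topology[] = {
    #ifdef CONFIG_SCHED_SMT
            { cpu_smt_mask, my_arch_smt_flags, SD_INIT_NAME(SMT) },
    #endif
            { cpu_cpu_mask, SD_INIT_NAME(DIE) },
            { NULL, },
    };

    /* Called once from the arch's SMP bringup code, e.g. smp_cpus_done() */
    static void __init my_arch_register_topology(void)
    {
            set_sched_topology(my_arch_topology);
    }

The flags returned by the per-level callback are applied when the scheduler
builds the domain for that level, which is how SD_ASYM_PACKING now reaches the
SMT domain without a __weak arch hook in sd_init().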

Signed-off-by: Vincent Guittot <vincent.guittot@linaro.org>
Reviewed-by: Preeti U Murthy <preeti@linux.vnet.ibm.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Cc: Andy Fleming <afleming@freescale.com>
Cc: Anton Blanchard <anton@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Grant Likely <grant.likely@linaro.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: Paul Gortmaker <paul.gortmaker@windriver.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Preeti U. Murthy <preeti@linux.vnet.ibm.com>
Cc: Rob Herring <robh+dt@kernel.org>
Cc: Srivatsa S. Bhat <srivatsa.bhat@linux.vnet.ibm.com>
Cc: Toshi Kani <toshi.kani@hp.com>
Cc: Vasant Hegde <hegdevasant@linux.vnet.ibm.com>
Cc: tony.luck@intel.com
Cc: fenghua.yu@intel.com
Cc: schwidefsky@de.ibm.com
Cc: cmetcalf@tilera.com
Cc: dietmar.eggemann@arm.com
Cc: devicetree@vger.kernel.org
Cc: linuxppc-dev@lists.ozlabs.org
Link: http://lkml.kernel.org/r/1397209481-28542-4-git-send-email-vincent.guittot@linaro.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
arch/powerpc/kernel/smp.c
include/linux/sched.h
kernel/sched/core.c

diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index e2a4232c5871a19056b0b5b666adb540a17b1403..10ffffef041413dc6da76a7c9564e4b79804c72a 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -766,6 +766,28 @@ int setup_profiling_timer(unsigned int multiplier)
        return 0;
 }
 
+#ifdef CONFIG_SCHED_SMT
+/* Flags for the SMT level; asymmetric SMT adds SD_ASYM_PACKING */
+static const int powerpc_smt_flags(void)
+{
+       int flags = SD_SHARE_CPUPOWER | SD_SHARE_PKG_RESOURCES;
+
+       if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
+               printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
+               flags |= SD_ASYM_PACKING;
+       }
+       return flags;
+}
+#endif
+
+static struct sched_domain_topology_level powerpc_topology[] = {
+#ifdef CONFIG_SCHED_SMT
+       { cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) },
+#endif
+       { cpu_cpu_mask, SD_INIT_NAME(DIE) },
+       { NULL, },
+};
+
 void __init smp_cpus_done(unsigned int max_cpus)
 {
        cpumask_var_t old_mask;
@@ -790,15 +812,8 @@ void __init smp_cpus_done(unsigned int max_cpus)
 
        dump_numa_cpu_topology();
 
-}
+       set_sched_topology(powerpc_topology);
 
-int arch_sd_sibling_asym_packing(void)
-{
-       if (cpu_has_feature(CPU_FTR_ASYM_SMT)) {
-               printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
-               return SD_ASYM_PACKING;
-       }
-       return 0;
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 656b035c30e52032aa8b48c87aa8ef371fdbf74c..439a153b84032b28a52127391ca95e905c7c369a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -877,8 +877,6 @@ enum cpu_idle_type {
 #define SD_OVERLAP             0x2000  /* sched_domains of this level overlap */
 #define SD_NUMA                        0x4000  /* cross-node balancing */
 
-extern int __weak arch_sd_sibiling_asym_packing(void);
-
 #ifdef CONFIG_SCHED_SMT
 static inline const int cpu_smt_flags(void)
 {
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index e59e5aec745afeeeaeac7da7f82f7c0a53b754fe..7e348e238bf1723fb6eebe7070789d8b9f9e2003 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5796,11 +5796,6 @@ static void init_sched_groups_power(int cpu, struct sched_domain *sd)
        atomic_set(&sg->sgp->nr_busy_cpus, sg->group_weight);
 }
 
-int __weak arch_sd_sibling_asym_packing(void)
-{
-       return 0*SD_ASYM_PACKING;
-}
-
 /*
  * Initializers for schedule domains
  * Non-inlined to reduce accumulated stack pressure in build_sched_domains()
@@ -5981,7 +5976,6 @@ sd_init(struct sched_domain_topology_level *tl, int cpu)
        if (sd->flags & SD_SHARE_CPUPOWER) {
                sd->imbalance_pct = 110;
                sd->smt_gain = 1178; /* ~15% */
-               sd->flags |= arch_sd_sibling_asym_packing();
 
        } else if (sd->flags & SD_SHARE_PKG_RESOURCES) {
                sd->imbalance_pct = 117;