P-state software coordination for ACPI core
author		Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
		Wed, 14 Dec 2005 20:05:00 +0000 (15:05 -0500)
committer	Len Brown <len.brown@intel.com>
		Thu, 9 Feb 2006 08:21:48 +0000 (03:21 -0500)
http://bugzilla.kernel.org/show_bug.cgi?id=5737
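
Add acpi_processor_preregister_performance(), which evaluates _PSD for
every processor present in the ACPI namespace, validates the returned
coordination-domain package, and fills in each processor's
shared_cpu_map and shared_type so that cpufreq drivers know which CPUs
must coordinate their P-state transitions.  If the domain information
cannot be parsed, coordination is assumed absent and each CPU is left
in a domain of its own.  processor.h gains the ACPI 3.0 _PSD
coordination macros and struct acpi_psd_package, and struct
cpufreq_policy gains a shared_type field with matching
CPUFREQ_SHARED_TYPE_* definitions.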

Signed-off-by: Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
Signed-off-by: Len Brown <len.brown@intel.com>
drivers/acpi/processor_perflib.c
include/acpi/processor.h
include/linux/cpufreq.h

diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index abbdb37a7f5f37e062f58800318d66b67a37c5d1..ffc5280334c80c69cc5b71c731a8109a1d40d3cc 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -553,6 +553,234 @@ static void acpi_cpufreq_remove_file(struct acpi_processor *pr)
 }
 #endif                         /* CONFIG_X86_ACPI_CPUFREQ_PROC_INTF */
 
+static int acpi_processor_get_psd(struct acpi_processor        *pr)
+{
+       int result = 0;
+       acpi_status status = AE_OK;
+       struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+       struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
+       struct acpi_buffer state = {0, NULL};
+       union acpi_object  *psd = NULL;
+       struct acpi_psd_package *pdomain;
+
+       ACPI_FUNCTION_TRACE("acpi_processor_get_psd");
+
+       status = acpi_evaluate_object(pr->handle, "_PSD", NULL, &buffer);
+       if (ACPI_FAILURE(status)) {
+               return_VALUE(-ENODEV);
+       }
+
+       psd = (union acpi_object *) buffer.pointer;
+       if (!psd || (psd->type != ACPI_TYPE_PACKAGE)) {
+               ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSD data\n"));
+               result = -EFAULT;
+               goto end;
+       }
+
+       if (psd->package.count != 1) {
+               ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSD data\n"));
+               result = -EFAULT;
+               goto end;
+       }
+
+       pdomain = &(pr->performance->domain_info);
+
+       state.length = sizeof(struct acpi_psd_package);
+       state.pointer = pdomain;
+
+       status = acpi_extract_package(&(psd->package.elements[0]),
+               &format, &state);
+       if (ACPI_FAILURE(status)) {
+               ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSD data\n"));
+               result = -EFAULT;
+               goto end;
+       }
+
+       if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
+               ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _PSD:num_entries\n"));
+               result = -EFAULT;
+               goto end;
+       }
+
+       if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
+               ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _PSD:revision\n"));
+               result = -EFAULT;
+               goto end;
+       }
+
+end:
+       acpi_os_free(buffer.pointer);
+       return_VALUE(result);
+}
+
+int acpi_processor_preregister_performance(
+               struct acpi_processor_performance **performance)
+{
+       int count, count_target;
+       int retval = 0;
+       unsigned int i, j;
+       cpumask_t covered_cpus;
+       struct acpi_processor *pr;
+       struct acpi_psd_package *pdomain;
+       struct acpi_processor *match_pr;
+       struct acpi_psd_package *match_pdomain;
+
+       ACPI_FUNCTION_TRACE("acpi_processor_preregister_performance");
+
+       down(&performance_sem);
+
+       retval = 0;
+
+       /* Call _PSD for all CPUs */
+       for_each_cpu(i) {
+               pr = processors[i];
+               if (!pr) {
+                       /* Look only at processors in ACPI namespace */
+                       continue;
+               }
+
+               if (pr->performance) {
+                       retval = -EBUSY;
+                       continue;
+               }
+
+               if (!performance || !performance[i]) {
+                       retval = -EINVAL;
+                       continue;
+               }
+
+               pr->performance = performance[i];
+               cpu_set(i, pr->performance->shared_cpu_map);
+               if (acpi_processor_get_psd(pr)) {
+                       retval = -EINVAL;
+                       continue;
+               }
+       }
+       if (retval)
+               goto err_ret;
+
+       /*
+        * Now that we have _PSD data from all CPUs, let's set up P-state
+        * domain info.
+        */
+       for_each_cpu(i) {
+               pr = processors[i];
+               if (!pr)
+                       continue;
+
+               /* Basic validity check for domain info */
+               pdomain = &(pr->performance->domain_info);
+               if ((pdomain->revision != ACPI_PSD_REV0_REVISION) ||
+                   (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES)) {
+                       retval = -EINVAL;
+                       goto err_ret;
+               }
+               if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
+                   pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
+                   pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
+                       retval = -EINVAL;
+                       goto err_ret;
+               }
+       }
+
+       cpus_clear(covered_cpus);
+       for_each_cpu(i) {
+               pr = processors[i];
+               if (!pr)
+                       continue;
+
+               if (cpu_isset(i, covered_cpus))
+                       continue;
+
+               pdomain = &(pr->performance->domain_info);
+               cpu_set(i, pr->performance->shared_cpu_map);
+               cpu_set(i, covered_cpus);
+               if (pdomain->num_processors <= 1)
+                       continue;
+
+               /* Validate the Domain info */
+               count_target = pdomain->num_processors;
+               count = 1;
+               if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL ||
+                   pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL) {
+                       pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
+               } else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY) {
+                       pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ANY;
+               }
+
+               for_each_cpu(j) {
+                       if (i == j)
+                               continue;
+
+                       match_pr = processors[j];
+                       if (!match_pr)
+                               continue;
+
+                       match_pdomain = &(match_pr->performance->domain_info);
+                       if (match_pdomain->domain != pdomain->domain)
+                               continue;
+
+                       /* Here i and j are in the same domain */
+
+                       if (match_pdomain->num_processors != count_target) {
+                               retval = -EINVAL;
+                               goto err_ret;
+                       }
+
+                       if (pdomain->coord_type != match_pdomain->coord_type) {
+                               retval = -EINVAL;
+                               goto err_ret;
+                       }
+
+                       cpu_set(j, covered_cpus);
+                       cpu_set(j, pr->performance->shared_cpu_map);
+                       count++;
+               }
+
+               for_each_cpu(j) {
+                       if (i == j)
+                               continue;
+
+                       match_pr = processors[j];
+                       if (!match_pr)
+                               continue;
+
+                       match_pdomain = &(match_pr->performance->domain_info);
+                       if (match_pdomain->domain != pdomain->domain)
+                               continue;
+
+                       match_pr->performance->shared_type = 
+                                       pr->performance->shared_type;
+                       match_pr->performance->shared_cpu_map =
+                               pr->performance->shared_cpu_map;
+               }
+       }
+
+err_ret:
+       if (retval) {
+               ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error while parsing _PSD domain information. Assuming no coordination\n"));
+       }
+
+       for_each_cpu(i) {
+               pr = processors[i];
+               if (!pr || !pr->performance)
+                       continue;
+
+               /* Assume no coordination on any error parsing domain info */
+               if (retval) {
+                       cpus_clear(pr->performance->shared_cpu_map);
+                       cpu_set(i, pr->performance->shared_cpu_map);
+                       pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
+               }
+               pr->performance = NULL; /* Will be set for real in register */
+       }
+
+       up(&performance_sem);
+       return_VALUE(retval);
+}
+EXPORT_SYMBOL(acpi_processor_preregister_performance);
+
+
 int
 acpi_processor_register_performance(struct acpi_processor_performance
                                    *performance, unsigned int cpu)
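
For illustration only (not part of the patch): the five integers that
_PSD returns map one-to-one onto struct acpi_psd_package once
acpi_extract_package() is run with the "NNNNN" format string above.
The sketch below uses made-up values for a two-core package sharing one
software-any domain; only the field order and the sanity checks mirror
acpi_processor_get_psd().

	/* Hypothetical example; acpi_integer is stood in by a plain
	 * unsigned long long so this builds in user space. */
	#include <stdio.h>

	typedef unsigned long long acpi_integer;

	struct acpi_psd_package {
		acpi_integer num_entries;	/* must be ACPI_PSD_REV0_ENTRIES (5) */
		acpi_integer revision;		/* must be ACPI_PSD_REV0_REVISION (0) */
		acpi_integer domain;		/* dependency domain number */
		acpi_integer coord_type;	/* 0xfc SW_ALL, 0xfd SW_ANY, 0xfe HW_ALL */
		acpi_integer num_processors;	/* CPUs in this domain */
	} __attribute__ ((packed));

	int main(void)
	{
		/* Two cores, one domain, SW_ANY: either core may set the
		 * P-state on behalf of both. */
		struct acpi_psd_package pdomain = { 5, 0, 0, 0xfd, 2 };

		if (pdomain.num_entries != 5 || pdomain.revision != 0) {
			printf("Invalid _PSD data\n");
			return 1;
		}
		printf("domain %llu: coord_type 0x%llx, %llu processors\n",
		       (unsigned long long)pdomain.domain,
		       (unsigned long long)pdomain.coord_type,
		       (unsigned long long)pdomain.num_processors);
		return 0;
	}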
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index badf0277b1be7314a59f9b6a8c7836e25b14c314..0c46d1b3dda265407a62067bd27cd5ef86aa8844 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -3,6 +3,7 @@
 
 #include <linux/kernel.h>
 #include <linux/config.h>
+#include <linux/cpu.h>
 
 #include <asm/acpi.h>
 
 
 #define ACPI_PDC_REVISION_ID           0x1
 
+#define ACPI_PSD_REV0_REVISION         0 /* Support for _PSD as in ACPI 3.0 */
+#define ACPI_PSD_REV0_ENTRIES          5
+
+/*
+ * Types of coordination defined in ACPI 3.0. Same macros can be used across
+ * P, C and T states
+ */
+#define DOMAIN_COORD_TYPE_SW_ALL       0xfc
+#define DOMAIN_COORD_TYPE_SW_ANY       0xfd
+#define DOMAIN_COORD_TYPE_HW_ALL       0xfe
+
 /* Power Management */
 
 struct acpi_processor_cx;
@@ -66,6 +78,14 @@ struct acpi_processor_power {
 
 /* Performance Management */
 
+struct acpi_psd_package {
+       acpi_integer num_entries;
+       acpi_integer revision;
+       acpi_integer domain;
+       acpi_integer coord_type;
+       acpi_integer num_processors;
+} __attribute__ ((packed));
+
 struct acpi_pct_register {
        u8 descriptor;
        u16 length;
@@ -92,7 +112,9 @@ struct acpi_processor_performance {
        struct acpi_pct_register status_register;
        unsigned int state_count;
        struct acpi_processor_px *states;
-
+       struct acpi_psd_package domain_info;
+       cpumask_t shared_cpu_map;
+       unsigned int shared_type;
 };
 
 /* Throttling Control */
@@ -161,6 +183,9 @@ struct acpi_processor_errata {
        } piix4;
 };
 
+extern int acpi_processor_preregister_performance(
+               struct acpi_processor_performance **performance);
+
 extern int acpi_processor_register_performance(struct acpi_processor_performance
                                               *performance, unsigned int cpu);
 extern void acpi_processor_unregister_performance(struct
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 17866d7e2b71ad08e12fc9340d5b77448032da48..f7d9883669414b251eba04ea58850c40c08e07ed 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -73,6 +73,8 @@ struct cpufreq_real_policy {
 
 struct cpufreq_policy {
        cpumask_t               cpus;   /* affected CPUs */
+       unsigned int            shared_type; /* ANY or ALL affected CPUs
+                                               should set cpufreq */
        unsigned int            cpu;    /* cpu nr of registered CPU */
        struct cpufreq_cpuinfo  cpuinfo;/* see above */
 
@@ -99,6 +101,8 @@ struct cpufreq_policy {
 #define CPUFREQ_INCOMPATIBLE   (1)
 #define CPUFREQ_NOTIFY         (2)
 
+#define CPUFREQ_SHARED_TYPE_ALL        (0) /* All dependent CPUs should set freq */
+#define CPUFREQ_SHARED_TYPE_ANY        (1) /* Freq can be set from any dependent CPU */
 
 /******************** cpufreq transition notifiers *******************/
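
A rough driver-side sketch (also not part of this patch) of how an ACPI
cpufreq driver could consume the new interface: preregister all CPUs
once so the _PSD coordination data is gathered, then, in its per-CPU
init hook, register performance data and copy shared_cpu_map and
shared_type into the cpufreq policy.  The function and array names here
("my_*") are invented; the calls and fields are the ones added or
referenced by this patch.

	#include <linux/init.h>
	#include <linux/cpufreq.h>
	#include <acpi/processor.h>

	static struct acpi_processor_performance my_perf[NR_CPUS];
	static struct acpi_processor_performance *my_perf_ptrs[NR_CPUS];

	static int __init my_acpi_cpufreq_early_init(void)
	{
		unsigned int i;

		for (i = 0; i < NR_CPUS; i++)
			my_perf_ptrs[i] = &my_perf[i];

		/* Evaluates _PSD on every CPU and fills in shared_cpu_map and
		 * shared_type; falls back to "no coordination" on error. */
		return acpi_processor_preregister_performance(my_perf_ptrs);
	}

	static int my_acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
	{
		struct acpi_processor_performance *perf = &my_perf[policy->cpu];
		int result;

		result = acpi_processor_register_performance(perf, policy->cpu);
		if (result)
			return result;

		/* Propagate the _PSD coordination info into the cpufreq core. */
		policy->shared_type = perf->shared_type;
		if (policy->shared_type == CPUFREQ_SHARED_TYPE_ALL ||
		    policy->shared_type == CPUFREQ_SHARED_TYPE_ANY)
			policy->cpus = perf->shared_cpu_map;

		return 0;
	}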