Pull acpi_device_handle_cleanup into release branch
drivers/acpi/processor_perflib.c
/*
 * processor_perflib.c - ACPI Processor P-States Library ($Revision: 71 $)
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *                      - Added processor hotplug support
 *
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>

#ifdef CONFIG_X86_ACPI_CPUFREQ_PROC_INTF
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mutex.h>

#include <asm/uaccess.h>
#endif

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>

#define ACPI_PROCESSOR_COMPONENT        0x01000000
#define ACPI_PROCESSOR_CLASS            "processor"
#define ACPI_PROCESSOR_DRIVER_NAME      "ACPI Processor Driver"
#define ACPI_PROCESSOR_FILE_PERFORMANCE "performance"
#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("acpi_processor")

static DEFINE_MUTEX(performance_mutex);

/*
 * _PPC support is implemented as a CPUfreq policy notifier:
 * whenever a CPUfreq driver that is also registered with the ACPI
 * core is asked to change its policy, the maximum frequency is
 * clamped so that it stays within the platform limit.
 *
 * Also, when a new platform limit value is detected, the CPUfreq
 * policy is adjusted accordingly.
 */

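/*
 * acpi_processor_ppc_status flags: PPC_REGISTERED is set once the
 * policy notifier below has been registered with the CPUfreq core;
 * PPC_IN_USE is set as soon as a _PPC object is found on any CPU.
 */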
#define PPC_REGISTERED   1
#define PPC_IN_USE       2

static int acpi_processor_ppc_status = 0;

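/*
 * acpi_processor_ppc_notifier - CPUfreq policy notifier callback.
 * On CPUFREQ_INCOMPATIBLE events, clamp the policy's maximum frequency
 * to the frequency of the P-state indicated by the last _PPC value.
 */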
static int acpi_processor_ppc_notifier(struct notifier_block *nb,
                                       unsigned long event, void *data)
{
        struct cpufreq_policy *policy = data;
        struct acpi_processor *pr;
        unsigned int ppc = 0;

        mutex_lock(&performance_mutex);

        if (event != CPUFREQ_INCOMPATIBLE)
                goto out;

        pr = processors[policy->cpu];
        if (!pr || !pr->performance)
                goto out;

        ppc = (unsigned int)pr->performance_platform_limit;
        if (!ppc)
                goto out;

        if (ppc > pr->performance->state_count)
                goto out;

        cpufreq_verify_within_limits(policy, 0,
                                     pr->performance->states[ppc].
                                     core_frequency * 1000);

      out:
        mutex_unlock(&performance_mutex);

        return 0;
}

static struct notifier_block acpi_ppc_notifier_block = {
        .notifier_call = acpi_processor_ppc_notifier,
};

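/*
 * acpi_processor_get_platform_limit - evaluate _PPC for one processor.
 * Stores the result in pr->performance_platform_limit and marks _PPC
 * as in use; a missing _PPC object is not treated as an error.
 */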
static int acpi_processor_get_platform_limit(struct acpi_processor *pr)
{
        acpi_status status = 0;
        unsigned long ppc = 0;


        if (!pr)
                return -EINVAL;

        /*
         * _PPC indicates the maximum state currently supported by the platform
         * (e.g. 0 = states 0..n; 1 = states 1..n; etc.)
         */
        status = acpi_evaluate_integer(pr->handle, "_PPC", NULL, &ppc);

        if (status != AE_NOT_FOUND)
                acpi_processor_ppc_status |= PPC_IN_USE;

        if (ACPI_FAILURE(status) && status != AE_NOT_FOUND) {
                ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PPC"));
                return -ENODEV;
        }

        pr->performance_platform_limit = (int)ppc;

        return 0;
}

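/*
 * acpi_processor_ppc_has_changed - re-read _PPC and propagate the new
 * platform limit to CPUfreq by forcing a policy update for this CPU.
 */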
int acpi_processor_ppc_has_changed(struct acpi_processor *pr)
{
        int ret = acpi_processor_get_platform_limit(pr);
        if (ret < 0)
                return (ret);
        else
                return cpufreq_update_policy(pr->id);
}

void acpi_processor_ppc_init(void)
{
        if (!cpufreq_register_notifier
            (&acpi_ppc_notifier_block, CPUFREQ_POLICY_NOTIFIER))
                acpi_processor_ppc_status |= PPC_REGISTERED;
        else
                printk(KERN_DEBUG
                       "Warning: Processor Platform Limit not supported.\n");
}

void acpi_processor_ppc_exit(void)
{
        if (acpi_processor_ppc_status & PPC_REGISTERED)
                cpufreq_unregister_notifier(&acpi_ppc_notifier_block,
                                            CPUFREQ_POLICY_NOTIFIER);

        acpi_processor_ppc_status &= ~PPC_REGISTERED;
}

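/*
 * acpi_processor_get_performance_control - evaluate _PCT.
 * _PCT is a package of two buffers describing the performance control
 * and status registers; both are copied into pr->performance.
 */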
static int acpi_processor_get_performance_control(struct acpi_processor *pr)
{
        int result = 0;
        acpi_status status = 0;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *pct = NULL;
        union acpi_object obj = { 0 };


        status = acpi_evaluate_object(pr->handle, "_PCT", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PCT"));
                return -ENODEV;
        }

        pct = (union acpi_object *)buffer.pointer;
        if (!pct || (pct->type != ACPI_TYPE_PACKAGE)
            || (pct->package.count != 2)) {
                printk(KERN_ERR PREFIX "Invalid _PCT data\n");
                result = -EFAULT;
                goto end;
        }

        /*
         * control_register
         */

        obj = pct->package.elements[0];

        if ((obj.type != ACPI_TYPE_BUFFER)
            || (obj.buffer.length < sizeof(struct acpi_pct_register))
            || (obj.buffer.pointer == NULL)) {
                printk(KERN_ERR PREFIX "Invalid _PCT data (control_register)\n");
                result = -EFAULT;
                goto end;
        }
        memcpy(&pr->performance->control_register, obj.buffer.pointer,
               sizeof(struct acpi_pct_register));

        /*
         * status_register
         */

        obj = pct->package.elements[1];

        if ((obj.type != ACPI_TYPE_BUFFER)
            || (obj.buffer.length < sizeof(struct acpi_pct_register))
            || (obj.buffer.pointer == NULL)) {
                printk(KERN_ERR PREFIX "Invalid _PCT data (status_register)\n");
                result = -EFAULT;
                goto end;
        }

        memcpy(&pr->performance->status_register, obj.buffer.pointer,
               sizeof(struct acpi_pct_register));

      end:
        kfree(buffer.pointer);

        return result;
}

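/*
 * acpi_processor_get_performance_states - evaluate _PSS.
 * Each element of the _PSS package is a six-integer P-state descriptor
 * (core_frequency, power, transition_latency, bus_master_latency,
 * control, status); the table is copied into pr->performance->states.
 */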
static int acpi_processor_get_performance_states(struct acpi_processor *pr)
{
        int result = 0;
        acpi_status status = AE_OK;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        struct acpi_buffer format = { sizeof("NNNNNN"), "NNNNNN" };
        struct acpi_buffer state = { 0, NULL };
        union acpi_object *pss = NULL;
        int i;


        status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PSS"));
                return -ENODEV;
        }

        pss = (union acpi_object *)buffer.pointer;
        if (!pss || (pss->type != ACPI_TYPE_PACKAGE)) {
                printk(KERN_ERR PREFIX "Invalid _PSS data\n");
                result = -EFAULT;
                goto end;
        }

        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d performance states\n",
                          pss->package.count));

        pr->performance->state_count = pss->package.count;
        pr->performance->states =
            kmalloc(sizeof(struct acpi_processor_px) * pss->package.count,
                    GFP_KERNEL);
        if (!pr->performance->states) {
                result = -ENOMEM;
                goto end;
        }

        for (i = 0; i < pr->performance->state_count; i++) {

                struct acpi_processor_px *px = &(pr->performance->states[i]);

                state.length = sizeof(struct acpi_processor_px);
                state.pointer = px;

                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Extracting state %d\n", i));

                status = acpi_extract_package(&(pss->package.elements[i]),
                                              &format, &state);
                if (ACPI_FAILURE(status)) {
                        ACPI_EXCEPTION((AE_INFO, status, "Invalid _PSS data"));
                        result = -EFAULT;
                        kfree(pr->performance->states);
                        goto end;
                }

                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "State [%d]: core_frequency[%d] power[%d] transition_latency[%d] bus_master_latency[%d] control[0x%x] status[0x%x]\n",
                                  i,
                                  (u32) px->core_frequency,
                                  (u32) px->power,
                                  (u32) px->transition_latency,
                                  (u32) px->bus_master_latency,
                                  (u32) px->control, (u32) px->status));

                if (!px->core_frequency) {
                        printk(KERN_ERR PREFIX
                                    "Invalid _PSS data: freq is zero\n");
                        result = -EFAULT;
                        kfree(pr->performance->states);
                        goto end;
                }
        }

      end:
        kfree(buffer.pointer);

        return result;
}

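/*
 * acpi_processor_get_performance_info - gather all P-state data for one
 * CPU: _PCT (control/status registers), _PSS (state table) and _PPC
 * (platform limit). Fails with -ENODEV if _PCT is not present.
 */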
static int acpi_processor_get_performance_info(struct acpi_processor *pr)
{
        int result = 0;
        acpi_status status = AE_OK;
        acpi_handle handle = NULL;


        if (!pr || !pr->performance || !pr->handle)
                return -EINVAL;

        status = acpi_get_handle(pr->handle, "_PCT", &handle);
        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "ACPI-based processor performance control unavailable\n"));
                return -ENODEV;
        }

        result = acpi_processor_get_performance_control(pr);
        if (result)
                return result;

        result = acpi_processor_get_performance_states(pr);
        if (result)
                return result;

        result = acpi_processor_get_platform_limit(pr);
        if (result)
                return result;

        return 0;
}

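/*
 * acpi_processor_notify_smm - tell the platform firmware (via SMM) that
 * the OS is taking over P-state control, by writing the FADT pstate_cnt
 * value to the SMI command port. This is done at most once; while _PPC
 * is in use, the calling cpufreq driver is kept pinned in memory.
 */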
int acpi_processor_notify_smm(struct module *calling_module)
{
        acpi_status status;
        static int is_done = 0;


        if (!(acpi_processor_ppc_status & PPC_REGISTERED))
                return -EBUSY;

        if (!try_module_get(calling_module))
                return -EINVAL;

        /* is_done is set to a negative value if an error occurred,
         * and to a positive value if _no_ error occurred but SMM
         * was already notified. This avoids double notification
         * which might lead to unexpected results...
         */
        if (is_done > 0) {
                module_put(calling_module);
                return 0;
        } else if (is_done < 0) {
                module_put(calling_module);
                return is_done;
        }

        is_done = -EIO;

        /* Can't write pstate_cnt to smi_cmd if either value is zero */
        if ((!acpi_fadt.smi_cmd) || (!acpi_fadt.pstate_cnt)) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No SMI port or pstate_cnt\n"));
                module_put(calling_module);
                return 0;
        }

        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                          "Writing pstate_cnt [0x%x] to smi_cmd [0x%x]\n",
                          acpi_fadt.pstate_cnt, acpi_fadt.smi_cmd));

        /* FADT v1.0 doesn't define pstate_cnt, but many BIOS vendors use
         * it anyway, so we need to support it... */
        if (acpi_fadt_is_v1) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "Using v1.0 FADT reserved value for pstate_cnt\n"));
        }

        status = acpi_os_write_port(acpi_fadt.smi_cmd,
                                    (u32) acpi_fadt.pstate_cnt, 8);
        if (ACPI_FAILURE(status)) {
                ACPI_EXCEPTION((AE_INFO, status,
                                "Failed to write pstate_cnt [0x%x] to "
                                "smi_cmd [0x%x]", acpi_fadt.pstate_cnt,
                                acpi_fadt.smi_cmd));
                module_put(calling_module);
                return status;
        }

        /* Success. If there is no _PPC, there is nothing to worry about,
         * so we can allow the cpufreq driver to be rmmod'ed. */
        is_done = 1;

        if (!(acpi_processor_ppc_status & PPC_IN_USE))
                module_put(calling_module);

        return 0;
}

EXPORT_SYMBOL(acpi_processor_notify_smm);

#ifdef CONFIG_X86_ACPI_CPUFREQ_PROC_INTF
/* /proc/acpi/processor/../performance interface (DEPRECATED) */

static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file);
static struct file_operations acpi_processor_perf_fops = {
        .open = acpi_processor_perf_open_fs,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static int acpi_processor_perf_seq_show(struct seq_file *seq, void *offset)
{
        struct acpi_processor *pr = (struct acpi_processor *)seq->private;
        int i;


        if (!pr)
                goto end;

        if (!pr->performance) {
                seq_puts(seq, "<not supported>\n");
                goto end;
        }

        seq_printf(seq, "state count:             %d\n"
                   "active state:            P%d\n",
                   pr->performance->state_count, pr->performance->state);

        seq_puts(seq, "states:\n");
        for (i = 0; i < pr->performance->state_count; i++)
                seq_printf(seq,
                           "   %cP%d:                  %d MHz, %d mW, %d uS\n",
                           (i == pr->performance->state ? '*' : ' '), i,
                           (u32) pr->performance->states[i].core_frequency,
                           (u32) pr->performance->states[i].power,
                           (u32) pr->performance->states[i].transition_latency);

      end:
        return 0;
}

static int acpi_processor_perf_open_fs(struct inode *inode, struct file *file)
{
        return single_open(file, acpi_processor_perf_seq_show,
                           PDE(inode)->data);
}

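/*
 * acpi_processor_write_performance - handle writes to the (deprecated)
 * /proc performance file: parse the requested P-state number and pin the
 * CPUfreq policy's min and max to that state's frequency.
 */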
static ssize_t
acpi_processor_write_performance(struct file *file,
                                 const char __user * buffer,
                                 size_t count, loff_t * data)
{
        int result = 0;
        struct seq_file *m = (struct seq_file *)file->private_data;
        struct acpi_processor *pr = (struct acpi_processor *)m->private;
        struct acpi_processor_performance *perf;
        char state_string[12] = { '\0' };
        unsigned int new_state = 0;
        struct cpufreq_policy policy;


        if (!pr || (count > sizeof(state_string) - 1))
                return -EINVAL;

        perf = pr->performance;
        if (!perf)
                return -EINVAL;

        if (copy_from_user(state_string, buffer, count))
                return -EFAULT;

        state_string[count] = '\0';
        new_state = simple_strtoul(state_string, NULL, 0);

        if (new_state >= perf->state_count)
                return -EINVAL;

        cpufreq_get_policy(&policy, pr->id);

        policy.cpu = pr->id;
        policy.min = perf->states[new_state].core_frequency * 1000;
        policy.max = perf->states[new_state].core_frequency * 1000;

        result = cpufreq_set_policy(&policy);
        if (result)
                return result;

        return count;
}

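/*
 * acpi_cpufreq_add_file / acpi_cpufreq_remove_file - create and remove
 * the per-processor 'performance' entry under /proc/acpi/processor/.
 * Both collapse to empty stubs when the deprecated proc interface is
 * not configured.
 */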
static void acpi_cpufreq_add_file(struct acpi_processor *pr)
{
        struct proc_dir_entry *entry = NULL;
        struct acpi_device *device = NULL;


        if (acpi_bus_get_device(pr->handle, &device))
                return;

        /* add file 'performance' [R/W] */
        entry = create_proc_entry(ACPI_PROCESSOR_FILE_PERFORMANCE,
                                  S_IFREG | S_IRUGO | S_IWUSR,
                                  acpi_device_dir(device));
        if (entry) {
                acpi_processor_perf_fops.write = acpi_processor_write_performance;
                entry->proc_fops = &acpi_processor_perf_fops;
                entry->data = acpi_driver_data(device);
                entry->owner = THIS_MODULE;
        }
        return;
}

static void acpi_cpufreq_remove_file(struct acpi_processor *pr)
{
        struct acpi_device *device = NULL;


        if (acpi_bus_get_device(pr->handle, &device))
                return;

        /* remove file 'performance' */
        remove_proc_entry(ACPI_PROCESSOR_FILE_PERFORMANCE,
                          acpi_device_dir(device));

        return;
}

#else
static void acpi_cpufreq_add_file(struct acpi_processor *pr)
{
        return;
}
static void acpi_cpufreq_remove_file(struct acpi_processor *pr)
{
        return;
}
#endif                          /* CONFIG_X86_ACPI_CPUFREQ_PROC_INTF */

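/*
 * acpi_processor_get_psd - evaluate _PSD (P-state dependency).
 * Extracts the five-integer dependency package (num_entries, revision,
 * domain, coord_type, num_processors) into pr->performance->domain_info
 * and sanity-checks the revision and entry count.
 */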
static int acpi_processor_get_psd(struct acpi_processor *pr)
{
        int result = 0;
        acpi_status status = AE_OK;
        struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
        struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
        struct acpi_buffer state = {0, NULL};
        union acpi_object  *psd = NULL;
        struct acpi_psd_package *pdomain;

        status = acpi_evaluate_object(pr->handle, "_PSD", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                return -ENODEV;
        }

        psd = (union acpi_object *) buffer.pointer;
        if (!psd || (psd->type != ACPI_TYPE_PACKAGE)) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSD data\n"));
                result = -EFAULT;
                goto end;
        }

        if (psd->package.count != 1) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSD data\n"));
                result = -EFAULT;
                goto end;
        }

        pdomain = &(pr->performance->domain_info);

        state.length = sizeof(struct acpi_psd_package);
        state.pointer = pdomain;

        status = acpi_extract_package(&(psd->package.elements[0]),
                &format, &state);
        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Invalid _PSD data\n"));
                result = -EFAULT;
                goto end;
        }

        if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _PSD:num_entries\n"));
                result = -EFAULT;
                goto end;
        }

        if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Unknown _PSD:revision\n"));
                result = -EFAULT;
                goto end;
        }

end:
        kfree(buffer.pointer);
        return result;
}

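/*
 * acpi_processor_preregister_performance - evaluate _PSD for every
 * possible CPU and build the shared_cpu_map / shared_type information
 * describing which CPUs must coordinate their P-state transitions.
 * On any parse error, every CPU falls back to an independent domain
 * (no coordination).
 */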
int acpi_processor_preregister_performance(
                struct acpi_processor_performance **performance)
{
        int count, count_target;
        int retval = 0;
        unsigned int i, j;
        cpumask_t covered_cpus;
        struct acpi_processor *pr;
        struct acpi_psd_package *pdomain;
        struct acpi_processor *match_pr;
        struct acpi_psd_package *match_pdomain;

        mutex_lock(&performance_mutex);

        retval = 0;

        /* Call _PSD for all CPUs */
        for_each_possible_cpu(i) {
                pr = processors[i];
                if (!pr) {
                        /* Look only at processors in ACPI namespace */
                        continue;
                }

                if (pr->performance) {
                        retval = -EBUSY;
                        continue;
                }

                if (!performance || !performance[i]) {
                        retval = -EINVAL;
                        continue;
                }

                pr->performance = performance[i];
                cpu_set(i, pr->performance->shared_cpu_map);
                if (acpi_processor_get_psd(pr)) {
                        retval = -EINVAL;
                        continue;
                }
        }
        if (retval)
                goto err_ret;

        /*
         * Now that we have _PSD data from all CPUs, let's set up the
         * P-state domain info.
         */
        for_each_possible_cpu(i) {
                pr = processors[i];
                if (!pr)
                        continue;

                /* Basic validity check for domain info */
                pdomain = &(pr->performance->domain_info);
                if ((pdomain->revision != ACPI_PSD_REV0_REVISION) ||
                    (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES)) {
                        retval = -EINVAL;
                        goto err_ret;
                }
                if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
                    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
                    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
                        retval = -EINVAL;
                        goto err_ret;
                }
        }

        cpus_clear(covered_cpus);
        for_each_possible_cpu(i) {
                pr = processors[i];
                if (!pr)
                        continue;

                if (cpu_isset(i, covered_cpus))
                        continue;

                pdomain = &(pr->performance->domain_info);
                cpu_set(i, pr->performance->shared_cpu_map);
                cpu_set(i, covered_cpus);
                if (pdomain->num_processors <= 1)
                        continue;

                /* Validate the Domain info */
                count_target = pdomain->num_processors;
                count = 1;
                if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
                        pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
                else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
                        pr->performance->shared_type = CPUFREQ_SHARED_TYPE_HW;
                else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
                        pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ANY;

                for_each_possible_cpu(j) {
                        if (i == j)
                                continue;

                        match_pr = processors[j];
                        if (!match_pr)
                                continue;

                        match_pdomain = &(match_pr->performance->domain_info);
                        if (match_pdomain->domain != pdomain->domain)
                                continue;

                        /* Here i and j are in the same domain */

                        if (match_pdomain->num_processors != count_target) {
                                retval = -EINVAL;
                                goto err_ret;
                        }

                        if (pdomain->coord_type != match_pdomain->coord_type) {
                                retval = -EINVAL;
                                goto err_ret;
                        }

                        cpu_set(j, covered_cpus);
                        cpu_set(j, pr->performance->shared_cpu_map);
                        count++;
                }

                for_each_possible_cpu(j) {
                        if (i == j)
                                continue;

                        match_pr = processors[j];
                        if (!match_pr)
                                continue;

                        match_pdomain = &(match_pr->performance->domain_info);
                        if (match_pdomain->domain != pdomain->domain)
                                continue;

                        match_pr->performance->shared_type =
                                        pr->performance->shared_type;
                        match_pr->performance->shared_cpu_map =
                                pr->performance->shared_cpu_map;
                }
        }

err_ret:
        if (retval) {
                ACPI_DEBUG_PRINT((ACPI_DB_ERROR, "Error while parsing _PSD domain information. Assuming no coordination\n"));
        }

        for_each_possible_cpu(i) {
                pr = processors[i];
                if (!pr || !pr->performance)
                        continue;

                /* Assume no coordination on any error parsing domain info */
                if (retval) {
                        cpus_clear(pr->performance->shared_cpu_map);
                        cpu_set(i, pr->performance->shared_cpu_map);
                        pr->performance->shared_type = CPUFREQ_SHARED_TYPE_ALL;
                }
                pr->performance = NULL; /* Will be set for real in register */
        }

        mutex_unlock(&performance_mutex);
        return retval;
}
EXPORT_SYMBOL(acpi_processor_preregister_performance);

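/*
 * acpi_processor_register_performance - attach a cpufreq driver's
 * per-CPU performance structure to the ACPI processor object and fill
 * it with _PCT/_PSS/_PPC data. Fails if _PPC support was never
 * registered or if another driver already owns this CPU.
 */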
int
acpi_processor_register_performance(struct acpi_processor_performance
                                    *performance, unsigned int cpu)
{
        struct acpi_processor *pr;


        if (!(acpi_processor_ppc_status & PPC_REGISTERED))
                return -EINVAL;

        mutex_lock(&performance_mutex);

        pr = processors[cpu];
        if (!pr) {
                mutex_unlock(&performance_mutex);
                return -ENODEV;
        }

        if (pr->performance) {
                mutex_unlock(&performance_mutex);
                return -EBUSY;
        }

        WARN_ON(!performance);

        pr->performance = performance;

        if (acpi_processor_get_performance_info(pr)) {
                pr->performance = NULL;
                mutex_unlock(&performance_mutex);
                return -EIO;
        }

        acpi_cpufreq_add_file(pr);

        mutex_unlock(&performance_mutex);
        return 0;
}

EXPORT_SYMBOL(acpi_processor_register_performance);
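
/*
 * Illustrative sketch (not part of this file): roughly how a cpufreq
 * driver uses the interfaces exported above. The function and variable
 * names (my_cpu_init, my_cpu_exit, perf_of_cpu0) are hypothetical; only
 * the acpi_processor_*_performance() and acpi_processor_notify_smm()
 * calls refer to functions defined here.
 *
 *	static struct acpi_processor_performance perf_of_cpu0;
 *
 *	static int my_cpu_init(struct cpufreq_policy *policy)
 *	{
 *		int ret;
 *
 *		ret = acpi_processor_register_performance(&perf_of_cpu0,
 *							  policy->cpu);
 *		if (ret)
 *			return ret;
 *
 *		// perf_of_cpu0.states[] now holds the _PSS table; the
 *		// driver would build its frequency table from
 *		// states[i].core_frequency * 1000 here.
 *
 *		acpi_processor_notify_smm(THIS_MODULE);
 *		return 0;
 *	}
 *
 *	static int my_cpu_exit(struct cpufreq_policy *policy)
 *	{
 *		acpi_processor_unregister_performance(&perf_of_cpu0,
 *						      policy->cpu);
 *		return 0;
 *	}
 */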

void
acpi_processor_unregister_performance(struct acpi_processor_performance
                                      *performance, unsigned int cpu)
{
        struct acpi_processor *pr;


        mutex_lock(&performance_mutex);

        pr = processors[cpu];
        if (!pr) {
                mutex_unlock(&performance_mutex);
                return;
        }

        if (pr->performance)
                kfree(pr->performance->states);
        pr->performance = NULL;

        acpi_cpufreq_remove_file(pr);

        mutex_unlock(&performance_mutex);

        return;
}

EXPORT_SYMBOL(acpi_processor_unregister_performance);