/*
 * processor_thermal.c - Passive cooling submodule of the ACPI processor driver
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004       Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *                      - Added processor hotplug support
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License along
 *  with this program; if not, write to the Free Software Foundation, Inc.,
 *  59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/cpufreq.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/sysdev.h>

#include <asm/uaccess.h>

#include <acpi/acpi_bus.h>
#include <acpi/processor.h>
#include <acpi/acpi_drivers.h>

#define ACPI_PROCESSOR_CLASS            "processor"
#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_thermal");

/* --------------------------------------------------------------------------
                                 Limit Interface
   -------------------------------------------------------------------------- */
static int acpi_processor_apply_limit(struct acpi_processor *pr)
{
        int result = 0;
        u16 px = 0;
        u16 tx = 0;


        if (!pr)
                return -EINVAL;

        if (!pr->flags.limit)
                return -ENODEV;

        if (pr->flags.throttling) {
                if (pr->limit.user.tx > tx)
                        tx = pr->limit.user.tx;
                if (pr->limit.thermal.tx > tx)
                        tx = pr->limit.thermal.tx;

                result = acpi_processor_set_throttling(pr, tx);
                if (result)
                        goto end;
        }

        pr->limit.state.px = px;
        pr->limit.state.tx = tx;

        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                          "Processor [%d] limit set to (P%d:T%d)\n", pr->id,
                          pr->limit.state.px, pr->limit.state.tx));

      end:
        if (result)
                printk(KERN_ERR PREFIX "Unable to set limit\n");

        return result;
}

#ifdef CONFIG_CPU_FREQ

/*
 * When a passive cooling situation is detected, cpufreq is used first, as it
 * offers (in most cases) voltage scaling in addition to frequency scaling,
 * and thus a cubic (instead of linear) reduction of energy consumption.
 * Also, _any_ cpufreq driver is allowed here, not only acpi-cpufreq.
 */

#define CPUFREQ_THERMAL_MIN_STEP 0
#define CPUFREQ_THERMAL_MAX_STEP 3

static DEFINE_PER_CPU(unsigned int, cpufreq_thermal_reduction_pctg);
static unsigned int acpi_thermal_cpufreq_is_init = 0;

static int cpu_has_cpufreq(unsigned int cpu)
{
        struct cpufreq_policy policy;
        if (!acpi_thermal_cpufreq_is_init || cpufreq_get_policy(&policy, cpu))
                return 0;
        return 1;
}

static int acpi_thermal_cpufreq_increase(unsigned int cpu)
{
        if (!cpu_has_cpufreq(cpu))
                return -ENODEV;

        if (per_cpu(cpufreq_thermal_reduction_pctg, cpu) <
                CPUFREQ_THERMAL_MAX_STEP) {
                per_cpu(cpufreq_thermal_reduction_pctg, cpu)++;
                cpufreq_update_policy(cpu);
                return 0;
        }

        return -ERANGE;
}

static int acpi_thermal_cpufreq_decrease(unsigned int cpu)
{
        if (!cpu_has_cpufreq(cpu))
                return -ENODEV;

        if (per_cpu(cpufreq_thermal_reduction_pctg, cpu) >
                (CPUFREQ_THERMAL_MIN_STEP + 1))
                per_cpu(cpufreq_thermal_reduction_pctg, cpu)--;
        else
                per_cpu(cpufreq_thermal_reduction_pctg, cpu) = 0;
        cpufreq_update_policy(cpu);
        /* We reached max freq again and can leave passive mode */
        return !per_cpu(cpufreq_thermal_reduction_pctg, cpu);
}

static int acpi_thermal_cpufreq_notifier(struct notifier_block *nb,
                                         unsigned long event, void *data)
{
        struct cpufreq_policy *policy = data;
        unsigned long max_freq = 0;

        if (event != CPUFREQ_ADJUST)
                goto out;

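        /*
         * Each reduction step caps the policy's maximum frequency by a
         * further 20% of cpuinfo.max_freq: step 0 leaves 100% available,
         * step 3 (CPUFREQ_THERMAL_MAX_STEP) leaves only 40%.
         */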
        max_freq = (
            policy->cpuinfo.max_freq *
            (100 - per_cpu(cpufreq_thermal_reduction_pctg, policy->cpu) * 20)
        ) / 100;

        cpufreq_verify_within_limits(policy, 0, max_freq);

      out:
        return 0;
}

static struct notifier_block acpi_thermal_cpufreq_notifier_block = {
        .notifier_call = acpi_thermal_cpufreq_notifier,
};

static int cpufreq_get_max_state(unsigned int cpu)
{
        if (!cpu_has_cpufreq(cpu))
                return 0;

        return CPUFREQ_THERMAL_MAX_STEP;
}

static int cpufreq_get_cur_state(unsigned int cpu)
{
        if (!cpu_has_cpufreq(cpu))
                return 0;

        return per_cpu(cpufreq_thermal_reduction_pctg, cpu);
}

static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
        if (!cpu_has_cpufreq(cpu))
                return 0;

        per_cpu(cpufreq_thermal_reduction_pctg, cpu) = state;
        cpufreq_update_policy(cpu);
        return 0;
}

void acpi_thermal_cpufreq_init(void)
{
        int i;

        for (i = 0; i < nr_cpu_ids; i++)
                if (cpu_present(i))
                        per_cpu(cpufreq_thermal_reduction_pctg, i) = 0;

        i = cpufreq_register_notifier(&acpi_thermal_cpufreq_notifier_block,
                                      CPUFREQ_POLICY_NOTIFIER);
        if (!i)
                acpi_thermal_cpufreq_is_init = 1;
}

void acpi_thermal_cpufreq_exit(void)
{
        if (acpi_thermal_cpufreq_is_init)
                cpufreq_unregister_notifier
                    (&acpi_thermal_cpufreq_notifier_block,
                     CPUFREQ_POLICY_NOTIFIER);

        acpi_thermal_cpufreq_is_init = 0;
}

#else                           /* ! CONFIG_CPU_FREQ */
static int cpufreq_get_max_state(unsigned int cpu)
{
        return 0;
}

static int cpufreq_get_cur_state(unsigned int cpu)
{
        return 0;
}

static int cpufreq_set_cur_state(unsigned int cpu, int state)
{
        return 0;
}

static int acpi_thermal_cpufreq_increase(unsigned int cpu)
{
        return -ENODEV;
}
static int acpi_thermal_cpufreq_decrease(unsigned int cpu)
{
        return -ENODEV;
}

#endif

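/*
 * acpi_processor_set_thermal_limit - adjust the passive cooling limit
 * @handle: ACPI handle of the processor
 * @type: ACPI_PROCESSOR_LIMIT_NONE, _INCREMENT or _DECREMENT
 *
 * When the limit is raised, a deeper cpufreq reduction is tried before
 * T-state throttling; when it is lowered, T-states are released first.
 * Returns a negative errno on failure, 1 when a decrement has released the
 * limit completely (the processor is back at full performance), and 0
 * otherwise.
 */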
int acpi_processor_set_thermal_limit(acpi_handle handle, int type)
{
        int result = 0;
        struct acpi_processor *pr = NULL;
        struct acpi_device *device = NULL;
        int tx = 0, max_tx_px = 0;


        if ((type < ACPI_PROCESSOR_LIMIT_NONE)
            || (type > ACPI_PROCESSOR_LIMIT_DECREMENT))
                return -EINVAL;

        result = acpi_bus_get_device(handle, &device);
        if (result)
                return result;

        pr = acpi_driver_data(device);
        if (!pr)
                return -ENODEV;

        /* Thermal limits are always relative to the current Px/Tx state. */
        if (pr->flags.throttling)
                pr->limit.thermal.tx = pr->throttling.state;

        /*
         * Our default policy is to only use throttling at the lowest
         * performance state.
         */

        tx = pr->limit.thermal.tx;

        switch (type) {

        case ACPI_PROCESSOR_LIMIT_NONE:
                do {
                        result = acpi_thermal_cpufreq_decrease(pr->id);
                } while (!result);
                tx = 0;
                break;

        case ACPI_PROCESSOR_LIMIT_INCREMENT:
                /* if going up: P-states first, T-states later */

                result = acpi_thermal_cpufreq_increase(pr->id);
                if (!result)
                        goto end;
                else if (result == -ERANGE)
                        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                          "At maximum performance state\n"));

                if (pr->flags.throttling) {
                        if (tx == (pr->throttling.state_count - 1))
                                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                                  "At maximum throttling state\n"));
                        else
                                tx++;
                }
                break;

        case ACPI_PROCESSOR_LIMIT_DECREMENT:
                /* if going down: T-states first, P-states later */

                if (pr->flags.throttling) {
                        if (tx == 0) {
                                max_tx_px = 1;
                                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                                  "At minimum throttling state\n"));
                        } else {
                                tx--;
                                goto end;
                        }
                }

                result = acpi_thermal_cpufreq_decrease(pr->id);
                if (result) {
                        /*
                         * acpi_thermal_cpufreq_decrease() returns -ENODEV, 1
                         * or 0 here; any nonzero value means no cpufreq
                         * reduction remains, so we are at the minimum limit.
                         */
                        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                          "At minimum performance state\n"));
                        max_tx_px = 1;
                } else
                        max_tx_px = 0;

                break;
        }

      end:
        if (pr->flags.throttling) {
                pr->limit.thermal.px = 0;
                pr->limit.thermal.tx = tx;

                result = acpi_processor_apply_limit(pr);
                if (result)
                        printk(KERN_ERR PREFIX "Unable to set thermal limit\n");

                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Thermal limit now (P%d:T%d)\n",
                                  pr->limit.thermal.px, pr->limit.thermal.tx));
        } else
                result = 0;
        if (max_tx_px)
                return 1;
        else
                return result;
}

int acpi_processor_get_limit_info(struct acpi_processor *pr)
{

        if (!pr)
                return -EINVAL;

        if (pr->flags.throttling)
                pr->flags.limit = 1;

        return 0;
}

/* thermal cooling device callbacks */
static int acpi_processor_max_state(struct acpi_processor *pr)
{
        int max_state = 0;

        /*
         * There are four states according to
         * cpufreq_thermal_reduction_pctg: 0, 1, 2, 3
         */
        max_state += cpufreq_get_max_state(pr->id);
        if (pr->flags.throttling)
                max_state += (pr->throttling.state_count - 1);

        return max_state;
}
static int
processor_get_max_state(struct thermal_cooling_device *cdev, char *buf)
{
        struct acpi_device *device = cdev->devdata;
        struct acpi_processor *pr;

        if (!device)
                return -EINVAL;

        pr = acpi_driver_data(device);
        if (!pr)
                return -EINVAL;

        return sprintf(buf, "%d\n", acpi_processor_max_state(pr));
}

static int
processor_get_cur_state(struct thermal_cooling_device *cdev, char *buf)
{
        struct acpi_device *device = cdev->devdata;
        struct acpi_processor *pr;
        int cur_state;

        if (!device)
                return -EINVAL;

        pr = acpi_driver_data(device);
        if (!pr)
                return -EINVAL;

        cur_state = cpufreq_get_cur_state(pr->id);
        if (pr->flags.throttling)
                cur_state += pr->throttling.state;

        return sprintf(buf, "%d\n", cur_state);
}

static int
processor_set_cur_state(struct thermal_cooling_device *cdev, unsigned int state)
{
        struct acpi_device *device = cdev->devdata;
        struct acpi_processor *pr;
        int result = 0;
        int max_pstate;

        if (!device)
                return -EINVAL;

        pr = acpi_driver_data(device);
        if (!pr)
                return -EINVAL;

        max_pstate = cpufreq_get_max_state(pr->id);

        if (state > acpi_processor_max_state(pr))
                return -EINVAL;

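        /*
         * Cooling states 0..max_pstate are mapped to cpufreq reduction
         * steps; anything above that keeps cpufreq at its deepest reduction
         * and adds T-state throttling on top.
         */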
        if (state <= max_pstate) {
                if (pr->flags.throttling && pr->throttling.state)
                        result = acpi_processor_set_throttling(pr, 0);
                cpufreq_set_cur_state(pr->id, state);
        } else {
                cpufreq_set_cur_state(pr->id, max_pstate);
                result = acpi_processor_set_throttling(pr,
                                state - max_pstate);
        }
        return result;
}

struct thermal_cooling_device_ops processor_cooling_ops = {
        .get_max_state = processor_get_max_state,
        .get_cur_state = processor_get_cur_state,
        .set_cur_state = processor_set_cur_state,
};

/* /proc interface */

static int acpi_processor_limit_seq_show(struct seq_file *seq, void *offset)
{
        struct acpi_processor *pr = (struct acpi_processor *)seq->private;


        if (!pr)
                goto end;

        if (!pr->flags.limit) {
                seq_puts(seq, "<not supported>\n");
                goto end;
        }

        seq_printf(seq, "active limit:            P%d:T%d\n"
                   "user limit:              P%d:T%d\n"
                   "thermal limit:           P%d:T%d\n",
                   pr->limit.state.px, pr->limit.state.tx,
                   pr->limit.user.px, pr->limit.user.tx,
                   pr->limit.thermal.px, pr->limit.thermal.tx);

      end:
        return 0;
}

static int acpi_processor_limit_open_fs(struct inode *inode, struct file *file)
{
        return single_open(file, acpi_processor_limit_seq_show,
                           PDE(inode)->data);
}

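/*
 * Handle writes of the form "<px>:<tx>" (e.g. "0:2").  The P-state value is
 * parsed but not used here; only the T-state limit is applied.
 */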
static ssize_t acpi_processor_write_limit(struct file *file,
                                          const char __user *buffer,
                                          size_t count, loff_t *data)
{
        int result = 0;
        struct seq_file *m = file->private_data;
        struct acpi_processor *pr = m->private;
        char limit_string[25] = { '\0' };
        int px = 0;
        int tx = 0;


        if (!pr || (count > sizeof(limit_string) - 1)) {
                return -EINVAL;
        }

        if (copy_from_user(limit_string, buffer, count)) {
                return -EFAULT;
        }

        limit_string[count] = '\0';

        if (sscanf(limit_string, "%d:%d", &px, &tx) != 2) {
                printk(KERN_ERR PREFIX "Invalid data format\n");
                return -EINVAL;
        }

        if (pr->flags.throttling) {
                if ((tx < 0) || (tx > (pr->throttling.state_count - 1))) {
                        printk(KERN_ERR PREFIX "Invalid tx\n");
                        return -EINVAL;
                }
                pr->limit.user.tx = tx;
        }

        result = acpi_processor_apply_limit(pr);

        return count;
}

struct file_operations acpi_processor_limit_fops = {
        .owner = THIS_MODULE,
        .open = acpi_processor_limit_open_fs,
        .read = seq_read,
        .write = acpi_processor_write_limit,
        .llseek = seq_lseek,
        .release = single_release,
};