/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *                      - Added processor hotplug support
 *  Copyright (C) 2005  Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *                      - Added support for C3 on SMP
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or (at
 *  your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful, but
 *  WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  General Public License for more details.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#define pr_fmt(fmt) "ACPI: " fmt

#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/sched.h>       /* need_resched() */
#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <linux/cpu.h>
#include <acpi/processor.h>

/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#endif

#define ACPI_PROCESSOR_CLASS            "processor"
#define _COMPONENT              ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");

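/*
 * With CONFIG_ARCH_HAS_CPU_RELAX, cpuidle_poll_state_init() installs a
 * polling state at index 0, so the first real ACPI C-state starts at 1.
 */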
#define ACPI_IDLE_STATE_START   (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX) ? 1 : 0)

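/*
 * The three parameters below use permissions 0000, so they get no sysfs
 * entry and can only be set on the kernel command line, e.g.
 * "processor.max_cstate=1" or "processor.nocst=1".
 */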
static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
module_param(max_cstate, uint, 0000);
static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);
static unsigned int bm_check_disable __read_mostly;
module_param(bm_check_disable, uint, 0000);

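/*
 * Used when registering C-states below: a state's target residency is
 * estimated as its exit latency multiplied by this factor.
 */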
static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);

static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);

struct cpuidle_driver acpi_idle_driver = {
        .name =         "acpi_idle",
        .owner =        THIS_MODULE,
};

#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
static
DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], acpi_cstate);

static int disabled_by_idle_boot_param(void)
{
        return boot_option_idle_override == IDLE_POLL ||
                boot_option_idle_override == IDLE_HALT;
}

/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(const struct dmi_system_id *id)
{
        if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
                return 0;

        pr_notice("%s detected - limiting to C%ld max_cstate."
                  " Override with \"processor.max_cstate=%d\"\n", id->ident,
                  (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

        max_cstate = (long)id->driver_data;

        return 0;
}

static const struct dmi_system_id processor_power_dmi_table[] = {
        { set_max_cstate, "Clevo 5600D", {
          DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
          DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
         (void *)2},
        { set_max_cstate, "Pavilion zv5000", {
          DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
          DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")},
         (void *)1},
        { set_max_cstate, "Asus L8400B", {
          DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
          DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
         (void *)1},
        {},
};

/*
 * Callers should disable interrupts before the call and enable
 * interrupts after return.
 */
static void __cpuidle acpi_safe_halt(void)
{
        if (!tif_need_resched()) {
                safe_halt();
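                /* safe_halt() returns with IRQs enabled; turn them off again */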
                local_irq_disable();
        }
}

#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void lapic_timer_check_state(int state, struct acpi_processor *pr,
                                   struct acpi_processor_cx *cx)
{
        struct acpi_processor_power *pwr = &pr->power;
        u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

        if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
                return;

        if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E))
                type = ACPI_STATE_C1;

        /*
         * Check, if one of the previous states already marked the lapic
         * unstable
         */
        if (pwr->timer_broadcast_on_state < state)
                return;

        if (cx->type >= type)
                pr->power.timer_broadcast_on_state = state;
}

static void __lapic_timer_propagate_broadcast(void *arg)
{
        struct acpi_processor *pr = (struct acpi_processor *) arg;

        if (pr->power.timer_broadcast_on_state < INT_MAX)
                tick_broadcast_enable();
        else
                tick_broadcast_disable();
}

static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
{
        smp_call_function_single(pr->id, __lapic_timer_propagate_broadcast,
                                 (void *)pr, 1);
}

/* Power(C) State timer broadcast control */
static void lapic_timer_state_broadcast(struct acpi_processor *pr,
                                       struct acpi_processor_cx *cx,
                                       int broadcast)
{
        int state = cx - pr->power.states;

        if (state >= pr->power.timer_broadcast_on_state) {
                if (broadcast)
                        tick_broadcast_enter();
                else
                        tick_broadcast_exit();
        }
}

#else

static void lapic_timer_check_state(int state, struct acpi_processor *pr,
                                   struct acpi_processor_cx *cstate) { }
static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }
static void lapic_timer_state_broadcast(struct acpi_processor *pr,
                                       struct acpi_processor_cx *cx,
                                       int broadcast)
{
}

#endif

#if defined(CONFIG_X86)
static void tsc_check_state(int state)
{
        switch (boot_cpu_data.x86_vendor) {
        case X86_VENDOR_AMD:
        case X86_VENDOR_INTEL:
                /*
                 * AMD Fam10h TSC will tick in all
                 * C/P/S0/S1 states when this bit is set.
                 */
                if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
                        return;

                /*FALL THROUGH*/
        default:
                /* TSC could halt in idle, so notify users */
                if (state > ACPI_STATE_C1)
                        mark_tsc_unstable("TSC halts in idle");
        }
}
#else
static void tsc_check_state(int state) { return; }
#endif

static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{
        if (!pr->pblk)
                return -ENODEV;

        /* if info is obtained from pblk/fadt, type equals state */
        pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
        pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
        /*
         * Check for P_LVL2_UP flag before entering C2 and above on
         * an SMP system.
         */
        if ((num_online_cpus() > 1) &&
            !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
                return -ENODEV;
#endif

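        /*
         * Per the ACPI spec, P_BLK is a 6-byte I/O region: P_CNT occupies
         * the first four bytes, followed by the one-byte P_LVL2 and P_LVL3
         * registers, hence the +4 and +5 offsets below.
         */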
        /* determine C2 and C3 address from pblk */
        pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
        pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

        /* determine latencies from FADT */
        pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.c2_latency;
        pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.c3_latency;

        /*
         * FADT specified C2 latency must be less than or equal to
         * 100 microseconds.
         */
        if (acpi_gbl_FADT.c2_latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                        "C2 latency too large [%d]\n", acpi_gbl_FADT.c2_latency));
                /* invalidate C2 */
                pr->power.states[ACPI_STATE_C2].address = 0;
        }

        /*
         * FADT supplied C3 latency must be less than or equal to
         * 1000 microseconds.
         */
        if (acpi_gbl_FADT.c3_latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                        "C3 latency too large [%d]\n", acpi_gbl_FADT.c3_latency));
                /* invalidate C3 */
                pr->power.states[ACPI_STATE_C3].address = 0;
        }

        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                          "lvl2[0x%08x] lvl3[0x%08x]\n",
                          pr->power.states[ACPI_STATE_C2].address,
                          pr->power.states[ACPI_STATE_C3].address));

        return 0;
}

static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
        if (!pr->power.states[ACPI_STATE_C1].valid) {
                /* set the first C-State to C1 */
                /* all processors need to support C1 */
                pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
                pr->power.states[ACPI_STATE_C1].valid = 1;
                pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;

                snprintf(pr->power.states[ACPI_STATE_C1].desc,
                         ACPI_CX_DESC_LEN, "ACPI HLT");
        }
        /* the C0 state only exists as a filler in our array */
        pr->power.states[ACPI_STATE_C0].valid = 1;
        return 0;
}

static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
        acpi_status status;
        u64 count;
        int current_count;
        int i, ret = 0;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *cst;

        if (nocst)
                return -ENODEV;

        current_count = 0;

        status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
                return -ENODEV;
        }

        cst = buffer.pointer;

        /* There must be at least 2 elements */
        if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
                pr_err("not enough elements in _CST\n");
                ret = -EFAULT;
                goto end;
        }

        count = cst->package.elements[0].integer.value;

        /* Validate number of power states. */
        if (count < 1 || count != cst->package.count - 1) {
                pr_err("count given by _CST is not valid\n");
                ret = -EFAULT;
                goto end;
        }

        /* Tell driver that at least _CST is supported. */
        pr->flags.has_cst = 1;

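        /*
         * Each _CST entry parsed below is a 4-element package of
         * { Register, Type, Latency, Power }.  A purely hypothetical
         * firmware table might look like this (ASL sketch, not taken
         * from any real platform):
         *
         *   Name (_CST, Package () {
         *       2,                                       // state count
         *       Package () {                             // C1 via FFH/MWAIT
         *           ResourceTemplate () { Register (FFixedHW, 1, 2, 0x00) },
         *           1, 1, 1000 },
         *       Package () {                             // C2 via I/O port
         *           ResourceTemplate () { Register (SystemIO, 8, 0, 0x414) },
         *           2, 50, 500 }
         *   })
         */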
        for (i = 1; i <= count; i++) {
                union acpi_object *element;
                union acpi_object *obj;
                struct acpi_power_register *reg;
                struct acpi_processor_cx cx;

                memset(&cx, 0, sizeof(cx));

                element = &(cst->package.elements[i]);
                if (element->type != ACPI_TYPE_PACKAGE)
                        continue;

                if (element->package.count != 4)
                        continue;

                obj = &(element->package.elements[0]);

                if (obj->type != ACPI_TYPE_BUFFER)
                        continue;

                reg = (struct acpi_power_register *)obj->buffer.pointer;

                if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
                    (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
                        continue;

                /* There should be an easy way to extract an integer... */
                obj = &(element->package.elements[1]);
                if (obj->type != ACPI_TYPE_INTEGER)
                        continue;

                cx.type = obj->integer.value;
                /*
                 * Some buggy BIOSes won't list C1 in _CST -
                 * Let acpi_processor_get_power_info_default() handle them later
                 */
                if (i == 1 && cx.type != ACPI_STATE_C1)
                        current_count++;

                cx.address = reg->address;
                cx.index = current_count + 1;

                cx.entry_method = ACPI_CSTATE_SYSTEMIO;
                if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
                        if (acpi_processor_ffh_cstate_probe(pr->id, &cx, reg) == 0) {
                                cx.entry_method = ACPI_CSTATE_FFH;
                        } else if (cx.type == ACPI_STATE_C1) {
                                /*
                                 * C1 is a special case where FIXED_HARDWARE
                                 * can be handled in non-MWAIT way as well.
                                 * In that case, save this _CST entry info.
                                 * Otherwise, ignore this info and continue.
                                 */
                                cx.entry_method = ACPI_CSTATE_HALT;
                                snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
                        } else {
                                continue;
                        }
                        if (cx.type == ACPI_STATE_C1 &&
                            (boot_option_idle_override == IDLE_NOMWAIT)) {
                                /*
                                 * The C1 space_id obtained from the _CST
                                 * object is usually FIXED_HARDWARE access
                                 * mode, but with "idle=nomwait" on the
                                 * command line the C1 entry_method must be
                                 * changed from CSTATE_FFH to CSTATE_HALT.
                                 */
                                cx.entry_method = ACPI_CSTATE_HALT;
                                snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
                        }
                } else {
                        snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
                                 cx.address);
                }

                if (cx.type == ACPI_STATE_C1)
                        cx.valid = 1;

                obj = &(element->package.elements[2]);
                if (obj->type != ACPI_TYPE_INTEGER)
                        continue;

                cx.latency = obj->integer.value;

                obj = &(element->package.elements[3]);
                if (obj->type != ACPI_TYPE_INTEGER)
                        continue;

                current_count++;
                memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));

                /*
                 * We support up to ACPI_PROCESSOR_MAX_POWER - 1 states
                 * (indices 1 through ACPI_PROCESSOR_MAX_POWER - 1).
                 */
                if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
                        pr_warn("Limiting number of power states to max (%d)\n",
                                ACPI_PROCESSOR_MAX_POWER);
                        pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
                        break;
                }
        }

        ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
                          current_count));

        /* Validate number of power states discovered */
        if (current_count < 2)
                ret = -EFAULT;

end:
        kfree(buffer.pointer);

        return ret;
}

static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
                                           struct acpi_processor_cx *cx)
{
        static int bm_check_flag = -1;
        static int bm_control_flag = -1;

        if (!cx->address)
                return;

        /*
         * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
         * DMA transfers are used by any ISA device to avoid livelock.
         * Note that we could disable Type-F DMA (as recommended by
         * the erratum), but this is known to disrupt certain ISA
         * devices thus we take the conservative approach.
         */
        if (errata.piix4.fdma) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                  "C3 not supported on PIIX4 with Type-F DMA\n"));
                return;
        }

        /* All the logic here assumes flags.bm_check is the same across all CPUs */
        if (bm_check_flag == -1) {
                /* Determine whether bm_check is needed based on CPU  */
                acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
                bm_check_flag = pr->flags.bm_check;
                bm_control_flag = pr->flags.bm_control;
        } else {
                pr->flags.bm_check = bm_check_flag;
                pr->flags.bm_control = bm_control_flag;
        }

        if (pr->flags.bm_check) {
                if (!pr->flags.bm_control) {
                        if (pr->flags.has_cst != 1) {
                                /* bus mastering control is necessary */
                                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                        "C3 support requires BM control\n"));
                                return;
                        } else {
                                /* Here we enter C3 without bus mastering */
                                ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                        "C3 support without BM control\n"));
                        }
                }
        } else {
                /*
                 * WBINVD must be set in the FADT for the C3 state to be
                 * supported when bm_check is not required.
                 */
                if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
                        ACPI_DEBUG_PRINT((ACPI_DB_INFO,
                                          "Cache invalidation should work properly"
                                          " for C3 to be enabled on SMP systems\n"));
                        return;
                }
        }

        /*
         * Otherwise we've met all of our C3 requirements.
         * Normalize the C3 latency to expedite policy.  Enable
         * checking of bus mastering status (bm_check) so we can
         * use this in our C3 policy
         */
        cx->valid = 1;

        /*
         * On older chipsets, BM_RLD needs to be set
         * in order for Bus Master activity to wake the
         * system from C3.  Newer chipsets handle DMA
         * during C3 automatically and BM_RLD is a NOP.
         * In either case, the proper way to
         * handle BM_RLD is to set it and leave it set.
         */
        acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);

        return;
}

static int acpi_processor_power_verify(struct acpi_processor *pr)
{
        unsigned int i;
        unsigned int working = 0;

        pr->power.timer_broadcast_on_state = INT_MAX;

        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
                struct acpi_processor_cx *cx = &pr->power.states[i];

                switch (cx->type) {
                case ACPI_STATE_C1:
                        cx->valid = 1;
                        break;

                case ACPI_STATE_C2:
                        if (!cx->address)
                                break;
                        cx->valid = 1;
                        break;

                case ACPI_STATE_C3:
                        acpi_processor_power_verify_c3(pr, cx);
                        break;
                }
                if (!cx->valid)
                        continue;

                lapic_timer_check_state(i, pr, cx);
                tsc_check_state(cx->type);
                working++;
        }

        lapic_timer_propagate_broadcast(pr);

        return working;
}

static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
{
        unsigned int i;
        int result;

        /* NOTE: the idle thread may not be running while calling
         * this function */

        /* Zero initialize all the C-states info. */
        memset(pr->power.states, 0, sizeof(pr->power.states));

        result = acpi_processor_get_power_info_cst(pr);
        if (result == -ENODEV)
                result = acpi_processor_get_power_info_fadt(pr);

        if (result)
                return result;

        acpi_processor_get_power_info_default(pr);

        pr->power.count = acpi_processor_power_verify(pr);

        /*
         * if one state of type C2 or C3 is available, mark this
         * CPU as being "idle manageable"
         */
        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
                if (pr->power.states[i].valid) {
                        pr->power.count = i;
                        if (pr->power.states[i].type >= ACPI_STATE_C2)
                                pr->flags.power = 1;
                }
        }

        return 0;
}

/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 */
static int acpi_idle_bm_check(void)
{
        u32 bm_status = 0;

        if (bm_check_disable)
                return 0;

        acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
        if (bm_status)
                acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
        /*
         * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
         * the true state of bus mastering activity; forcing us to
         * manually check the BMIDEA bit of each IDE channel.
         */
        else if (errata.piix4.bmisx) {
                if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
                    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
                        bm_status = 1;
        }
        return bm_status;
}

/**
 * acpi_idle_do_entry - enter idle state using the appropriate method
 * @cx: cstate data
 *
 * Caller disables interrupts before call and enables interrupts after return.
 */
static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
        if (cx->entry_method == ACPI_CSTATE_FFH) {
                /* Call into architectural FFH based C-state */
                acpi_processor_ffh_cstate_enter(cx);
        } else if (cx->entry_method == ACPI_CSTATE_HALT) {
                acpi_safe_halt();
        } else {
                /* IO port based C-state */
                inb(cx->address);
                /*
                 * Dummy wait op - must do something useless after P_LVL2
                 * read because chipsets cannot guarantee that the STPCLK#
                 * signal gets asserted in time to freeze execution properly.
                 */
                inl(acpi_gbl_FADT.xpm_timer_block.address);
        }
}

/**
 * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)
 * @dev: the target CPU
 * @index: the index of suggested state
 */
static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
{
        struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

        ACPI_FLUSH_CPU_CACHE();

        while (1) {
                if (cx->entry_method == ACPI_CSTATE_HALT)
                        safe_halt();
                else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
                        inb(cx->address);
                        /* See comment in acpi_idle_do_entry() */
                        inl(acpi_gbl_FADT.xpm_timer_block.address);
                } else
                        return -ENODEV;
        }

        /* Never reached */
        return 0;
}

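/*
 * Without _CST and without the FADT C2-on-MP flag, C2/C3 are only known
 * to be safe while a single CPU is online.  Such states can only have
 * been enumerated with CONFIG_HOTPLUG_CPU set (see
 * acpi_processor_get_power_info_fadt()), so acpi_idle_enter() must fall
 * back to C1 whenever more CPUs are online.
 */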
static bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
{
        return IS_ENABLED(CONFIG_HOTPLUG_CPU) && !pr->flags.has_cst &&
                !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED);
}

static int c3_cpu_count;
static DEFINE_RAW_SPINLOCK(c3_lock);

/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @pr: Target processor
 * @cx: Target state context
 * @timer_bc: Whether or not to change timer mode to broadcast
 */
static void acpi_idle_enter_bm(struct acpi_processor *pr,
                               struct acpi_processor_cx *cx, bool timer_bc)
{
        acpi_unlazy_tlb(smp_processor_id());

        /*
         * Must be done before busmaster disable as we might need to
         * access HPET!
         */
        if (timer_bc)
                lapic_timer_state_broadcast(pr, cx, 1);

        /*
         * Disable bus master arbitration:
         * bm_check implies we need ARB_DIS,
         * bm_control indicates whether we can do ARB_DIS.
         *
         * That leaves a case where bm_check is set and bm_control is
         * not set. In that case we cannot do much, we enter C3
         * without doing anything.
         */
        if (pr->flags.bm_control) {
                raw_spin_lock(&c3_lock);
                c3_cpu_count++;
                /* Disable bus master arbitration when all CPUs are in C3 */
                if (c3_cpu_count == num_online_cpus())
                        acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
                raw_spin_unlock(&c3_lock);
        }

        acpi_idle_do_entry(cx);

        /* Re-enable bus master arbitration */
        if (pr->flags.bm_control) {
                raw_spin_lock(&c3_lock);
                acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
                c3_cpu_count--;
                raw_spin_unlock(&c3_lock);
        }

        if (timer_bc)
                lapic_timer_state_broadcast(pr, cx, 0);
}

static int acpi_idle_enter(struct cpuidle_device *dev,
                           struct cpuidle_driver *drv, int index)
{
        struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
        struct acpi_processor *pr;

        pr = __this_cpu_read(processors);
        if (unlikely(!pr))
                return -EINVAL;

        if (cx->type != ACPI_STATE_C1) {
                if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) {
                        index = ACPI_IDLE_STATE_START;
                        cx = per_cpu(acpi_cstate[index], dev->cpu);
                } else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) {
                        if (cx->bm_sts_skip || !acpi_idle_bm_check()) {
                                acpi_idle_enter_bm(pr, cx, true);
                                return index;
                        } else if (drv->safe_state_index >= 0) {
                                index = drv->safe_state_index;
                                cx = per_cpu(acpi_cstate[index], dev->cpu);
                        } else {
                                acpi_safe_halt();
                                return -EBUSY;
                        }
                }
        }

        lapic_timer_state_broadcast(pr, cx, 1);

        if (cx->type == ACPI_STATE_C3)
                ACPI_FLUSH_CPU_CACHE();

        acpi_idle_do_entry(cx);

        lapic_timer_state_broadcast(pr, cx, 0);

        return index;
}

static void acpi_idle_enter_s2idle(struct cpuidle_device *dev,
                                   struct cpuidle_driver *drv, int index)
{
        struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

        if (cx->type == ACPI_STATE_C3) {
                struct acpi_processor *pr = __this_cpu_read(processors);

                if (unlikely(!pr))
                        return;

                if (pr->flags.bm_check) {
                        acpi_idle_enter_bm(pr, cx, false);
                        return;
                } else {
                        ACPI_FLUSH_CPU_CACHE();
                }
        }
        acpi_idle_do_entry(cx);
}

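/*
 * The cpuidle driver's state table is global, but the acpi_cstate table
 * filled in here is per-CPU: acpi_idle_enter() uses it to map a driver
 * state index back to this CPU's own C-state data.
 */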
static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
                                           struct cpuidle_device *dev)
{
        int i, count = ACPI_IDLE_STATE_START;
        struct acpi_processor_cx *cx;

        if (max_cstate == 0)
                max_cstate = 1;

        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
                cx = &pr->power.states[i];

                if (!cx->valid)
                        continue;

                per_cpu(acpi_cstate[count], dev->cpu) = cx;

                count++;
                if (count == CPUIDLE_STATE_MAX)
                        break;
        }

        if (!count)
                return -EINVAL;

        return 0;
}

static int acpi_processor_setup_cstates(struct acpi_processor *pr)
{
        int i, count;
        struct acpi_processor_cx *cx;
        struct cpuidle_state *state;
        struct cpuidle_driver *drv = &acpi_idle_driver;

        if (max_cstate == 0)
                max_cstate = 1;

        if (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX)) {
                cpuidle_poll_state_init(drv);
                count = 1;
        } else {
                count = 0;
        }

        for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
                cx = &pr->power.states[i];

                if (!cx->valid)
                        continue;

                state = &drv->states[count];
                snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
                strlcpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
                state->exit_latency = cx->latency;
                state->target_residency = cx->latency * latency_factor;
                state->enter = acpi_idle_enter;

                state->flags = 0;
                if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2) {
                        state->enter_dead = acpi_idle_play_dead;
                        drv->safe_state_index = count;
                }
                /*
                 * Halt-induced C1 is not good for ->enter_s2idle, because it
                 * re-enables interrupts on exit.  Moreover, C1 is generally not
                 * particularly interesting from the suspend-to-idle angle, so
                 * avoid C1 and the situations in which we may need to fall back
                 * to it altogether.
                 */
                if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr))
                        state->enter_s2idle = acpi_idle_enter_s2idle;

                count++;
                if (count == CPUIDLE_STATE_MAX)
                        break;
        }

        drv->state_count = count;

        if (!count)
                return -EINVAL;

        return 0;
}

static inline void acpi_processor_cstate_first_run_checks(void)
{
        acpi_status status;
        static int first_run;

        if (first_run)
                return;
        dmi_check_system(processor_power_dmi_table);
        max_cstate = acpi_processor_cstate_check(max_cstate);
        if (max_cstate < ACPI_C_STATES_MAX)
                pr_notice("processor limited to max C-state %d\n", max_cstate);
        first_run++;

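        /*
         * Writing the FADT-provided CST_CNT value to the SMI command port
         * asks the firmware to relinquish _CST control to the OS.
         */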
        if (acpi_gbl_FADT.cst_control && !nocst) {
                status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
                                            acpi_gbl_FADT.cst_control, 8);
                if (ACPI_FAILURE(status))
                        ACPI_EXCEPTION((AE_INFO, status,
                                        "Notifying BIOS of _CST ability failed"));
        }
}
#else

static inline int disabled_by_idle_boot_param(void) { return 0; }
static inline void acpi_processor_cstate_first_run_checks(void) { }
static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
{
        return -ENODEV;
}

static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
                                           struct cpuidle_device *dev)
{
        return -EINVAL;
}

static int acpi_processor_setup_cstates(struct acpi_processor *pr)
{
        return -EINVAL;
}

#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */

struct acpi_lpi_states_array {
        unsigned int size;
        unsigned int composite_states_size;
        struct acpi_lpi_state *entries;
        struct acpi_lpi_state *composite_states[ACPI_PROCESSOR_MAX_POWER];
};

static int obj_get_integer(union acpi_object *obj, u32 *value)
{
        if (obj->type != ACPI_TYPE_INTEGER)
                return -EINVAL;

        *value = obj->integer.value;
        return 0;
}

static int acpi_processor_evaluate_lpi(acpi_handle handle,
                                       struct acpi_lpi_states_array *info)
{
        acpi_status status;
        int ret = 0;
        int pkg_count, state_idx = 1, loop;
        struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
        union acpi_object *lpi_data;
        struct acpi_lpi_state *lpi_state;

        status = acpi_evaluate_object(handle, "_LPI", NULL, &buffer);
        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _LPI, giving up\n"));
                return -ENODEV;
        }

        lpi_data = buffer.pointer;

        /* There must be at least 4 elements = 3 elements + 1 package */
        if (!lpi_data || lpi_data->type != ACPI_TYPE_PACKAGE ||
            lpi_data->package.count < 4) {
                pr_debug("not enough elements in _LPI\n");
                ret = -ENODATA;
                goto end;
        }

        pkg_count = lpi_data->package.elements[2].integer.value;

        /* Validate number of power states. */
        if (pkg_count < 1 || pkg_count != lpi_data->package.count - 3) {
                pr_debug("count given by _LPI is not valid\n");
                ret = -ENODATA;
                goto end;
        }

        lpi_state = kcalloc(pkg_count, sizeof(*lpi_state), GFP_KERNEL);
        if (!lpi_state) {
                ret = -ENOMEM;
                goto end;
        }

        info->size = pkg_count;
        info->entries = lpi_state;

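        /*
         * Each LPI state entry is a package of (at least) 10 elements:
         * min residency, wake latency, flags, arch context-loss flags,
         * residency counter frequency, enabled parent state, entry method
         * (buffer or integer), residency counter register, usage counter
         * register and a descriptive name string.  Elements 0-6 and 9 are
         * consumed below; 7 and 8 are skipped.
         */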
        /* LPI States start at index 3 */
        for (loop = 3; state_idx <= pkg_count; loop++, state_idx++, lpi_state++) {
                union acpi_object *element, *pkg_elem, *obj;

                element = &lpi_data->package.elements[loop];
                if (element->type != ACPI_TYPE_PACKAGE || element->package.count < 7)
                        continue;

                pkg_elem = element->package.elements;

                obj = pkg_elem + 6;
                if (obj->type == ACPI_TYPE_BUFFER) {
                        struct acpi_power_register *reg;

                        reg = (struct acpi_power_register *)obj->buffer.pointer;
                        if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
                            reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)
                                continue;

                        lpi_state->address = reg->address;
                        lpi_state->entry_method =
                                reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE ?
                                ACPI_CSTATE_FFH : ACPI_CSTATE_SYSTEMIO;
                } else if (obj->type == ACPI_TYPE_INTEGER) {
                        lpi_state->entry_method = ACPI_CSTATE_INTEGER;
                        lpi_state->address = obj->integer.value;
                } else {
                        continue;
                }

                /* elements[7,8] skipped for now i.e. Residency/Usage counter */

                obj = pkg_elem + 9;
                if (obj->type == ACPI_TYPE_STRING)
                        strlcpy(lpi_state->desc, obj->string.pointer,
                                ACPI_CX_DESC_LEN);

                lpi_state->index = state_idx;
                if (obj_get_integer(pkg_elem + 0, &lpi_state->min_residency)) {
                        pr_debug("No min. residency found, assuming 10 us\n");
                        lpi_state->min_residency = 10;
                }

                if (obj_get_integer(pkg_elem + 1, &lpi_state->wake_latency)) {
                        pr_debug("No wake latency found, assuming 10 us\n");
                        lpi_state->wake_latency = 10;
                }

                if (obj_get_integer(pkg_elem + 2, &lpi_state->flags))
                        lpi_state->flags = 0;

                if (obj_get_integer(pkg_elem + 3, &lpi_state->arch_flags))
                        lpi_state->arch_flags = 0;

                if (obj_get_integer(pkg_elem + 4, &lpi_state->res_cnt_freq))
                        lpi_state->res_cnt_freq = 1;

                if (obj_get_integer(pkg_elem + 5, &lpi_state->enable_parent_state))
                        lpi_state->enable_parent_state = 0;
        }

        acpi_handle_debug(handle, "Found %d power states\n", state_idx - 1);
end:
        kfree(buffer.pointer);
        return ret;
}

/*
 * flat_state_cnt - the number of composite LPI states after the process of flattening
 */
static int flat_state_cnt;

/**
 * combine_lpi_states - combine local and parent LPI states to form a composite LPI state
 *
 * @local: local LPI state
 * @parent: parent LPI state
 * @result: composite LPI state
 */
static bool combine_lpi_states(struct acpi_lpi_state *local,
                               struct acpi_lpi_state *parent,
                               struct acpi_lpi_state *result)
{
        if (parent->entry_method == ACPI_CSTATE_INTEGER) {
                if (!parent->address) /* 0 means autopromotable */
                        return false;
                result->address = local->address + parent->address;
        } else {
                result->address = parent->address;
        }

        result->min_residency = max(local->min_residency, parent->min_residency);
        result->wake_latency = local->wake_latency + parent->wake_latency;
        result->enable_parent_state = parent->enable_parent_state;
        result->entry_method = local->entry_method;

        result->flags = parent->flags;
        result->arch_flags = parent->arch_flags;
        result->index = parent->index;

        strlcpy(result->desc, local->desc, ACPI_CX_DESC_LEN);
        strlcat(result->desc, "+", ACPI_CX_DESC_LEN);
        strlcat(result->desc, parent->desc, ACPI_CX_DESC_LEN);
        return true;
}
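
/*
 * Example (hypothetical values): if a processor node reports an LPI state
 * "C1" with enable_parent_state = 1, and its parent container reports
 * state 1 as "CLUSTER-SLEEP", flattening yields a composite state
 * described as "C1+CLUSTER-SLEEP" whose wake latency is the sum and whose
 * minimum residency is the maximum of the two inputs.
 */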

#define ACPI_LPI_STATE_FLAGS_ENABLED                    BIT(0)

static void stash_composite_state(struct acpi_lpi_states_array *curr_level,
                                  struct acpi_lpi_state *t)
{
        curr_level->composite_states[curr_level->composite_states_size++] = t;
}

static int flatten_lpi_states(struct acpi_processor *pr,
                              struct acpi_lpi_states_array *curr_level,
                              struct acpi_lpi_states_array *prev_level)
{
        int i, j, state_count = curr_level->size;
        struct acpi_lpi_state *p, *t = curr_level->entries;

        curr_level->composite_states_size = 0;
        for (j = 0; j < state_count; j++, t++) {
                struct acpi_lpi_state *flpi;

                if (!(t->flags & ACPI_LPI_STATE_FLAGS_ENABLED))
                        continue;

                if (flat_state_cnt >= ACPI_PROCESSOR_MAX_POWER) {
                        pr_warn("Limiting number of LPI states to max (%d)\n",
                                ACPI_PROCESSOR_MAX_POWER);
                        pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
                        break;
                }

                flpi = &pr->power.lpi_states[flat_state_cnt];

                if (!prev_level) { /* leaf/processor node */
                        memcpy(flpi, t, sizeof(*t));
                        stash_composite_state(curr_level, flpi);
                        flat_state_cnt++;
                        continue;
                }

                for (i = 0; i < prev_level->composite_states_size; i++) {
                        p = prev_level->composite_states[i];
                        if (t->index <= p->enable_parent_state &&
                            combine_lpi_states(p, t, flpi)) {
                                stash_composite_state(curr_level, flpi);
                                flat_state_cnt++;
                                flpi++;
                        }
                }
        }

        kfree(curr_level->entries);
        return 0;
}

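/*
 * Build the flattened LPI table for @pr: evaluate _LPI on the processor
 * object itself, then on each enclosing processor container (ACPI0010)
 * up the namespace, combining the levels as it goes.
 */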
static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
{
        int ret, i;
        acpi_status status;
        acpi_handle handle = pr->handle, pr_ahandle;
        struct acpi_device *d = NULL;
        struct acpi_lpi_states_array info[2], *tmp, *prev, *curr;

        if (!osc_pc_lpi_support_confirmed)
                return -EOPNOTSUPP;

        if (!acpi_has_method(handle, "_LPI"))
                return -EINVAL;

        flat_state_cnt = 0;
        prev = &info[0];
        curr = &info[1];
        handle = pr->handle;
        ret = acpi_processor_evaluate_lpi(handle, prev);
        if (ret)
                return ret;
        flatten_lpi_states(pr, prev, NULL);

        status = acpi_get_parent(handle, &pr_ahandle);
        while (ACPI_SUCCESS(status)) {
                acpi_bus_get_device(pr_ahandle, &d);
                handle = pr_ahandle;

                if (strcmp(acpi_device_hid(d), ACPI_PROCESSOR_CONTAINER_HID))
                        break;

                /* _LPI may be absent in outer levels of the hierarchy */
                if (!acpi_has_method(handle, "_LPI"))
                        break;

                ret = acpi_processor_evaluate_lpi(handle, curr);
                if (ret)
                        break;

                /* flatten all the LPI states in this level of hierarchy */
                flatten_lpi_states(pr, curr, prev);

                tmp = prev, prev = curr, curr = tmp;

                status = acpi_get_parent(handle, &pr_ahandle);
        }

        pr->power.count = flat_state_cnt;
        /* reset the index after flattening */
        for (i = 0; i < pr->power.count; i++)
                pr->power.lpi_states[i].index = i;

        /* Tell driver that _LPI is supported. */
        pr->flags.has_lpi = 1;
        pr->flags.power = 1;

        return 0;
}

int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
{
        return -ENODEV;
}

int __weak acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
{
        return -ENODEV;
}

/**
 * acpi_idle_lpi_enter - enters an ACPI LPI state
 * @dev: the target CPU
 * @drv: cpuidle driver containing cpuidle state info
 * @index: index of target state
 *
 * Return: 0 for success or negative value for error
 */
static int acpi_idle_lpi_enter(struct cpuidle_device *dev,
                               struct cpuidle_driver *drv, int index)
{
        struct acpi_processor *pr;
        struct acpi_lpi_state *lpi;

        pr = __this_cpu_read(processors);

        if (unlikely(!pr))
                return -EINVAL;

        lpi = &pr->power.lpi_states[index];
        if (lpi->entry_method == ACPI_CSTATE_FFH)
                return acpi_processor_ffh_lpi_enter(lpi);

        return -EINVAL;
}

static int acpi_processor_setup_lpi_states(struct acpi_processor *pr)
{
        int i;
        struct acpi_lpi_state *lpi;
        struct cpuidle_state *state;
        struct cpuidle_driver *drv = &acpi_idle_driver;

        if (!pr->flags.has_lpi)
                return -EOPNOTSUPP;

        for (i = 0; i < pr->power.count && i < CPUIDLE_STATE_MAX; i++) {
                lpi = &pr->power.lpi_states[i];

                state = &drv->states[i];
                snprintf(state->name, CPUIDLE_NAME_LEN, "LPI-%d", i);
                strlcpy(state->desc, lpi->desc, CPUIDLE_DESC_LEN);
                state->exit_latency = lpi->wake_latency;
                state->target_residency = lpi->min_residency;
                if (lpi->arch_flags)
                        state->flags |= CPUIDLE_FLAG_TIMER_STOP;
                state->enter = acpi_idle_lpi_enter;
                drv->safe_state_index = i;
        }

        drv->state_count = i;

        return 0;
}

/**
 * acpi_processor_setup_cpuidle_states - prepares and configures cpuidle
 * global state data i.e. idle routines
 *
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
{
        int i;
        struct cpuidle_driver *drv = &acpi_idle_driver;

        if (!pr->flags.power_setup_done || !pr->flags.power)
                return -EINVAL;

        drv->safe_state_index = -1;
        for (i = ACPI_IDLE_STATE_START; i < CPUIDLE_STATE_MAX; i++) {
                drv->states[i].name[0] = '\0';
                drv->states[i].desc[0] = '\0';
        }

        if (pr->flags.has_lpi)
                return acpi_processor_setup_lpi_states(pr);

        return acpi_processor_setup_cstates(pr);
}

/**
 * acpi_processor_setup_cpuidle_dev - prepares and configures CPUIDLE
 * device i.e. per-cpu data
 *
 * @pr: the ACPI processor
 * @dev: the cpuidle device
 */
static int acpi_processor_setup_cpuidle_dev(struct acpi_processor *pr,
                                            struct cpuidle_device *dev)
{
        if (!pr->flags.power_setup_done || !pr->flags.power || !dev)
                return -EINVAL;

        dev->cpu = pr->id;
        if (pr->flags.has_lpi)
                return acpi_processor_ffh_lpi_probe(pr->id);

        return acpi_processor_setup_cpuidle_cx(pr, dev);
}

static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
        int ret;

        ret = acpi_processor_get_lpi_info(pr);
        if (ret)
                ret = acpi_processor_get_cstate_info(pr);

        return ret;
}

int acpi_processor_hotplug(struct acpi_processor *pr)
{
        int ret = 0;
        struct cpuidle_device *dev;

        if (disabled_by_idle_boot_param())
                return 0;

        if (!pr->flags.power_setup_done)
                return -ENODEV;

        dev = per_cpu(acpi_cpuidle_device, pr->id);
        cpuidle_pause_and_lock();
        cpuidle_disable_device(dev);
        ret = acpi_processor_get_power_info(pr);
        if (!ret && pr->flags.power) {
                acpi_processor_setup_cpuidle_dev(pr, dev);
                ret = cpuidle_enable_device(dev);
        }
        cpuidle_resume_and_unlock();

        return ret;
}

int acpi_processor_power_state_has_changed(struct acpi_processor *pr)
{
        int cpu;
        struct acpi_processor *_pr;
        struct cpuidle_device *dev;

        if (disabled_by_idle_boot_param())
                return 0;

        if (!pr->flags.power_setup_done)
                return -ENODEV;

        /*
         * FIXME:  Design the ACPI notification to make it once per
         * system instead of once per-cpu.  This condition is a hack
         * to make the code that updates C-States be called once.
         */

        if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {

                /* Protect against cpu-hotplug */
                get_online_cpus();
                cpuidle_pause_and_lock();

                /* Disable all cpuidle devices */
                for_each_online_cpu(cpu) {
                        _pr = per_cpu(processors, cpu);
                        if (!_pr || !_pr->flags.power_setup_done)
                                continue;
                        dev = per_cpu(acpi_cpuidle_device, cpu);
                        cpuidle_disable_device(dev);
                }

                /* Populate Updated C-state information */
                acpi_processor_get_power_info(pr);
                acpi_processor_setup_cpuidle_states(pr);

                /* Enable all cpuidle devices */
                for_each_online_cpu(cpu) {
                        _pr = per_cpu(processors, cpu);
                        if (!_pr || !_pr->flags.power_setup_done)
                                continue;
                        acpi_processor_get_power_info(_pr);
                        if (_pr->flags.power) {
                                dev = per_cpu(acpi_cpuidle_device, cpu);
                                acpi_processor_setup_cpuidle_dev(_pr, dev);
                                cpuidle_enable_device(dev);
                        }
                }
                cpuidle_resume_and_unlock();
                put_online_cpus();
        }

        return 0;
}

static int acpi_processor_registered;

int acpi_processor_power_init(struct acpi_processor *pr)
{
        int retval;
        struct cpuidle_device *dev;

        if (disabled_by_idle_boot_param())
                return 0;

        acpi_processor_cstate_first_run_checks();

        if (!acpi_processor_get_power_info(pr))
                pr->flags.power_setup_done = 1;

        /*
         * Install the idle handler if processor power management is
         * supported.  Note that the previously set idle handler will
         * be used on platforms that only support C1.
         */
        if (pr->flags.power) {
                /* Register acpi_idle_driver if not already registered */
                if (!acpi_processor_registered) {
                        acpi_processor_setup_cpuidle_states(pr);
                        retval = cpuidle_register_driver(&acpi_idle_driver);
                        if (retval)
                                return retval;
                        pr_debug("%s registered with cpuidle\n",
                                 acpi_idle_driver.name);
                }

                dev = kzalloc(sizeof(*dev), GFP_KERNEL);
                if (!dev)
                        return -ENOMEM;
                per_cpu(acpi_cpuidle_device, pr->id) = dev;

                acpi_processor_setup_cpuidle_dev(pr, dev);

                /*
                 * Register the per-cpu cpuidle_device.  The cpuidle driver
                 * must already be registered before registering a device.
                 */
                retval = cpuidle_register_device(dev);
                if (retval) {
                        if (acpi_processor_registered == 0)
                                cpuidle_unregister_driver(&acpi_idle_driver);
                        return retval;
                }
                acpi_processor_registered++;
        }
        return 0;
}

int acpi_processor_power_exit(struct acpi_processor *pr)
{
        struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);

        if (disabled_by_idle_boot_param())
                return 0;

        if (pr->flags.power) {
                cpuidle_unregister_device(dev);
                acpi_processor_registered--;
                if (acpi_processor_registered == 0)
                        cpuidle_unregister_driver(&acpi_idle_driver);
        }

        pr->flags.power_setup_done = 0;
        return 0;
}