drivers/acpi/processor_idle.c
1 /*
2  * processor_idle - idle state submodule to the ACPI processor driver
3  *
4  *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
5  *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
6  *  Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
7  *  Copyright (C) 2004  Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
8  *                      - Added processor hotplug support
9  *  Copyright (C) 2005  Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
10  *                      - Added support for C3 on SMP
11  *
12  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
13  *
14  *  This program is free software; you can redistribute it and/or modify
15  *  it under the terms of the GNU General Public License as published by
16  *  the Free Software Foundation; either version 2 of the License, or (at
17  *  your option) any later version.
18  *
19  *  This program is distributed in the hope that it will be useful, but
20  *  WITHOUT ANY WARRANTY; without even the implied warranty of
21  *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
22  *  General Public License for more details.
23  *
24  * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
25  */
26 #define pr_fmt(fmt) "ACPI: " fmt
27
28 #include <linux/module.h>
29 #include <linux/acpi.h>
30 #include <linux/dmi.h>
31 #include <linux/sched.h>       /* need_resched() */
32 #include <linux/tick.h>
33 #include <linux/cpuidle.h>
34 #include <linux/cpu.h>
35 #include <acpi/processor.h>
36
37 /*
38  * Include the apic definitions for x86 to have the APIC timer related defines
39  * available also for UP (on SMP it gets magically included via linux/smp.h).
40  * asm/acpi.h is not an option, as it would require more include magic. Also
41  * creating an empty asm-ia64/apic.h would just trade the plague for cholera.
42  */
43 #ifdef CONFIG_X86
44 #include <asm/apic.h>
45 #endif
46
47 #define ACPI_PROCESSOR_CLASS            "processor"
48 #define _COMPONENT              ACPI_PROCESSOR_COMPONENT
49 ACPI_MODULE_NAME("processor_idle");
50
51 #define ACPI_IDLE_STATE_START   (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX) ? 1 : 0)
52
53 static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
54 module_param(max_cstate, uint, 0000);
55 static unsigned int nocst __read_mostly;
56 module_param(nocst, uint, 0000);
57 static int bm_check_disable __read_mostly;
58 module_param(bm_check_disable, uint, 0000);
59
60 static unsigned int latency_factor __read_mostly = 2;
61 module_param(latency_factor, uint, 0644);
62
63 static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);
64
65 struct cpuidle_driver acpi_idle_driver = {
66         .name =         "acpi_idle",
67         .owner =        THIS_MODULE,
68 };
69
70 #ifdef CONFIG_ACPI_PROCESSOR_CSTATE
71 static
72 DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], acpi_cstate);
73
74 static int disabled_by_idle_boot_param(void)
75 {
76         return boot_option_idle_override == IDLE_POLL ||
77                 boot_option_idle_override == IDLE_HALT;
78 }
79
80 /*
81  * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
82  * For now disable this. Probably a bug somewhere else.
83  *
84  * To skip this limit, boot/load with a large max_cstate limit.
85  */
86 static int set_max_cstate(const struct dmi_system_id *id)
87 {
88         if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
89                 return 0;
90
91         pr_notice("%s detected - limiting to C%ld max_cstate."
92                   " Override with \"processor.max_cstate=%d\"\n", id->ident,
93                   (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);
94
95         max_cstate = (long)id->driver_data;
96
97         return 0;
98 }
99
100 static const struct dmi_system_id processor_power_dmi_table[] = {
101         { set_max_cstate, "Clevo 5600D", {
102           DMI_MATCH(DMI_BIOS_VENDOR,"Phoenix Technologies LTD"),
103           DMI_MATCH(DMI_BIOS_VERSION,"SHE845M0.86C.0013.D.0302131307")},
104          (void *)2},
105         { set_max_cstate, "Pavilion zv5000", {
106           DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
107           DMI_MATCH(DMI_PRODUCT_NAME,"Pavilion zv5000 (DS502A#ABA)")},
108          (void *)1},
109         { set_max_cstate, "Asus L8400B", {
110           DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
111           DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")},
112          (void *)1},
113         {},
114 };
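/*
 * Illustrative example: a quirk for a similar firmware bug would be one
 * more entry in the table above; the strings here are hypothetical:
 *
 *         { set_max_cstate, "Example Laptop", {
 *           DMI_MATCH(DMI_SYS_VENDOR, "Example Inc."),
 *           DMI_MATCH(DMI_PRODUCT_NAME, "Example 1000")},
 *          (void *)1},
 *
 * The resulting cap can be lifted from the kernel command line with a
 * large limit, e.g. "processor.max_cstate=9".
 */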
115
116
117 /*
118  * Callers should disable interrupts before the call and enable
119  * interrupts after return.
120  */
121 static void __cpuidle acpi_safe_halt(void)
122 {
123         if (!tif_need_resched()) {
124                 safe_halt();
125                 local_irq_disable();
126         }
127 }
128
129 #ifdef ARCH_APICTIMER_STOPS_ON_C3
130
131 /*
132  * Some BIOS implementations switch to C3 in the published C2 state.
133  * This seems to be a common problem on AMD boxen, but other vendors
134  * are affected too. We pick the most conservative approach: we assume
135  * that the local APIC stops in both C2 and C3.
136  */
137 static void lapic_timer_check_state(int state, struct acpi_processor *pr,
138                                    struct acpi_processor_cx *cx)
139 {
140         struct acpi_processor_power *pwr = &pr->power;
141         u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;
142
143         if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
144                 return;
145
146         if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E))
147                 type = ACPI_STATE_C1;
148
149         /*
150          * Check if one of the previous states already marked the lapic
151          * unstable.
152          */
153         if (pwr->timer_broadcast_on_state < state)
154                 return;
155
156         if (cx->type >= type)
157                 pwr->timer_broadcast_on_state = state;
158 }
159
160 static void __lapic_timer_propagate_broadcast(void *arg)
161 {
162         struct acpi_processor *pr = (struct acpi_processor *) arg;
163
164         if (pr->power.timer_broadcast_on_state < INT_MAX)
165                 tick_broadcast_enable();
166         else
167                 tick_broadcast_disable();
168 }
169
170 static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
171 {
172         smp_call_function_single(pr->id, __lapic_timer_propagate_broadcast,
173                                  (void *)pr, 1);
174 }
175
176 /* Power(C) State timer broadcast control */
177 static void lapic_timer_state_broadcast(struct acpi_processor *pr,
178                                        struct acpi_processor_cx *cx,
179                                        int broadcast)
180 {
181         int state = cx - pr->power.states;
182
183         if (state >= pr->power.timer_broadcast_on_state) {
184                 if (broadcast)
185                         tick_broadcast_enter();
186                 else
187                         tick_broadcast_exit();
188         }
189 }
190
191 #else
192
193 static void lapic_timer_check_state(int state, struct acpi_processor *pr,
194                                    struct acpi_processor_cx *cstate) { }
195 static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }
196 static void lapic_timer_state_broadcast(struct acpi_processor *pr,
197                                        struct acpi_processor_cx *cx,
198                                        int broadcast)
199 {
200 }
201
202 #endif
203
204 #if defined(CONFIG_X86)
205 static void tsc_check_state(int state)
206 {
207         switch (boot_cpu_data.x86_vendor) {
208         case X86_VENDOR_AMD:
209         case X86_VENDOR_INTEL:
210         case X86_VENDOR_CENTAUR:
211                 /*
212                  * AMD Fam10h TSC will tick in all
213                  * C/P/S0/S1 states when this bit is set.
214                  */
215                 if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
216                         return;
217
218                 /*FALL THROUGH*/
219         default:
220                 /* TSC could halt in idle, so notify users */
221                 if (state > ACPI_STATE_C1)
222                         mark_tsc_unstable("TSC halts in idle");
223         }
224 }
225 #else
226 static void tsc_check_state(int state) { }
227 #endif
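/*
 * For reference: on x86 the decisive capability bit is the invariant TSC,
 * CPUID.80000007H:EDX[8], exposed by the kernel as X86_FEATURE_NONSTOP_TSC
 * above and as the "nonstop_tsc" flag in /proc/cpuinfo.  Without it the
 * TSC may stop in deep C-states, so it is marked unstable and the
 * clocksource code falls back to the HPET or the ACPI PM timer.
 */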
228
229 static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
230 {
231
232         if (!pr->pblk)
233                 return -ENODEV;
234
235         /* if info is obtained from pblk/fadt, type equals state */
236         pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
237         pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;
238
239 #ifndef CONFIG_HOTPLUG_CPU
240         /*
241          * Check for P_LVL2_UP flag before entering C2 and above on
242          * an SMP system.
243          */
244         if ((num_online_cpus() > 1) &&
245             !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
246                 return -ENODEV;
247 #endif
248
249         /* determine C2 and C3 address from pblk */
250         pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
251         pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;
252
253         /* determine latencies from FADT */
254         pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.c2_latency;
255         pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.c3_latency;
256
257         /*
258          * FADT specified C2 latency must be less than or equal to
259          * 100 microseconds.
260          */
261         if (acpi_gbl_FADT.c2_latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
262                 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
263                         "C2 latency too large [%d]\n", acpi_gbl_FADT.c2_latency));
264                 /* invalidate C2 */
265                 pr->power.states[ACPI_STATE_C2].address = 0;
266         }
267
268         /*
269          * FADT supplied C3 latency must be less than or equal to
270          * 1000 microseconds.
271          */
272         if (acpi_gbl_FADT.c3_latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
273                 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
274                         "C3 latency too large [%d]\n", acpi_gbl_FADT.c3_latency));
275                 /* invalidate C3 */
276                 pr->power.states[ACPI_STATE_C3].address = 0;
277         }
278
279         ACPI_DEBUG_PRINT((ACPI_DB_INFO,
280                           "lvl2[0x%08x] lvl3[0x%08x]\n",
281                           pr->power.states[ACPI_STATE_C2].address,
282                           pr->power.states[ACPI_STATE_C3].address));
283
284         return 0;
285 }
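/*
 * For reference: the FADT-defined P_BLK is a six-byte I/O block, which is
 * where the fixed offsets used above come from:
 *
 *         offset 0-3: P_CNT  - processor control (clock duty cycle etc.)
 *         offset 4:   P_LVL2 - a one-byte read enters C2
 *         offset 5:   P_LVL3 - a one-byte read enters C3
 */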
286
287 static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
288 {
289         if (!pr->power.states[ACPI_STATE_C1].valid) {
290                 /* set the first C-State to C1 */
291                 /* all processors need to support C1 */
292                 pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
293                 pr->power.states[ACPI_STATE_C1].valid = 1;
294                 pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;
295         }
296         /* the C0 state only exists as a filler in our array */
297         pr->power.states[ACPI_STATE_C0].valid = 1;
298         return 0;
299 }
300
301 static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
302 {
303         acpi_status status;
304         u64 count;
305         int current_count;
306         int i, ret = 0;
307         struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
308         union acpi_object *cst;
309
310         if (nocst)
311                 return -ENODEV;
312
313         current_count = 0;
314
315         status = acpi_evaluate_object(pr->handle, "_CST", NULL, &buffer);
316         if (ACPI_FAILURE(status)) {
317                 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _CST, giving up\n"));
318                 return -ENODEV;
319         }
320
321         cst = buffer.pointer;
322
323         /* There must be at least 2 elements */
324         if (!cst || (cst->type != ACPI_TYPE_PACKAGE) || cst->package.count < 2) {
325                 pr_err("not enough elements in _CST\n");
326                 ret = -EFAULT;
327                 goto end;
328         }
329
330         count = cst->package.elements[0].integer.value;
331
332         /* Validate number of power states. */
333         if (count < 1 || count != cst->package.count - 1) {
334                 pr_err("count given by _CST is not valid\n");
335                 ret = -EFAULT;
336                 goto end;
337         }
338
339         /* Tell driver that at least _CST is supported. */
340         pr->flags.has_cst = 1;
341
342         for (i = 1; i <= count; i++) {
343                 union acpi_object *element;
344                 union acpi_object *obj;
345                 struct acpi_power_register *reg;
346                 struct acpi_processor_cx cx;
347
348                 memset(&cx, 0, sizeof(cx));
349
350                 element = &(cst->package.elements[i]);
351                 if (element->type != ACPI_TYPE_PACKAGE)
352                         continue;
353
354                 if (element->package.count != 4)
355                         continue;
356
357                 obj = &(element->package.elements[0]);
358
359                 if (obj->type != ACPI_TYPE_BUFFER)
360                         continue;
361
362                 reg = (struct acpi_power_register *)obj->buffer.pointer;
363
364                 if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
365                     (reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE))
366                         continue;
367
368                 /* There should be an easy way to extract an integer... */
369                 obj = &(element->package.elements[1]);
370                 if (obj->type != ACPI_TYPE_INTEGER)
371                         continue;
372
373                 cx.type = obj->integer.value;
374                 /*
375                  * Some buggy BIOSes won't list C1 in _CST -
376                  * Let acpi_processor_get_power_info_default() handle them later
377                  */
378                 if (i == 1 && cx.type != ACPI_STATE_C1)
379                         current_count++;
380
381                 cx.address = reg->address;
382                 cx.index = current_count + 1;
383
384                 cx.entry_method = ACPI_CSTATE_SYSTEMIO;
385                 if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE) {
386                         if (acpi_processor_ffh_cstate_probe
387                                         (pr->id, &cx, reg) == 0) {
388                                 cx.entry_method = ACPI_CSTATE_FFH;
389                         } else if (cx.type == ACPI_STATE_C1) {
390                                 /*
391                                  * C1 is a special case where FIXED_HARDWARE
392                                  * can be handled in non-MWAIT way as well.
393                                  * In that case, save this _CST entry info.
394                                  * Otherwise, ignore this info and continue.
395                                  */
396                                 cx.entry_method = ACPI_CSTATE_HALT;
397                                 snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
398                         } else {
399                                 continue;
400                         }
401                         if (cx.type == ACPI_STATE_C1 &&
402                             (boot_option_idle_override == IDLE_NOMWAIT)) {
403                                 /*
404                                  * In most cases the C1 space_id obtained from
405                                  * _CST object is FIXED_HARDWARE access mode.
406                                  * But when the option of idle=halt is added,
407                                  * the entry_method type should be changed from
408                                  * CSTATE_FFH to CSTATE_HALT.
409                                  * When the option of idle=nomwait is added,
410                                  * the C1 entry_method type should be
411                                  * CSTATE_HALT.
412                                  */
413                                 cx.entry_method = ACPI_CSTATE_HALT;
414                                 snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI HLT");
415                         }
416                 } else {
417                         snprintf(cx.desc, ACPI_CX_DESC_LEN, "ACPI IOPORT 0x%x",
418                                  cx.address);
419                 }
420
421                 if (cx.type == ACPI_STATE_C1) {
422                         cx.valid = 1;
423                 }
424
425                 obj = &(element->package.elements[2]);
426                 if (obj->type != ACPI_TYPE_INTEGER)
427                         continue;
428
429                 cx.latency = obj->integer.value;
430
431                 obj = &(element->package.elements[3]);
432                 if (obj->type != ACPI_TYPE_INTEGER)
433                         continue;
434
435                 current_count++;
436                 memcpy(&(pr->power.states[current_count]), &cx, sizeof(cx));
437
438                 /*
439                  * We support total ACPI_PROCESSOR_MAX_POWER - 1
440                  * (From 1 through ACPI_PROCESSOR_MAX_POWER - 1)
441                  */
442                 if (current_count >= (ACPI_PROCESSOR_MAX_POWER - 1)) {
443                         pr_warn("Limiting number of power states to max (%d)\n",
444                                 ACPI_PROCESSOR_MAX_POWER);
445                         pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
446                         break;
447                 }
448         }
449
450         ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Found %d power states\n",
451                           current_count));
452
453         /* Validate number of power states discovered */
454         if (current_count < 2)
455                 ret = -EFAULT;
456
457 end:
458         kfree(buffer.pointer);
459
460         return ret;
461 }
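/*
 * Shape of the object parsed above (illustrative, with made-up numbers):
 * a two-state _CST evaluates to
 *
 *         Package {
 *                 2,                                      // state count
 *                 Package { Register, 1,  1, 1000 },      // C1: type, latency (us), power (mW)
 *                 Package { Register, 2, 50,  500 },      // C2
 *         }
 *
 * Inner packages with the wrong element count, a non-buffer register
 * description, or an unsupported address space are simply skipped by the
 * loop above.
 */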
462
463 static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
464                                            struct acpi_processor_cx *cx)
465 {
466         static int bm_check_flag = -1;
467         static int bm_control_flag = -1;
468
469
470         if (!cx->address)
471                 return;
472
473         /*
474          * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
475          * DMA transfers are used by any ISA device to avoid livelock.
476          * Note that we could disable Type-F DMA (as recommended by
477          * the erratum), but this is known to disrupt certain ISA
478          * devices thus we take the conservative approach.
479          */
480         if (errata.piix4.fdma) {
481                 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
482                                   "C3 not supported on PIIX4 with Type-F DMA\n"));
483                 return;
484         }
485
486         /* All the logic here assumes flags.bm_check is the same across all CPUs */
487         if (bm_check_flag == -1) {
488                 /* Determine whether bm_check is needed based on CPU  */
489                 acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
490                 bm_check_flag = pr->flags.bm_check;
491                 bm_control_flag = pr->flags.bm_control;
492         } else {
493                 pr->flags.bm_check = bm_check_flag;
494                 pr->flags.bm_control = bm_control_flag;
495         }
496
497         if (pr->flags.bm_check) {
498                 if (!pr->flags.bm_control) {
499                         if (pr->flags.has_cst != 1) {
500                                 /* bus mastering control is necessary */
501                                 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
502                                         "C3 support requires BM control\n"));
503                                 return;
504                         } else {
505                                 /* Here we enter C3 without bus mastering */
506                                 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
507                                         "C3 support without BM control\n"));
508                         }
509                 }
510         } else {
511                 /*
512          * The WBINVD flag must be set in the FADT for the C3 state
513          * to be supported when bm_check is not required.
514                  */
515                 if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
516                         ACPI_DEBUG_PRINT((ACPI_DB_INFO,
517                                           "Cache invalidation should work properly"
518                                           " for C3 to be enabled on SMP systems\n"));
519                         return;
520                 }
521         }
522
523         /*
524          * Otherwise we've met all of our C3 requirements.
525          * Normalize the C3 latency to expedite policy.  Enable
526          * checking of bus mastering status (bm_check) so we can
527          * use this in our C3 policy.
528          */
529         cx->valid = 1;
530
531         /*
532          * On older chipsets, BM_RLD needs to be set
533          * in order for Bus Master activity to wake the
534          * system from C3.  Newer chipsets handle DMA
535          * during C3 automatically and BM_RLD is a NOP.
536          * In either case, the proper way to
537          * handle BM_RLD is to set it and leave it set.
538          */
539         acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
540
541         return;
542 }
543
544 static int acpi_processor_power_verify(struct acpi_processor *pr)
545 {
546         unsigned int i;
547         unsigned int working = 0;
548
549         pr->power.timer_broadcast_on_state = INT_MAX;
550
551         for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
552                 struct acpi_processor_cx *cx = &pr->power.states[i];
553
554                 switch (cx->type) {
555                 case ACPI_STATE_C1:
556                         cx->valid = 1;
557                         break;
558
559                 case ACPI_STATE_C2:
560                         if (!cx->address)
561                                 break;
562                         cx->valid = 1;
563                         break;
564
565                 case ACPI_STATE_C3:
566                         acpi_processor_power_verify_c3(pr, cx);
567                         break;
568                 }
569                 if (!cx->valid)
570                         continue;
571
572                 lapic_timer_check_state(i, pr, cx);
573                 tsc_check_state(cx->type);
574                 working++;
575         }
576
577         lapic_timer_propagate_broadcast(pr);
578
579         return working;
580 }
581
582 static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
583 {
584         unsigned int i;
585         int result;
586
587
588         /* NOTE: the idle thread may not be running while calling
589          * this function */
590
591         /* Zero initialize all the C-states info. */
592         memset(pr->power.states, 0, sizeof(pr->power.states));
593
594         result = acpi_processor_get_power_info_cst(pr);
595         if (result == -ENODEV)
596                 result = acpi_processor_get_power_info_fadt(pr);
597
598         if (result)
599                 return result;
600
601         acpi_processor_get_power_info_default(pr);
602
603         pr->power.count = acpi_processor_power_verify(pr);
604
605         /*
606          * if one state of type C2 or C3 is available, mark this
607          * CPU as being "idle manageable"
608          */
609         for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
610                 if (pr->power.states[i].valid) {
611                         pr->power.count = i;
612                         if (pr->power.states[i].type >= ACPI_STATE_C2)
613                                 pr->flags.power = 1;
614                 }
615         }
616
617         return 0;
618 }
619
620 /**
621  * acpi_idle_bm_check - checks if bus master activity was detected
622  */
623 static int acpi_idle_bm_check(void)
624 {
625         u32 bm_status = 0;
626
627         if (bm_check_disable)
628                 return 0;
629
630         acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
631         if (bm_status)
632                 acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
633         /*
634          * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
635          * the true state of bus mastering activity; forcing us to
636          * manually check the BMIDEA bit of each IDE channel.
637          */
638         else if (errata.piix4.bmisx) {
639                 if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
640                     || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
641                         bm_status = 1;
642         }
643         return bm_status;
644 }
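/*
 * Note: the PM1 status bits, BM_STS included, are write-one-to-clear,
 * which is why a 1 is written back to ACPI_BITREG_BUS_MASTER_STATUS when
 * the bit reads as set - that clears the sticky bit so the next idle
 * entry only sees bus master activity that happened in between.
 */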
645
646 /**
647  * acpi_idle_do_entry - enter idle state using the appropriate method
648  * @cx: cstate data
649  *
650  * The caller disables interrupts before the call and enables them after return.
651  */
652 static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx)
653 {
654         if (cx->entry_method == ACPI_CSTATE_FFH) {
655                 /* Call into architectural FFH based C-state */
656                 acpi_processor_ffh_cstate_enter(cx);
657         } else if (cx->entry_method == ACPI_CSTATE_HALT) {
658                 acpi_safe_halt();
659         } else {
660                 /* IO port based C-state */
661                 inb(cx->address);
662                 /* Dummy wait op - must do something useless after P_LVL2 read
663                    because chipsets cannot guarantee that STPCLK# signal
664                    gets asserted in time to freeze execution properly. */
665                 inl(acpi_gbl_FADT.xpm_timer_block.address);
666         }
667 }
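/*
 * Usage sketch, following the contract stated above: a caller brackets
 * the entry with its own interrupt handling, e.g.
 *
 *         local_irq_disable();
 *         acpi_idle_do_entry(cx);     // returns with irqs still disabled
 *         local_irq_enable();
 *
 * The cpuidle core already invokes the ->enter callbacks with interrupts
 * disabled, so acpi_idle_enter() below needs no disabling of its own.
 */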
668
669 /**
670  * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)
671  * @dev: the target CPU
672  * @index: the index of suggested state
673  */
674 static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
675 {
676         struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
677
678         ACPI_FLUSH_CPU_CACHE();
679
680         while (1) {
681
682                 if (cx->entry_method == ACPI_CSTATE_HALT)
683                         safe_halt();
684                 else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
685                         inb(cx->address);
686                         /* See comment in acpi_idle_do_entry() */
687                         inl(acpi_gbl_FADT.xpm_timer_block.address);
688                 } else
689                         return -ENODEV;
690         }
691
692         /* Never reached */
693         return 0;
694 }
695
696 static bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
697 {
698         return IS_ENABLED(CONFIG_HOTPLUG_CPU) && !pr->flags.has_cst &&
699                 !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED);
700 }
701
702 static int c3_cpu_count;
703 static DEFINE_RAW_SPINLOCK(c3_lock);
704
705 /**
706  * acpi_idle_enter_bm - enters C3 with proper BM handling
707  * @pr: Target processor
708  * @cx: Target state context
709  * @timer_bc: Whether or not to change timer mode to broadcast
710  */
711 static void acpi_idle_enter_bm(struct acpi_processor *pr,
712                                struct acpi_processor_cx *cx, bool timer_bc)
713 {
714         acpi_unlazy_tlb(smp_processor_id());
715
716         /*
717          * Must be done before busmaster disable as we might need to
718          * access the HPET!
719          */
720         if (timer_bc)
721                 lapic_timer_state_broadcast(pr, cx, 1);
722
723         /*
724          * disable bus master
725          * bm_check implies we need ARB_DIS
726          * bm_control determines whether we can do ARB_DIS
727          *
728          * That leaves a case where bm_check is set and bm_control is
729          * not set. In that case we cannot do much, we enter C3
730          * without doing anything.
731          */
732         if (pr->flags.bm_control) {
733                 raw_spin_lock(&c3_lock);
734                 c3_cpu_count++;
735                 /* Disable bus master arbitration when all CPUs are in C3 */
736                 if (c3_cpu_count == num_online_cpus())
737                         acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
738                 raw_spin_unlock(&c3_lock);
739         }
740
741         acpi_idle_do_entry(cx);
742
743         /* Re-enable bus master arbitration */
744         if (pr->flags.bm_control) {
745                 raw_spin_lock(&c3_lock);
746                 acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
747                 c3_cpu_count--;
748                 raw_spin_unlock(&c3_lock);
749         }
750
751         if (timer_bc)
752                 lapic_timer_state_broadcast(pr, cx, 0);
753 }
754
755 static int acpi_idle_enter(struct cpuidle_device *dev,
756                            struct cpuidle_driver *drv, int index)
757 {
758         struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
759         struct acpi_processor *pr;
760
761         pr = __this_cpu_read(processors);
762         if (unlikely(!pr))
763                 return -EINVAL;
764
765         if (cx->type != ACPI_STATE_C1) {
766                 if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) {
767                         index = ACPI_IDLE_STATE_START;
768                         cx = per_cpu(acpi_cstate[index], dev->cpu);
769                 } else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) {
770                         if (cx->bm_sts_skip || !acpi_idle_bm_check()) {
771                                 acpi_idle_enter_bm(pr, cx, true);
772                                 return index;
773                         } else if (drv->safe_state_index >= 0) {
774                                 index = drv->safe_state_index;
775                                 cx = per_cpu(acpi_cstate[index], dev->cpu);
776                         } else {
777                                 acpi_safe_halt();
778                                 return -EBUSY;
779                         }
780                 }
781         }
782
783         lapic_timer_state_broadcast(pr, cx, 1);
784
785         if (cx->type == ACPI_STATE_C3)
786                 ACPI_FLUSH_CPU_CACHE();
787
788         acpi_idle_do_entry(cx);
789
790         lapic_timer_state_broadcast(pr, cx, 0);
791
792         return index;
793 }
794
795 static void acpi_idle_enter_s2idle(struct cpuidle_device *dev,
796                                    struct cpuidle_driver *drv, int index)
797 {
798         struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
799
800         if (cx->type == ACPI_STATE_C3) {
801                 struct acpi_processor *pr = __this_cpu_read(processors);
802
803                 if (unlikely(!pr))
804                         return;
805
806                 if (pr->flags.bm_check) {
807                         acpi_idle_enter_bm(pr, cx, false);
808                         return;
809                 } else {
810                         ACPI_FLUSH_CPU_CACHE();
811                 }
812         }
813         acpi_idle_do_entry(cx);
814 }
815
816 static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
817                                            struct cpuidle_device *dev)
818 {
819         int i, count = ACPI_IDLE_STATE_START;
820         struct acpi_processor_cx *cx;
821
822         if (max_cstate == 0)
823                 max_cstate = 1;
824
825         for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
826                 cx = &pr->power.states[i];
827
828                 if (!cx->valid)
829                         continue;
830
831                 per_cpu(acpi_cstate[count], dev->cpu) = cx;
832
833                 count++;
834                 if (count == CPUIDLE_STATE_MAX)
835                         break;
836         }
837
838         if (!count)
839                 return -EINVAL;
840
841         return 0;
842 }
843
844 static int acpi_processor_setup_cstates(struct acpi_processor *pr)
845 {
846         int i, count;
847         struct acpi_processor_cx *cx;
848         struct cpuidle_state *state;
849         struct cpuidle_driver *drv = &acpi_idle_driver;
850
851         if (max_cstate == 0)
852                 max_cstate = 1;
853
854         if (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX)) {
855                 cpuidle_poll_state_init(drv);
856                 count = 1;
857         } else {
858                 count = 0;
859         }
860
861         for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
862                 cx = &pr->power.states[i];
863
864                 if (!cx->valid)
865                         continue;
866
867                 state = &drv->states[count];
868                 snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
869                 strlcpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
870                 state->exit_latency = cx->latency;
871                 state->target_residency = cx->latency * latency_factor;
872                 state->enter = acpi_idle_enter;
873
874                 state->flags = 0;
875                 if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2) {
876                         state->enter_dead = acpi_idle_play_dead;
877                         drv->safe_state_index = count;
878                 }
879                 /*
880                  * Halt-induced C1 is not good for ->enter_s2idle, because it
881                  * re-enables interrupts on exit.  Moreover, C1 is generally not
882                  * particularly interesting from the suspend-to-idle angle, so
883                  * avoid C1 and the situations in which we may need to fall back
884                  * to it altogether.
885                  */
886                 if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr))
887                         state->enter_s2idle = acpi_idle_enter_s2idle;
888
889                 count++;
890                 if (count == CPUIDLE_STATE_MAX)
891                         break;
892         }
893
894         drv->state_count = count;
895
896         if (!count)
897                 return -EINVAL;
898
899         return 0;
900 }
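/*
 * Worked example with illustrative numbers: given the default
 * latency_factor of 2, a C-state whose _CST exit latency is 100 us is
 * registered with
 *
 *         state->exit_latency     = 100            (us)
 *         state->target_residency = 100 * 2 = 200  (us)
 *
 * so the cpuidle governor will only pick that state when it predicts the
 * CPU to stay idle for at least 200 us, amortizing the entry/exit cost.
 */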
901
902 static inline void acpi_processor_cstate_first_run_checks(void)
903 {
904         acpi_status status;
905         static int first_run;
906
907         if (first_run)
908                 return;
909         dmi_check_system(processor_power_dmi_table);
910         max_cstate = acpi_processor_cstate_check(max_cstate);
911         if (max_cstate < ACPI_C_STATES_MAX)
912                 pr_notice("processor limited to max C-state %d\n",
913                           max_cstate);
914         first_run++;
915
916         if (acpi_gbl_FADT.cst_control && !nocst) {
917                 status = acpi_os_write_port(acpi_gbl_FADT.smi_command,
918                                             acpi_gbl_FADT.cst_control, 8);
919                 if (ACPI_FAILURE(status))
920                         ACPI_EXCEPTION((AE_INFO, status,
921                                         "Notifying BIOS of _CST ability failed"));
922         }
923 }
924 #else
925
926 static inline int disabled_by_idle_boot_param(void) { return 0; }
927 static inline void acpi_processor_cstate_first_run_checks(void) { }
928 static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
929 {
930         return -ENODEV;
931 }
932
933 static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
934                                            struct cpuidle_device *dev)
935 {
936         return -EINVAL;
937 }
938
939 static int acpi_processor_setup_cstates(struct acpi_processor *pr)
940 {
941         return -EINVAL;
942 }
943
944 #endif /* CONFIG_ACPI_PROCESSOR_CSTATE */
945
946 struct acpi_lpi_states_array {
947         unsigned int size;
948         unsigned int composite_states_size;
949         struct acpi_lpi_state *entries;
950         struct acpi_lpi_state *composite_states[ACPI_PROCESSOR_MAX_POWER];
951 };
952
953 static int obj_get_integer(union acpi_object *obj, u32 *value)
954 {
955         if (obj->type != ACPI_TYPE_INTEGER)
956                 return -EINVAL;
957
958         *value = obj->integer.value;
959         return 0;
960 }
961
962 static int acpi_processor_evaluate_lpi(acpi_handle handle,
963                                        struct acpi_lpi_states_array *info)
964 {
965         acpi_status status;
966         int ret = 0;
967         int pkg_count, state_idx = 1, loop;
968         struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
969         union acpi_object *lpi_data;
970         struct acpi_lpi_state *lpi_state;
971
972         status = acpi_evaluate_object(handle, "_LPI", NULL, &buffer);
973         if (ACPI_FAILURE(status)) {
974                 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _LPI, giving up\n"));
975                 return -ENODEV;
976         }
977
978         lpi_data = buffer.pointer;
979
980         /* There must be at least 4 elements: 3 header elements plus 1 state package */
981         if (!lpi_data || lpi_data->type != ACPI_TYPE_PACKAGE ||
982             lpi_data->package.count < 4) {
983                 pr_debug("not enough elements in _LPI\n");
984                 ret = -ENODATA;
985                 goto end;
986         }
987
988         pkg_count = lpi_data->package.elements[2].integer.value;
989
990         /* Validate number of power states. */
991         if (pkg_count < 1 || pkg_count != lpi_data->package.count - 3) {
992                 pr_debug("count given by _LPI is not valid\n");
993                 ret = -ENODATA;
994                 goto end;
995         }
996
997         lpi_state = kcalloc(pkg_count, sizeof(*lpi_state), GFP_KERNEL);
998         if (!lpi_state) {
999                 ret = -ENOMEM;
1000                 goto end;
1001         }
1002
1003         info->size = pkg_count;
1004         info->entries = lpi_state;
1005
1006         /* LPI States start at index 3 */
1007         for (loop = 3; state_idx <= pkg_count; loop++, state_idx++, lpi_state++) {
1008                 union acpi_object *element, *pkg_elem, *obj;
1009
1010                 element = &lpi_data->package.elements[loop];
1011                 if (element->type != ACPI_TYPE_PACKAGE || element->package.count < 7)
1012                         continue;
1013
1014                 pkg_elem = element->package.elements;
1015
1016                 obj = pkg_elem + 6;
1017                 if (obj->type == ACPI_TYPE_BUFFER) {
1018                         struct acpi_power_register *reg;
1019
1020                         reg = (struct acpi_power_register *)obj->buffer.pointer;
1021                         if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
1022                             reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)
1023                                 continue;
1024
1025                         lpi_state->address = reg->address;
1026                         lpi_state->entry_method =
1027                                 reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE ?
1028                                 ACPI_CSTATE_FFH : ACPI_CSTATE_SYSTEMIO;
1029                 } else if (obj->type == ACPI_TYPE_INTEGER) {
1030                         lpi_state->entry_method = ACPI_CSTATE_INTEGER;
1031                         lpi_state->address = obj->integer.value;
1032                 } else {
1033                         continue;
1034                 }
1035
1036                 /* elements [7] and [8] (residency/usage counters) skipped for now */
1037
1038                 obj = pkg_elem + 9;
1039                 if (obj->type == ACPI_TYPE_STRING)
1040                         strlcpy(lpi_state->desc, obj->string.pointer,
1041                                 ACPI_CX_DESC_LEN);
1042
1043                 lpi_state->index = state_idx;
1044                 if (obj_get_integer(pkg_elem + 0, &lpi_state->min_residency)) {
1045                         pr_debug("No min. residency found, assuming 10 us\n");
1046                         lpi_state->min_residency = 10;
1047                 }
1048
1049                 if (obj_get_integer(pkg_elem + 1, &lpi_state->wake_latency)) {
1050                         pr_debug("No wakeup latency found, assuming 10 us\n");
1051                         lpi_state->wake_latency = 10;
1052                 }
1053
1054                 if (obj_get_integer(pkg_elem + 2, &lpi_state->flags))
1055                         lpi_state->flags = 0;
1056
1057                 if (obj_get_integer(pkg_elem + 3, &lpi_state->arch_flags))
1058                         lpi_state->arch_flags = 0;
1059
1060                 if (obj_get_integer(pkg_elem + 4, &lpi_state->res_cnt_freq))
1061                         lpi_state->res_cnt_freq = 1;
1062
1063                 if (obj_get_integer(pkg_elem + 5, &lpi_state->enable_parent_state))
1064                         lpi_state->enable_parent_state = 0;
1065         }
1066
1067         acpi_handle_debug(handle, "Found %d power states\n", state_idx);
1068 end:
1069         kfree(buffer.pointer);
1070         return ret;
1071 }
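/*
 * Shape of the object parsed above (per the ACPI 6.x _LPI definition):
 *
 *         Package {
 *                 Revision, LevelID, Count,       // elements 0..2
 *                 Package {                       // one per LPI state
 *                         MinResidency,           // [0], us
 *                         WakeLatency,            // [1], worst case, us
 *                         Flags,                  // [2]
 *                         ArchFlags,              // [3]
 *                         ResCntFreq,             // [4]
 *                         EnabledParentState,     // [5]
 *                         EntryMethod,            // [6], Register or Integer
 *                         ResidencyCounter,       // [7], skipped above
 *                         UsageCounter,           // [8], skipped above
 *                         StateName,              // [9], string
 *                 },
 *                 ...
 *         }
 */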
1072
1073 /*
1074  * flat_state_cnt - the number of composite LPI states after the process of flattening
1075  */
1076 static int flat_state_cnt;
1077
1078 /**
1079  * combine_lpi_states - combine local and parent LPI states to form a composite LPI state
1080  *
1081  * @local: local LPI state
1082  * @parent: parent LPI state
1083  * @result: composite LPI state
1084  */
1085 static bool combine_lpi_states(struct acpi_lpi_state *local,
1086                                struct acpi_lpi_state *parent,
1087                                struct acpi_lpi_state *result)
1088 {
1089         if (parent->entry_method == ACPI_CSTATE_INTEGER) {
1090                 if (!parent->address) /* 0 means autopromotable */
1091                         return false;
1092                 result->address = local->address + parent->address;
1093         } else {
1094                 result->address = parent->address;
1095         }
1096
1097         result->min_residency = max(local->min_residency, parent->min_residency);
1098         result->wake_latency = local->wake_latency + parent->wake_latency;
1099         result->enable_parent_state = parent->enable_parent_state;
1100         result->entry_method = local->entry_method;
1101
1102         result->flags = parent->flags;
1103         result->arch_flags = parent->arch_flags;
1104         result->index = parent->index;
1105
1106         strlcpy(result->desc, local->desc, ACPI_CX_DESC_LEN);
1107         strlcat(result->desc, "+", ACPI_CX_DESC_LEN);
1108         strlcat(result->desc, parent->desc, ACPI_CX_DESC_LEN);
1109         return true;
1110 }
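/*
 * Worked example with illustrative numbers: combining a core state
 * (min_residency 100 us, wake_latency 5 us) with a package state
 * (min_residency 400 us, wake_latency 20 us) yields a composite with
 *
 *         min_residency = max(100, 400) = 400 us
 *         wake_latency  = 5 + 20        = 25 us
 *
 * The deepest level dictates when entry pays off, while the exit has to
 * walk back up through every level.
 */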
1111
1112 #define ACPI_LPI_STATE_FLAGS_ENABLED                    BIT(0)
1113
1114 static void stash_composite_state(struct acpi_lpi_states_array *curr_level,
1115                                   struct acpi_lpi_state *t)
1116 {
1117         curr_level->composite_states[curr_level->composite_states_size++] = t;
1118 }
1119
1120 static int flatten_lpi_states(struct acpi_processor *pr,
1121                               struct acpi_lpi_states_array *curr_level,
1122                               struct acpi_lpi_states_array *prev_level)
1123 {
1124         int i, j, state_count = curr_level->size;
1125         struct acpi_lpi_state *p, *t = curr_level->entries;
1126
1127         curr_level->composite_states_size = 0;
1128         for (j = 0; j < state_count; j++, t++) {
1129                 struct acpi_lpi_state *flpi;
1130
1131                 if (!(t->flags & ACPI_LPI_STATE_FLAGS_ENABLED))
1132                         continue;
1133
1134                 if (flat_state_cnt >= ACPI_PROCESSOR_MAX_POWER) {
1135                         pr_warn("Limiting number of LPI states to max (%d)\n",
1136                                 ACPI_PROCESSOR_MAX_POWER);
1137                         pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
1138                         break;
1139                 }
1140
1141                 flpi = &pr->power.lpi_states[flat_state_cnt];
1142
1143                 if (!prev_level) { /* leaf/processor node */
1144                         memcpy(flpi, t, sizeof(*t));
1145                         stash_composite_state(curr_level, flpi);
1146                         flat_state_cnt++;
1147                         continue;
1148                 }
1149
1150                 for (i = 0; i < prev_level->composite_states_size; i++) {
1151                         p = prev_level->composite_states[i];
1152                         if (t->index <= p->enable_parent_state &&
1153                             combine_lpi_states(p, t, flpi)) {
1154                                 stash_composite_state(curr_level, flpi);
1155                                 flat_state_cnt++;
1156                                 flpi++;
1157                         }
1158                 }
1159         }
1160
1161         kfree(curr_level->entries);
1162         return 0;
1163 }
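/*
 * Illustrative example: with leaf states {C1, C2} and one package state
 * P1, where only C2's enable_parent_state permits promotion to P1, the
 * first pass (prev_level == NULL) stores C1 and C2 verbatim and the
 * second pass adds the combined state, leaving
 *
 *         pr->power.lpi_states[] = { C1, C2, C2+P1 }
 *
 * with flat_state_cnt == 3.
 */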
1164
1165 static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
1166 {
1167         int ret, i;
1168         acpi_status status;
1169         acpi_handle handle = pr->handle, pr_ahandle;
1170         struct acpi_device *d = NULL;
1171         struct acpi_lpi_states_array info[2], *tmp, *prev, *curr;
1172
1173         if (!osc_pc_lpi_support_confirmed)
1174                 return -EOPNOTSUPP;
1175
1176         if (!acpi_has_method(handle, "_LPI"))
1177                 return -EINVAL;
1178
1179         flat_state_cnt = 0;
1180         prev = &info[0];
1181         curr = &info[1];
1182         handle = pr->handle;
1183         ret = acpi_processor_evaluate_lpi(handle, prev);
1184         if (ret)
1185                 return ret;
1186         flatten_lpi_states(pr, prev, NULL);
1187
1188         status = acpi_get_parent(handle, &pr_ahandle);
1189         while (ACPI_SUCCESS(status)) {
1190                 acpi_bus_get_device(pr_ahandle, &d);
1191                 handle = pr_ahandle;
1192
1193                 if (strcmp(acpi_device_hid(d), ACPI_PROCESSOR_CONTAINER_HID))
1194                         break;
1195
1196                 /* _LPI is optional at upper levels; stop walking if absent */
1197                 if (!acpi_has_method(handle, "_LPI"))
1198                         break;
1199
1200                 ret = acpi_processor_evaluate_lpi(handle, curr);
1201                 if (ret)
1202                         break;
1203
1204                 /* flatten all the LPI states in this level of hierarchy */
1205                 flatten_lpi_states(pr, curr, prev);
1206
1207                 tmp = prev, prev = curr, curr = tmp;
1208
1209                 status = acpi_get_parent(handle, &pr_ahandle);
1210         }
1211
1212         pr->power.count = flat_state_cnt;
1213         /* reset the index after flattening */
1214         for (i = 0; i < pr->power.count; i++)
1215                 pr->power.lpi_states[i].index = i;
1216
1217         /* Tell driver that _LPI is supported. */
1218         pr->flags.has_lpi = 1;
1219         pr->flags.power = 1;
1220
1221         return 0;
1222 }
1223
1224 int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
1225 {
1226         return -ENODEV;
1227 }
1228
1229 int __weak acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
1230 {
1231         return -ENODEV;
1232 }
1233
1234 /**
1235  * acpi_idle_lpi_enter - enters an ACPI LPI state
1236  * @dev: the target CPU
1237  * @drv: cpuidle driver containing cpuidle state info
1238  * @index: index of target state
1239  *
1240  * Return: 0 for success or negative value for error
1241  */
1242 static int acpi_idle_lpi_enter(struct cpuidle_device *dev,
1243                                struct cpuidle_driver *drv, int index)
1244 {
1245         struct acpi_processor *pr;
1246         struct acpi_lpi_state *lpi;
1247
1248         pr = __this_cpu_read(processors);
1249
1250         if (unlikely(!pr))
1251                 return -EINVAL;
1252
1253         lpi = &pr->power.lpi_states[index];
1254         if (lpi->entry_method == ACPI_CSTATE_FFH)
1255                 return acpi_processor_ffh_lpi_enter(lpi);
1256
1257         return -EINVAL;
1258 }
1259
1260 static int acpi_processor_setup_lpi_states(struct acpi_processor *pr)
1261 {
1262         int i;
1263         struct acpi_lpi_state *lpi;
1264         struct cpuidle_state *state;
1265         struct cpuidle_driver *drv = &acpi_idle_driver;
1266
1267         if (!pr->flags.has_lpi)
1268                 return -EOPNOTSUPP;
1269
1270         for (i = 0; i < pr->power.count && i < CPUIDLE_STATE_MAX; i++) {
1271                 lpi = &pr->power.lpi_states[i];
1272
1273                 state = &drv->states[i];
1274                 snprintf(state->name, CPUIDLE_NAME_LEN, "LPI-%d", i);
1275                 strlcpy(state->desc, lpi->desc, CPUIDLE_DESC_LEN);
1276                 state->exit_latency = lpi->wake_latency;
1277                 state->target_residency = lpi->min_residency;
1278                 if (lpi->arch_flags)
1279                         state->flags |= CPUIDLE_FLAG_TIMER_STOP;
1280                 state->enter = acpi_idle_lpi_enter;
1281                 drv->safe_state_index = i;
1282         }
1283
1284         drv->state_count = i;
1285
1286         return 0;
1287 }
1288
1289 /**
1290  * acpi_processor_setup_cpuidle_states - prepares and configures cpuidle
1291  * global state data i.e. idle routines
1292  *
1293  * @pr: the ACPI processor
1294  */
1295 static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
1296 {
1297         int i;
1298         struct cpuidle_driver *drv = &acpi_idle_driver;
1299
1300         if (!pr->flags.power_setup_done || !pr->flags.power)
1301                 return -EINVAL;
1302
1303         drv->safe_state_index = -1;
1304         for (i = ACPI_IDLE_STATE_START; i < CPUIDLE_STATE_MAX; i++) {
1305                 drv->states[i].name[0] = '\0';
1306                 drv->states[i].desc[0] = '\0';
1307         }
1308
1309         if (pr->flags.has_lpi)
1310                 return acpi_processor_setup_lpi_states(pr);
1311
1312         return acpi_processor_setup_cstates(pr);
1313 }
1314
1315 /**
1316  * acpi_processor_setup_cpuidle_dev - prepares and configures CPUIDLE
1317  * device i.e. per-cpu data
1318  *
1319  * @pr: the ACPI processor
1320  * @dev: the cpuidle device
1321  */
1322 static int acpi_processor_setup_cpuidle_dev(struct acpi_processor *pr,
1323                                             struct cpuidle_device *dev)
1324 {
1325         if (!pr->flags.power_setup_done || !pr->flags.power || !dev)
1326                 return -EINVAL;
1327
1328         dev->cpu = pr->id;
1329         if (pr->flags.has_lpi)
1330                 return acpi_processor_ffh_lpi_probe(pr->id);
1331
1332         return acpi_processor_setup_cpuidle_cx(pr, dev);
1333 }
1334
1335 static int acpi_processor_get_power_info(struct acpi_processor *pr)
1336 {
1337         int ret;
1338
1339         ret = acpi_processor_get_lpi_info(pr);
1340         if (ret)
1341                 ret = acpi_processor_get_cstate_info(pr);
1342
1343         return ret;
1344 }
1345
1346 int acpi_processor_hotplug(struct acpi_processor *pr)
1347 {
1348         int ret = 0;
1349         struct cpuidle_device *dev;
1350
1351         if (disabled_by_idle_boot_param())
1352                 return 0;
1353
1354         if (!pr->flags.power_setup_done)
1355                 return -ENODEV;
1356
1357         dev = per_cpu(acpi_cpuidle_device, pr->id);
1358         cpuidle_pause_and_lock();
1359         cpuidle_disable_device(dev);
1360         ret = acpi_processor_get_power_info(pr);
1361         if (!ret && pr->flags.power) {
1362                 acpi_processor_setup_cpuidle_dev(pr, dev);
1363                 ret = cpuidle_enable_device(dev);
1364         }
1365         cpuidle_resume_and_unlock();
1366
1367         return ret;
1368 }
1369
1370 int acpi_processor_power_state_has_changed(struct acpi_processor *pr)
1371 {
1372         int cpu;
1373         struct acpi_processor *_pr;
1374         struct cpuidle_device *dev;
1375
1376         if (disabled_by_idle_boot_param())
1377                 return 0;
1378
1379         if (!pr->flags.power_setup_done)
1380                 return -ENODEV;
1381
1382         /*
1383          * FIXME:  Design the ACPI notification to make it once per
1384          * system instead of once per-cpu.  This condition is a hack
1385          * to make the code that updates C-States be called once.
1386          */
1387
1388         if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {
1389
1390                 /* Protect against cpu-hotplug */
1391                 get_online_cpus();
1392                 cpuidle_pause_and_lock();
1393
1394                 /* Disable all cpuidle devices */
1395                 for_each_online_cpu(cpu) {
1396                         _pr = per_cpu(processors, cpu);
1397                         if (!_pr || !_pr->flags.power_setup_done)
1398                                 continue;
1399                         dev = per_cpu(acpi_cpuidle_device, cpu);
1400                         cpuidle_disable_device(dev);
1401                 }
1402
1403                 /* Populate Updated C-state information */
1404                 acpi_processor_get_power_info(pr);
1405                 acpi_processor_setup_cpuidle_states(pr);
1406
1407                 /* Enable all cpuidle devices */
1408                 for_each_online_cpu(cpu) {
1409                         _pr = per_cpu(processors, cpu);
1410                         if (!_pr || !_pr->flags.power_setup_done)
1411                                 continue;
1412                         acpi_processor_get_power_info(_pr);
1413                         if (_pr->flags.power) {
1414                                 dev = per_cpu(acpi_cpuidle_device, cpu);
1415                                 acpi_processor_setup_cpuidle_dev(_pr, dev);
1416                                 cpuidle_enable_device(dev);
1417                         }
1418                 }
1419                 cpuidle_resume_and_unlock();
1420                 put_online_cpus();
1421         }
1422
1423         return 0;
1424 }
1425
1426 static int acpi_processor_registered;
1427
1428 int acpi_processor_power_init(struct acpi_processor *pr)
1429 {
1430         int retval;
1431         struct cpuidle_device *dev;
1432
1433         if (disabled_by_idle_boot_param())
1434                 return 0;
1435
1436         acpi_processor_cstate_first_run_checks();
1437
1438         if (!acpi_processor_get_power_info(pr))
1439                 pr->flags.power_setup_done = 1;
1440
1441         /*
1442          * Install the idle handler if processor power management is supported.
1443          * Note that the previously set idle handler will be used on
1444          * platforms that only support C1.
1445          */
1446         if (pr->flags.power) {
1447                 /* Register acpi_idle_driver if not already registered */
1448                 if (!acpi_processor_registered) {
1449                         acpi_processor_setup_cpuidle_states(pr);
1450                         retval = cpuidle_register_driver(&acpi_idle_driver);
1451                         if (retval)
1452                                 return retval;
1453                         pr_debug("%s registered with cpuidle\n",
1454                                  acpi_idle_driver.name);
1455                 }
1456
1457                 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
1458                 if (!dev)
1459                         return -ENOMEM;
1460                 per_cpu(acpi_cpuidle_device, pr->id) = dev;
1461
1462                 acpi_processor_setup_cpuidle_dev(pr, dev);
1463
1464                 /* Register the per-CPU cpuidle_device. The cpuidle driver
1465                  * must already be registered before registering the device.
1466                  */
1467                 retval = cpuidle_register_device(dev);
1468                 if (retval) {
1469                         if (acpi_processor_registered == 0)
1470                                 cpuidle_unregister_driver(&acpi_idle_driver);
1471                         return retval;
1472                 }
1473                 acpi_processor_registered++;
1474         }
1475         return 0;
1476 }
1477
1478 int acpi_processor_power_exit(struct acpi_processor *pr)
1479 {
1480         struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);
1481
1482         if (disabled_by_idle_boot_param())
1483                 return 0;
1484
1485         if (pr->flags.power) {
1486                 cpuidle_unregister_device(dev);
1487                 acpi_processor_registered--;
1488                 if (acpi_processor_registered == 0)
1489                         cpuidle_unregister_driver(&acpi_idle_driver);
1490         }
1491
1492         pr->flags.power_setup_done = 0;
1493         return 0;
1494 }