/*
 * Copyright (C) 2005 Intel Corporation
 *	Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *	- Added _PDC for SMP C-states on Intel CPUs
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/sched.h>

#include <acpi/processor.h>
#include <asm/mwait.h>
#include <asm/special_insns.h>

/*
 * Initialize bm_flags based on the CPU cache properties.
 * On SMP it depends on cache configuration:
 * - When cache is not shared among all CPUs, we flush cache
 *   before entering C3.
 * - When cache is shared among all CPUs, we use the bm_check
 *   mechanism as in the UP case.
 *
 * This routine is called only after all the CPUs are online.
 */
void acpi_processor_power_init_bm_check(struct acpi_processor_flags *flags,
                                        unsigned int cpu)
{
        struct cpuinfo_x86 *c = &cpu_data(cpu);

        flags->bm_check = 0;
        if (num_online_cpus() == 1)
                flags->bm_check = 1;
        else if (c->x86_vendor == X86_VENDOR_INTEL) {
                /*
                 * Today all MP CPUs that support C3 share cache.
                 * And caches should not be flushed by software while
                 * entering C3 type state.
                 */
                flags->bm_check = 1;
        }

        /*
         * On all recent Intel platforms, ARB_DISABLE is a nop.
         * So, set bm_control to zero to indicate that ARB_DISABLE
         * is not required while entering C3 type state on
         * P4, Core and beyond CPUs.
         */
        if (c->x86_vendor == X86_VENDOR_INTEL &&
            (c->x86 > 0xf || (c->x86 == 6 && c->x86_model >= 0x0f)))
                flags->bm_control = 0;

        /*
         * For all recent Centaur CPUs, the ucode will make sure that each
         * core can keep cache coherence with each other while entering C3
         * type state. So, set bm_check to 1 to indicate that the kernel
         * doesn't need to execute a cache flush operation (WBINVD) when
         * entering C3 type state.
         */
        if (c->x86_vendor == X86_VENDOR_CENTAUR) {
                if (c->x86 > 6 || (c->x86 == 6 && c->x86_model == 0x0f &&
                    c->x86_stepping >= 0x0e))
                        flags->bm_check = 1;
        }
}
EXPORT_SYMBOL(acpi_processor_power_init_bm_check);

/* The code below handles C-state entry with the MONITOR/MWAIT pair on Intel */

struct cstate_entry {
        struct {
                unsigned int eax;
                unsigned int ecx;
        } states[ACPI_PROCESSOR_MAX_POWER];
};
static struct cstate_entry __percpu *cpu_cstate_entry;	/* per CPU ptr */

static short mwait_supported[ACPI_PROCESSOR_MAX_POWER];

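/*
 * _CST FFH entries are expected to report this GAS bit_offset when their
 * address field is a native MWAIT hint for a C-state deeper than HALT;
 * acpi_processor_ffh_cstate_probe() rejects anything else.
 */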
#define NATIVE_CSTATE_BEYOND_HALT	(2)

static long acpi_processor_ffh_cstate_probe_cpu(void *_cx)
{
        struct acpi_processor_cx *cx = _cx;
        long retval;
        unsigned int eax, ebx, ecx, edx;
        unsigned int edx_part;
        unsigned int cstate_type; /* C-state type and not ACPI C-state type */
        unsigned int num_cstate_subtype;

        cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);

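        /*
         * The _CST address field is the MWAIT hint: its upper nibble encodes
         * the target C-state. CPUID.05H:EDX reports the number of supported
         * sub-states for each C-state in consecutive MWAIT_SUBSTATE_SIZE-bit
         * fields, so the decoded type is used to index into EDX below.
         */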
        /* Check whether this particular cx_type (in CST) is supported or not */
        cstate_type = ((cx->address >> MWAIT_SUBSTATE_SIZE) &
                        MWAIT_CSTATE_MASK) + 1;
        edx_part = edx >> (cstate_type * MWAIT_SUBSTATE_SIZE);
        num_cstate_subtype = edx_part & MWAIT_SUBSTATE_MASK;

        retval = 0;
        /* If the HW does not support any sub-states in this C-state */
        if (num_cstate_subtype == 0) {
                pr_warn(FW_BUG "ACPI MWAIT C-state 0x%x not supported by HW (0x%x)\n",
                        cx->address, edx_part);
                retval = -1;
                goto out;
        }

        /* MWAIT ECX extensions and INTERRUPT_BREAK must be supported for C2/C3 */
        if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
            !(ecx & CPUID5_ECX_INTERRUPT_BREAK)) {
                retval = -1;
                goto out;
        }

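        /*
         * Print the MONITOR/MWAIT announcement only once per C-state type
         * rather than once per CPU.
         */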
        if (!mwait_supported[cstate_type]) {
                mwait_supported[cstate_type] = 1;
                printk(KERN_DEBUG
                        "Monitor-Mwait will be used to enter C-%d state\n",
                        cx->type);
        }
        snprintf(cx->desc,
                        ACPI_CX_DESC_LEN, "ACPI FFH MWAIT 0x%x",
                        cx->address);
out:
        return retval;
}

int acpi_processor_ffh_cstate_probe(unsigned int cpu,
                struct acpi_processor_cx *cx, struct acpi_power_register *reg)
{
        struct cstate_entry *percpu_entry;
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        long retval;

        if (!cpu_cstate_entry || c->cpuid_level < CPUID_MWAIT_LEAF)
                return -1;

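        /*
         * Only _CST FFH entries flagged as native MWAIT states beyond HALT
         * carry an MWAIT hint in their address field; reject anything else.
         */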
        if (reg->bit_offset != NATIVE_CSTATE_BEYOND_HALT)
                return -1;

        percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
        percpu_entry->states[cx->index].eax = 0;
        percpu_entry->states[cx->index].ecx = 0;

        /* Make sure we are running on right CPU */
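        /*
         * work_on_cpu() runs acpi_processor_ffh_cstate_probe_cpu() on the
         * target CPU and waits for its result, so the CPUID data read there
         * describes that CPU rather than the one executing this function.
         */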
        retval = work_on_cpu(cpu, acpi_processor_ffh_cstate_probe_cpu, cx);
        if (retval == 0) {
                /* Use the hint in CST */
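                /*
                 * The _CST address is used verbatim as the MWAIT EAX hint;
                 * MWAIT_ECX_INTERRUPT_BREAK lets an interrupt wake the CPU
                 * from MWAIT even when it is entered with interrupts masked.
                 */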
                percpu_entry->states[cx->index].eax = cx->address;
                percpu_entry->states[cx->index].ecx = MWAIT_ECX_INTERRUPT_BREAK;
        }

        /*
         * For _CST FFH on Intel, if GAS.access_size bit 1 is cleared,
         * then we should skip checking BM_STS for this C-state.
         * ref: "Intel Processor Vendor-Specific ACPI Interface Specification"
         */
        if ((c->x86_vendor == X86_VENDOR_INTEL) && !(reg->access_size & 0x2))
                cx->bm_sts_skip = 1;

        return retval;
}
EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_probe);

void __cpuidle acpi_processor_ffh_cstate_enter(struct acpi_processor_cx *cx)
{
        unsigned int cpu = smp_processor_id();
        struct cstate_entry *percpu_entry;

        percpu_entry = per_cpu_ptr(cpu_cstate_entry, cpu);
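        /*
         * Enter the state with the EAX hint and ECX flags cached for this
         * CPU at probe time; mwait_idle_with_hints() issues the actual
         * MONITOR/MWAIT pair.
         */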
        mwait_idle_with_hints(percpu_entry->states[cx->index].eax,
                              percpu_entry->states[cx->index].ecx);
}
EXPORT_SYMBOL_GPL(acpi_processor_ffh_cstate_enter);

static int __init ffh_cstate_init(void)
{
        struct cpuinfo_x86 *c = &boot_cpu_data;

        if (c->x86_vendor != X86_VENDOR_INTEL &&
            c->x86_vendor != X86_VENDOR_AMD)
                return -1;

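        /*
         * A failed allocation is tolerated: acpi_processor_ffh_cstate_probe()
         * checks cpu_cstate_entry for NULL and simply declines FFH C-states.
         */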
        cpu_cstate_entry = alloc_percpu(struct cstate_entry);
        return 0;
}

static void __exit ffh_cstate_exit(void)
{
        free_percpu(cpu_cstate_entry);
        cpu_cstate_entry = NULL;
}

arch_initcall(ffh_cstate_init);
__exitcall(ffh_cstate_exit);