2 * Routines to identify caches on Intel CPU.
5 * Venkatesh Pallipadi : Adding cache identification through cpuid(4)
6 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
7 * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD.
10 #include <linux/init.h>
11 #include <linux/slab.h>
12 #include <linux/device.h>
13 #include <linux/compiler.h>
14 #include <linux/cpu.h>
15 #include <linux/sched.h>
16 #include <linux/pci.h>
18 #include <asm/processor.h>
19 #include <linux/smp.h>
29 unsigned char descriptor;
34 /* All the cache descriptor types we care about (no TLB or
35 trace cache entries) */
37 static const struct _cache_table __cpuinitconst cache_table[] =
39 { 0x06, LVL_1_INST, 8 }, /* 4-way set assoc, 32 byte line size */
40 { 0x08, LVL_1_INST, 16 }, /* 4-way set assoc, 32 byte line size */
41 { 0x09, LVL_1_INST, 32 }, /* 4-way set assoc, 64 byte line size */
42 { 0x0a, LVL_1_DATA, 8 }, /* 2 way set assoc, 32 byte line size */
43 { 0x0c, LVL_1_DATA, 16 }, /* 4-way set assoc, 32 byte line size */
44 { 0x0d, LVL_1_DATA, 16 }, /* 4-way set assoc, 64 byte line size */
45 { 0x21, LVL_2, 256 }, /* 8-way set assoc, 64 byte line size */
46 { 0x22, LVL_3, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */
47 { 0x23, LVL_3, 1024 }, /* 8-way set assoc, sectored cache, 64 byte line size */
48 { 0x25, LVL_3, 2048 }, /* 8-way set assoc, sectored cache, 64 byte line size */
49 { 0x29, LVL_3, 4096 }, /* 8-way set assoc, sectored cache, 64 byte line size */
50 { 0x2c, LVL_1_DATA, 32 }, /* 8-way set assoc, 64 byte line size */
51 { 0x30, LVL_1_INST, 32 }, /* 8-way set assoc, 64 byte line size */
52 { 0x39, LVL_2, 128 }, /* 4-way set assoc, sectored cache, 64 byte line size */
53 { 0x3a, LVL_2, 192 }, /* 6-way set assoc, sectored cache, 64 byte line size */
54 { 0x3b, LVL_2, 128 }, /* 2-way set assoc, sectored cache, 64 byte line size */
55 { 0x3c, LVL_2, 256 }, /* 4-way set assoc, sectored cache, 64 byte line size */
56 { 0x3d, LVL_2, 384 }, /* 6-way set assoc, sectored cache, 64 byte line size */
57 { 0x3e, LVL_2, 512 }, /* 4-way set assoc, sectored cache, 64 byte line size */
58 { 0x3f, LVL_2, 256 }, /* 2-way set assoc, 64 byte line size */
59 { 0x41, LVL_2, 128 }, /* 4-way set assoc, 32 byte line size */
60 { 0x42, LVL_2, 256 }, /* 4-way set assoc, 32 byte line size */
61 { 0x43, LVL_2, 512 }, /* 4-way set assoc, 32 byte line size */
62 { 0x44, LVL_2, 1024 }, /* 4-way set assoc, 32 byte line size */
63 { 0x45, LVL_2, 2048 }, /* 4-way set assoc, 32 byte line size */
64 { 0x46, LVL_3, 4096 }, /* 4-way set assoc, 64 byte line size */
65 { 0x47, LVL_3, 8192 }, /* 8-way set assoc, 64 byte line size */
66 { 0x49, LVL_3, 4096 }, /* 16-way set assoc, 64 byte line size */
67 { 0x4a, LVL_3, 6144 }, /* 12-way set assoc, 64 byte line size */
68 { 0x4b, LVL_3, 8192 }, /* 16-way set assoc, 64 byte line size */
69 { 0x4c, LVL_3, 12288 }, /* 12-way set assoc, 64 byte line size */
70 { 0x4d, LVL_3, 16384 }, /* 16-way set assoc, 64 byte line size */
71 { 0x4e, LVL_2, 6144 }, /* 24-way set assoc, 64 byte line size */
72 { 0x60, LVL_1_DATA, 16 }, /* 8-way set assoc, sectored cache, 64 byte line size */
73 { 0x66, LVL_1_DATA, 8 }, /* 4-way set assoc, sectored cache, 64 byte line size */
74 { 0x67, LVL_1_DATA, 16 }, /* 4-way set assoc, sectored cache, 64 byte line size */
75 { 0x68, LVL_1_DATA, 32 }, /* 4-way set assoc, sectored cache, 64 byte line size */
76 { 0x70, LVL_TRACE, 12 }, /* 8-way set assoc */
77 { 0x71, LVL_TRACE, 16 }, /* 8-way set assoc */
78 { 0x72, LVL_TRACE, 32 }, /* 8-way set assoc */
79 { 0x73, LVL_TRACE, 64 }, /* 8-way set assoc */
80 { 0x78, LVL_2, 1024 }, /* 4-way set assoc, 64 byte line size */
81 { 0x79, LVL_2, 128 }, /* 8-way set assoc, sectored cache, 64 byte line size */
82 { 0x7a, LVL_2, 256 }, /* 8-way set assoc, sectored cache, 64 byte line size */
83 { 0x7b, LVL_2, 512 }, /* 8-way set assoc, sectored cache, 64 byte line size */
84 { 0x7c, LVL_2, 1024 }, /* 8-way set assoc, sectored cache, 64 byte line size */
85 { 0x7d, LVL_2, 2048 }, /* 8-way set assoc, 64 byte line size */
86 { 0x7f, LVL_2, 512 }, /* 2-way set assoc, 64 byte line size */
87 { 0x82, LVL_2, 256 }, /* 8-way set assoc, 32 byte line size */
88 { 0x83, LVL_2, 512 }, /* 8-way set assoc, 32 byte line size */
89 { 0x84, LVL_2, 1024 }, /* 8-way set assoc, 32 byte line size */
90 { 0x85, LVL_2, 2048 }, /* 8-way set assoc, 32 byte line size */
91 { 0x86, LVL_2, 512 }, /* 4-way set assoc, 64 byte line size */
92 { 0x87, LVL_2, 1024 }, /* 8-way set assoc, 64 byte line size */
93 { 0xd0, LVL_3, 512 }, /* 4-way set assoc, 64 byte line size */
94 { 0xd1, LVL_3, 1024 }, /* 4-way set assoc, 64 byte line size */
95 { 0xd2, LVL_3, 2048 }, /* 4-way set assoc, 64 byte line size */
96 { 0xd6, LVL_3, 1024 }, /* 8-way set assoc, 64 byte line size */
97 { 0xd7, LVL_3, 2038 }, /* 8-way set assoc, 64 byte line size */
98 { 0xd8, LVL_3, 4096 }, /* 12-way set assoc, 64 byte line size */
99 { 0xdc, LVL_3, 2048 }, /* 12-way set assoc, 64 byte line size */
100 { 0xdd, LVL_3, 4096 }, /* 12-way set assoc, 64 byte line size */
101 { 0xde, LVL_3, 8192 }, /* 12-way set assoc, 64 byte line size */
102 { 0xe2, LVL_3, 2048 }, /* 16-way set assoc, 64 byte line size */
103 { 0xe3, LVL_3, 4096 }, /* 16-way set assoc, 64 byte line size */
104 { 0xe4, LVL_3, 8192 }, /* 16-way set assoc, 64 byte line size */
113 CACHE_TYPE_UNIFIED = 3
/*
 * Bit-field view of the EAX register returned by CPUID leaf 4
 * (deterministic cache parameters).
 * NOTE(review): the "full"/"split" union wrapper lines appear to be
 * missing from this extract (code elsewhere accesses .full and .split).
 */
116 union _cpuid4_leaf_eax {
118 enum _cache_type type:5;
119 unsigned int level:3;
120 unsigned int is_self_initializing:1;
121 unsigned int is_fully_associative:1;
122 unsigned int reserved:4;
123 unsigned int num_threads_sharing:12;
124 unsigned int num_cores_on_die:6;
/*
 * Bit-field view of CPUID(4) EBX.  All three fields are stored by the
 * hardware as value-minus-one (see the +1 adjustments in
 * cpuid4_cache_lookup_regs()).
 */
129 union _cpuid4_leaf_ebx {
131 unsigned int coherency_line_size:12;
132 unsigned int physical_line_partition:10;
133 unsigned int ways_of_associativity:10;
/* Bit-field view of CPUID(4) ECX: number of sets, minus one. */
138 union _cpuid4_leaf_ecx {
140 unsigned int number_of_sets:32;
/*
 * Cached CPUID(4) data for one cache leaf on one CPU, plus the bitmap
 * of all CPUs that share this particular cache.
 */
145 struct _cpuid4_info {
146 union _cpuid4_leaf_eax eax;
147 union _cpuid4_leaf_ebx ebx;
148 union _cpuid4_leaf_ecx ecx;
150 unsigned long can_disable;
151 DECLARE_BITMAP(shared_cpu_map, NR_CPUS);
154 /* subset of above _cpuid4_info w/o shared_cpu_map */
/*
 * Must remain a prefix of struct _cpuid4_info: cpuid4_cache_lookup()
 * casts a struct _cpuid4_info pointer to this type.
 */
155 struct _cpuid4_info_regs {
156 union _cpuid4_leaf_eax eax;
157 union _cpuid4_leaf_ebx ebx;
158 union _cpuid4_leaf_ecx ecx;
160 unsigned long can_disable;
/* Number of CPUID(4) cache leaves; initialized once from the boot CPU. */
163 unsigned short num_cache_leaves;
165 /* AMD doesn't have CPUID4. Emulate it here to report the same
166 information to the user. This makes some assumptions about the machine:
167 L2 not shared, no SMT etc. that is currently true on AMD CPUs.
169 In theory the TLBs could be reported as fake type (they are in "dummy").
173 unsigned line_size:8;
174 unsigned lines_per_tag:8;
176 unsigned size_in_kb:8;
183 unsigned line_size:8;
184 unsigned lines_per_tag:4;
186 unsigned size_in_kb:16;
193 unsigned line_size:8;
194 unsigned lines_per_tag:4;
197 unsigned size_encoded:14;
/*
 * Map the AMD CPUID 0x80000005/0x80000006 associativity encoding to an
 * actual way count.  NOTE(review): most table entries are missing from
 * this extract; only the fully-associative sentinel is visible.
 */
202 static const unsigned short __cpuinitconst assocs[] = {
213 [0xf] = 0xffff /* fully associative - no way to show this currently */
/*
 * Per emulated-leaf cache level and type, indexed by the leaf number
 * passed to amd_cpuid4().  Presumably 0=L1D, 1=L1I, 2=L2, 3=L3
 * (types value 3 matches CACHE_TYPE_UNIFIED) — TODO confirm against
 * the full enum _cache_type definition.
 */
216 static const unsigned char __cpuinitconst levels[] = { 1, 1, 2, 3 };
217 static const unsigned char __cpuinitconst types[] = { 1, 2, 3, 3 };
/*
 * AMD CPUs lack CPUID leaf 4.  Synthesize equivalent eax/ebx/ecx
 * contents from the AMD-specific leaves 0x80000005 (L1) and 0x80000006
 * (L2/L3) so the common lookup code can treat all vendors alike.
 * See the block comment above: assumes L2 not shared, no SMT.
 * NOTE(review): several lines (braces, switch labels, l2/l3/dummy
 * declarations) appear to be missing from this extract.
 */
219 static void __cpuinit
220 amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
221 union _cpuid4_leaf_ebx *ebx,
222 union _cpuid4_leaf_ecx *ecx)
225 unsigned line_size, lines_per_tag, assoc, size_in_kb;
226 union l1_cache l1i, l1d;
/* Default to the L1 data cache; leaf 1 presumably switches to l1i. */
229 union l1_cache *l1 = &l1d;
/* Raw descriptors: L1D/L1I from 0x80000005, L2/L3 from 0x80000006. */
235 cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
236 cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);
/* L1 leaf: take geometry straight from the raw descriptor. */
244 assoc = assocs[l1->assoc];
245 line_size = l1->line_size;
246 lines_per_tag = l1->lines_per_tag;
247 size_in_kb = l1->size_in_kb;
/* L2 leaf. */
252 assoc = assocs[l2.assoc];
253 line_size = l2.line_size;
254 lines_per_tag = l2.lines_per_tag;
255 /* cpu_data has errata corrections for K7 applied */
256 size_in_kb = current_cpu_data.x86_cache_size;
/* L3 leaf: size is encoded in 512 KB units. */
261 assoc = assocs[l3.assoc];
262 line_size = l3.line_size;
263 lines_per_tag = l3.lines_per_tag;
264 size_in_kb = l3.size_encoded * 512;
265 if (boot_cpu_has(X86_FEATURE_AMD_DCM)) {
/* Multi-node (DCM) parts: each node owns half the reported L3. */
266 size_in_kb = size_in_kb >> 1;
/* Build the synthetic CPUID(4) registers (fields are +1-biased). */
274 eax->split.is_self_initializing = 1;
275 eax->split.type = types[leaf];
276 eax->split.level = levels[leaf];
277 eax->split.num_threads_sharing = 0;
278 eax->split.num_cores_on_die = current_cpu_data.x86_max_cores - 1;
282 eax->split.is_fully_associative = 1;
283 ebx->split.coherency_line_size = line_size - 1;
284 ebx->split.ways_of_associativity = assoc - 1;
285 ebx->split.physical_line_partition = lines_per_tag - 1;
286 ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
287 (ebx->split.ways_of_associativity + 1) - 1;
/*
 * Decide whether this L3 leaf supports cache-index-disable and record
 * it in this_leaf->can_disable.  The visible checks appear to bail out
 * for family 0x11 and for pre-model-8 family 0x10 parts (erratum #382)
 * — the return statements are missing from this extract.
 */
290 static void __cpuinit
291 amd_check_l3_disable(int index, struct _cpuid4_info_regs *this_leaf)
296 if (boot_cpu_data.x86 == 0x11)
299 /* see erratum #382 */
300 if ((boot_cpu_data.x86 == 0x10) && (boot_cpu_data.x86_model < 0x8))
303 this_leaf->can_disable = 1;
/*
 * Fill *this_leaf for cache leaf @index: native CPUID(4) on Intel,
 * emulated via amd_cpuid4() on AMD (with L3 index-disable detection on
 * family 0x10+).  Returns -EIO if the leaf is CACHE_TYPE_NULL.
 * Total size = sets * line_size * partitions * ways; all four CPUID
 * fields are stored minus-one, hence the +1 on each factor.
 */
307 __cpuinit cpuid4_cache_lookup_regs(int index,
308 struct _cpuid4_info_regs *this_leaf)
310 union _cpuid4_leaf_eax eax;
311 union _cpuid4_leaf_ebx ebx;
312 union _cpuid4_leaf_ecx ecx;
315 if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
316 amd_cpuid4(index, &eax, &ebx, &ecx);
317 if (boot_cpu_data.x86 >= 0x10)
318 amd_check_l3_disable(index, this_leaf);
/* Non-AMD path: query the deterministic cache parameters leaf. */
320 cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
323 if (eax.split.type == CACHE_TYPE_NULL)
324 return -EIO; /* better error ? */
326 this_leaf->eax = eax;
327 this_leaf->ebx = ebx;
328 this_leaf->ecx = ecx;
329 this_leaf->size = (ecx.split.number_of_sets + 1) *
330 (ebx.split.coherency_line_size + 1) *
331 (ebx.split.physical_line_partition + 1) *
332 (ebx.split.ways_of_associativity + 1);
/*
 * Count the CPUID(4) cache leaves by probing successive sub-leaves
 * until one reports CACHE_TYPE_NULL.  Run once on the boot CPU.
 */
336 static int __cpuinit find_num_cache_leaves(void)
338 unsigned int eax, ebx, ecx, edx;
339 union _cpuid4_leaf_eax cache_eax;
344 /* Do cpuid(4) loop to find out num_cache_leaves */
345 cpuid_count(4, i, &eax, &ebx, &ecx, &edx);
346 cache_eax.full = eax;
347 } while (cache_eax.split.type != CACHE_TYPE_NULL);
351 unsigned int __cpuinit init_intel_cacheinfo(struct cpuinfo_x86 *c)
354 unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
355 unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
356 unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
357 unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
359 unsigned int cpu = c->cpu_index;
362 if (c->cpuid_level > 3) {
363 static int is_initialized;
365 if (is_initialized == 0) {
366 /* Init num_cache_leaves from boot CPU */
367 num_cache_leaves = find_num_cache_leaves();
372 * Whenever possible use cpuid(4), deterministic cache
373 * parameters cpuid leaf to find the cache details
375 for (i = 0; i < num_cache_leaves; i++) {
376 struct _cpuid4_info_regs this_leaf;
379 retval = cpuid4_cache_lookup_regs(i, &this_leaf);
381 switch (this_leaf.eax.split.level) {
383 if (this_leaf.eax.split.type ==
385 new_l1d = this_leaf.size/1024;
386 else if (this_leaf.eax.split.type ==
388 new_l1i = this_leaf.size/1024;
391 new_l2 = this_leaf.size/1024;
392 num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
393 index_msb = get_count_order(num_threads_sharing);
394 l2_id = c->apicid >> index_msb;
397 new_l3 = this_leaf.size/1024;
398 num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
399 index_msb = get_count_order(
400 num_threads_sharing);
401 l3_id = c->apicid >> index_msb;
410 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
413 if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
414 /* supports eax=2 call */
416 unsigned int regs[4];
417 unsigned char *dp = (unsigned char *)regs;
420 if (num_cache_leaves != 0 && c->x86 == 15)
423 /* Number of times to iterate */
424 n = cpuid_eax(2) & 0xFF;
426 for (i = 0 ; i < n ; i++) {
427 cpuid(2, ®s[0], ®s[1], ®s[2], ®s[3]);
429 /* If bit 31 is set, this is an unknown format */
430 for (j = 0 ; j < 3 ; j++)
431 if (regs[j] & (1 << 31))
434 /* Byte 0 is level count, not a descriptor */
435 for (j = 1 ; j < 16 ; j++) {
436 unsigned char des = dp[j];
439 /* look up this descriptor in the table */
440 while (cache_table[k].descriptor != 0) {
441 if (cache_table[k].descriptor == des) {
442 if (only_trace && cache_table[k].cache_type != LVL_TRACE)
444 switch (cache_table[k].cache_type) {
446 l1i += cache_table[k].size;
449 l1d += cache_table[k].size;
452 l2 += cache_table[k].size;
455 l3 += cache_table[k].size;
458 trace += cache_table[k].size;
480 per_cpu(cpu_llc_id, cpu) = l2_id;
487 per_cpu(cpu_llc_id, cpu) = l3_id;
491 c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));
498 /* pointer to _cpuid4_info array (for each cache leaf) */
499 static DEFINE_PER_CPU(struct _cpuid4_info *, cpuid4_info);
/* Leaf y of cpu x's per-cpu _cpuid4_info array. */
500 #define CPUID4_INFO_IDX(x, y) (&((per_cpu(cpuid4_info, x))[y]))
/*
 * Populate shared_cpu_map for leaf @index of @cpu: the set of online
 * CPUs whose APIC IDs match above the num_threads_sharing boundary,
 * cross-linking each sibling's map as well.  AMD L3 (index 3) takes a
 * separate path — NOTE(review): its cpumask_copy source line is
 * missing from this extract, so its exact sharing rule can't be
 * confirmed here.
 */
503 static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
505 struct _cpuid4_info *this_leaf, *sibling_leaf;
506 unsigned long num_threads_sharing;
508 struct cpuinfo_x86 *c = &cpu_data(cpu);
510 if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) {
511 struct cpuinfo_x86 *d;
512 for_each_online_cpu(i) {
/* Skip CPUs whose leaf array has not been allocated yet. */
513 if (!per_cpu(cpuid4_info, i))
516 this_leaf = CPUID4_INFO_IDX(i, index);
517 cpumask_copy(to_cpumask(this_leaf->shared_cpu_map),
522 this_leaf = CPUID4_INFO_IDX(cpu, index);
523 num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
/* Unshared cache: only this CPU in the map. */
525 if (num_threads_sharing == 1)
526 cpumask_set_cpu(cpu, to_cpumask(this_leaf->shared_cpu_map));
528 index_msb = get_count_order(num_threads_sharing);
530 for_each_online_cpu(i) {
/* Same cache iff APIC IDs agree above the sharing boundary. */
531 if (cpu_data(i).apicid >> index_msb ==
532 c->apicid >> index_msb) {
534 to_cpumask(this_leaf->shared_cpu_map));
535 if (i != cpu && per_cpu(cpuid4_info, i)) {
537 CPUID4_INFO_IDX(i, index);
538 cpumask_set_cpu(cpu, to_cpumask(
539 sibling_leaf->shared_cpu_map));
/*
 * Undo cache_shared_cpu_map_setup(): drop @cpu from every sibling's
 * shared_cpu_map for leaf @index (including its own, since the map
 * contains @cpu itself).
 */
545 static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
547 struct _cpuid4_info *this_leaf, *sibling_leaf;
550 this_leaf = CPUID4_INFO_IDX(cpu, index);
551 for_each_cpu(sibling, to_cpumask(this_leaf->shared_cpu_map)) {
552 sibling_leaf = CPUID4_INFO_IDX(sibling, index);
553 cpumask_clear_cpu(cpu,
554 to_cpumask(sibling_leaf->shared_cpu_map));
558 static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
562 static void __cpuinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
/*
 * Tear down @cpu's cache attribute state: unlink it from all sibling
 * shared_cpu_maps, then free and NULL its leaf array.
 */
567 static void __cpuinit free_cache_attributes(unsigned int cpu)
571 for (i = 0; i < num_cache_leaves; i++)
572 cache_remove_shared_cpu_map(cpu, i);
574 kfree(per_cpu(cpuid4_info, cpu));
575 per_cpu(cpuid4_info, cpu) = NULL;
/*
 * Thin wrapper: a struct _cpuid4_info_regs is declared as a prefix
 * subset of struct _cpuid4_info (see above), so the pointer cast is
 * layout-safe and the regs-only lookup fills the leading fields.
 */
579 __cpuinit cpuid4_cache_lookup(int index, struct _cpuid4_info *this_leaf)
581 struct _cpuid4_info_regs *leaf_regs =
582 (struct _cpuid4_info_regs *)this_leaf;
584 return cpuid4_cache_lookup_regs(index, leaf_regs);
/*
 * smp_call_function_single() callback: runs ON the target CPU so the
 * cpuid instructions query that CPU.  Fills every cache leaf and its
 * shared_cpu_map; on failure reports the error through *_retval and
 * unwinds the maps already set up.
 */
587 static void __cpuinit get_cpu_leaves(void *_retval)
589 int j, *retval = _retval, cpu = smp_processor_id();
591 /* Do cpuid and store the results */
592 for (j = 0; j < num_cache_leaves; j++) {
593 struct _cpuid4_info *this_leaf;
594 this_leaf = CPUID4_INFO_IDX(cpu, j);
595 *retval = cpuid4_cache_lookup(j, this_leaf);
596 if (unlikely(*retval < 0)) {
/* Roll back the leaves filled so far. */
599 for (i = 0; i < j; i++)
600 cache_remove_shared_cpu_map(cpu, i);
603 cache_shared_cpu_map_setup(cpu, j);
/*
 * Allocate @cpu's _cpuid4_info leaf array and fill it by running
 * get_cpu_leaves() on that CPU.  Frees the array again if the remote
 * fill fails.  Returns 0 on success, negative errno otherwise.
 */
607 static int __cpuinit detect_cache_attributes(unsigned int cpu)
611 if (num_cache_leaves == 0)
614 per_cpu(cpuid4_info, cpu) = kzalloc(
615 sizeof(struct _cpuid4_info) * num_cache_leaves, GFP_KERNEL);
616 if (per_cpu(cpuid4_info, cpu) == NULL)
/* cpuid must execute on the target CPU; wait for completion. */
619 smp_call_function_single(cpu, get_cpu_leaves, &retval, true);
621 kfree(per_cpu(cpuid4_info, cpu));
622 per_cpu(cpuid4_info, cpu) = NULL;
628 #include <linux/kobject.h>
629 #include <linux/sysfs.h>
631 extern struct sysdev_class cpu_sysdev_class; /* from drivers/base/cpu.c */
633 /* pointer to kobject for cpuX/cache */
634 static DEFINE_PER_CPU(struct kobject *, cache_kobject);
636 struct _index_kobject {
639 unsigned short index;
642 /* pointer to array of kobjects for cpuX/cache/indexY */
643 static DEFINE_PER_CPU(struct _index_kobject *, index_kobject);
644 #define INDEX_KOBJECT_PTR(x, y) (&((per_cpu(index_kobject, x))[y]))
/*
 * Generate a sysfs show function for one _cpuid4_info field, printing
 * the field plus @val — the CPUID fields are stored minus-one, so most
 * instantiations below pass 1 to display the real value.
 */
646 #define show_one_plus(file_name, object, val) \
647 static ssize_t show_##file_name \
648 (struct _cpuid4_info *this_leaf, char *buf) \
650 return sprintf(buf, "%lu\n", (unsigned long)this_leaf->object + val); \
653 show_one_plus(level, eax.split.level, 0);
654 show_one_plus(coherency_line_size, ebx.split.coherency_line_size, 1);
655 show_one_plus(physical_line_partition, ebx.split.physical_line_partition, 1);
656 show_one_plus(ways_of_associativity, ebx.split.ways_of_associativity, 1);
657 show_one_plus(number_of_sets, ecx.split.number_of_sets, 1);
/* sysfs: cache size in KB, e.g. "512K". */
659 static ssize_t show_size(struct _cpuid4_info *this_leaf, char *buf)
661 return sprintf(buf, "%luK\n", this_leaf->size / 1024);
/*
 * Format this leaf's shared_cpu_map into buf, either as a hex mask or
 * (second parameter non-zero, presumably named "type") as a CPU list.
 * len is clamped to the remainder of the sysfs PAGE_SIZE buffer, minus
 * two bytes reserved for the trailing "\n\0".
 */
664 static ssize_t show_shared_cpu_map_func(struct _cpuid4_info *this_leaf,
667 ptrdiff_t len = PTR_ALIGN(buf + PAGE_SIZE - 1, PAGE_SIZE) - buf;
671 const struct cpumask *mask;
673 mask = to_cpumask(this_leaf->shared_cpu_map);
675 cpulist_scnprintf(buf, len-2, mask) :
676 cpumask_scnprintf(buf, len-2, mask);
/* Mask form, e.g. "00000003". */
683 static inline ssize_t show_shared_cpu_map(struct _cpuid4_info *leaf, char *buf)
685 return show_shared_cpu_map_func(leaf, 0, buf);
/* List form, e.g. "0-1". */
688 static inline ssize_t show_shared_cpu_list(struct _cpuid4_info *leaf, char *buf)
690 return show_shared_cpu_map_func(leaf, 1, buf);
/* sysfs: human-readable cache type from the CPUID(4) type field. */
693 static ssize_t show_type(struct _cpuid4_info *this_leaf, char *buf)
695 switch (this_leaf->eax.split.type) {
696 case CACHE_TYPE_DATA:
697 return sprintf(buf, "Data\n");
698 case CACHE_TYPE_INST:
699 return sprintf(buf, "Instruction\n");
700 case CACHE_TYPE_UNIFIED:
701 return sprintf(buf, "Unified\n");
/* Fallthrough for CACHE_TYPE_NULL or reserved encodings. */
703 return sprintf(buf, "Unknown\n");
707 #define to_object(k) container_of(k, struct _index_kobject, kobj)
708 #define to_attr(a) container_of(a, struct _cache_attr, attr)
710 static ssize_t show_cache_disable(struct _cpuid4_info *this_leaf, char *buf,
713 int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
714 int node = cpu_to_node(cpu);
715 struct pci_dev *dev = node_to_k8_nb_misc(node);
716 unsigned int reg = 0;
718 if (!this_leaf->can_disable)
724 pci_read_config_dword(dev, 0x1BC + index * 4, ®);
725 return sprintf(buf, "%x\n", reg);
/* Generate the per-slot sysfs show wrappers (cache_disable_0/1). */
728 #define SHOW_CACHE_DISABLE(index) \
730 show_cache_disable_##index(struct _cpuid4_info *this_leaf, char *buf) \
732 return show_cache_disable(this_leaf, buf, index); \
734 SHOW_CACHE_DISABLE(0)
735 SHOW_CACHE_DISABLE(1)
/*
 * Write the AMD L3 cache-index-disable register for slot @index.
 * Requires CAP_SYS_ADMIN and a leaf with can_disable set; the value is
 * parsed as decimal.  Before committing, the DRAM scrub rate (bits
 * 28:24 of northbridge reg 0x58) is cleared, and the register is first
 * written with bit 30 masked off — NOTE(review): intervening lines
 * (likely a wbinvd/scrubber-restore sequence) are missing from this
 * extract, so the full write protocol can't be confirmed here.
 */
737 static ssize_t store_cache_disable(struct _cpuid4_info *this_leaf,
738 const char *buf, size_t count, unsigned int index)
740 int cpu = cpumask_first(to_cpumask(this_leaf->shared_cpu_map));
741 int node = cpu_to_node(cpu);
742 struct pci_dev *dev = node_to_k8_nb_misc(node);
743 unsigned long val = 0;
744 unsigned int scrubber = 0;
746 if (!this_leaf->can_disable)
749 if (!capable(CAP_SYS_ADMIN))
755 if (strict_strtoul(buf, 10, &val) < 0)
/* Temporarily stop DRAM scrubbing (bits 28:24 of reg 0x58). */
760 pci_read_config_dword(dev, 0x58, &scrubber);
761 scrubber &= ~0x1f000000;
762 pci_write_config_dword(dev, 0x58, scrubber);
764 pci_write_config_dword(dev, 0x1BC + index * 4, val & ~0x40000000);
766 pci_write_config_dword(dev, 0x1BC + index * 4, val);
/* Generate the per-slot sysfs store wrappers (cache_disable_0/1). */
770 #define STORE_CACHE_DISABLE(index) \
772 store_cache_disable_##index(struct _cpuid4_info *this_leaf, \
773 const char *buf, size_t count) \
775 return store_cache_disable(this_leaf, buf, count, index); \
777 STORE_CACHE_DISABLE(0)
778 STORE_CACHE_DISABLE(1)
/*
 * sysfs attribute with typed show/store taking the leaf directly.
 * NOTE(review): the "struct _cache_attr {" opening line appears to be
 * missing from this extract.
 */
781 struct attribute attr;
782 ssize_t (*show)(struct _cpuid4_info *, char *);
783 ssize_t (*store)(struct _cpuid4_info *, const char *, size_t count);
/* Declare a read-only (0444) cache attribute backed by show_<name>. */
786 #define define_one_ro(_name) \
787 static struct _cache_attr _name = \
788 __ATTR(_name, 0444, show_##_name, NULL)
790 define_one_ro(level);
792 define_one_ro(coherency_line_size);
793 define_one_ro(physical_line_partition);
794 define_one_ro(ways_of_associativity);
795 define_one_ro(number_of_sets);
797 define_one_ro(shared_cpu_map);
798 define_one_ro(shared_cpu_list);
/* Writable (0644) AMD L3 index-disable controls. */
800 static struct _cache_attr cache_disable_0 = __ATTR(cache_disable_0, 0644,
801 show_cache_disable_0, store_cache_disable_0);
802 static struct _cache_attr cache_disable_1 = __ATTR(cache_disable_1, 0644,
803 show_cache_disable_1, store_cache_disable_1);
/* Attribute set for each cpuX/cache/indexY directory. */
805 static struct attribute *default_attrs[] = {
808 &coherency_line_size.attr,
809 &physical_line_partition.attr,
810 &ways_of_associativity.attr,
811 &number_of_sets.attr,
813 &shared_cpu_map.attr,
814 &shared_cpu_list.attr,
815 &cache_disable_0.attr,
816 &cache_disable_1.attr,
/*
 * Generic sysfs show dispatcher: recover the typed _cache_attr and the
 * index kobject, then invoke the attribute's show() on that CPU/leaf.
 */
820 static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
822 struct _cache_attr *fattr = to_attr(attr);
823 struct _index_kobject *this_leaf = to_object(kobj);
827 fattr->show(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
/* Generic sysfs store dispatcher, mirror of show() above. */
833 static ssize_t store(struct kobject *kobj, struct attribute *attr,
834 const char *buf, size_t count)
836 struct _cache_attr *fattr = to_attr(attr);
837 struct _index_kobject *this_leaf = to_object(kobj);
841 fattr->store(CPUID4_INFO_IDX(this_leaf->cpu, this_leaf->index),
/* Route all attribute I/O through the dispatchers above. */
847 static struct sysfs_ops sysfs_ops = {
/* kobject type for each indexY directory (carries default_attrs). */
852 static struct kobj_type ktype_cache = {
853 .sysfs_ops = &sysfs_ops,
854 .default_attrs = default_attrs,
/* kobject type for the per-cpu "cache" directory (no attrs of its own). */
857 static struct kobj_type ktype_percpu_entry = {
858 .sysfs_ops = &sysfs_ops,
/*
 * Release everything cpuid4_cache_sysfs_init() allocated for @cpu:
 * the kobjects and the detected cache attributes.
 */
861 static void __cpuinit cpuid4_cache_sysfs_exit(unsigned int cpu)
863 kfree(per_cpu(cache_kobject, cpu));
864 kfree(per_cpu(index_kobject, cpu));
865 per_cpu(cache_kobject, cpu) = NULL;
866 per_cpu(index_kobject, cpu) = NULL;
867 free_cache_attributes(cpu);
/*
 * Detect @cpu's cache attributes and allocate the kobjects for its
 * sysfs directory tree (one cache kobject + one _index_kobject per
 * leaf).  On any failure, unwinds via cpuid4_cache_sysfs_exit().
 */
870 static int __cpuinit cpuid4_cache_sysfs_init(unsigned int cpu)
874 if (num_cache_leaves == 0)
877 err = detect_cache_attributes(cpu)
881 /* Allocate all required memory */
882 per_cpu(cache_kobject, cpu) =
883 kzalloc(sizeof(struct kobject), GFP_KERNEL);
884 if (unlikely(per_cpu(cache_kobject, cpu) == NULL))
887 per_cpu(index_kobject, cpu) = kzalloc(
888 sizeof(struct _index_kobject) * num_cache_leaves, GFP_KERNEL);
889 if (unlikely(per_cpu(index_kobject, cpu) == NULL))
895 cpuid4_cache_sysfs_exit(cpu);
899 static DECLARE_BITMAP(cache_dev_map, NR_CPUS);
901 /* Add/Remove cache interface for CPU device */
/*
 * Create the sysfs tree cpuX/cache/indexY for one CPU device:
 * initialize state, register the "cache" kobject under the device,
 * then one indexY kobject per cache leaf.  On a mid-loop failure,
 * tears down the kobjects registered so far and the init state.
 */
902 static int __cpuinit cache_add_dev(struct sys_device * sys_dev)
904 unsigned int cpu = sys_dev->id;
906 struct _index_kobject *this_object;
909 retval = cpuid4_cache_sysfs_init(cpu);
910 if (unlikely(retval < 0))
913 retval = kobject_init_and_add(per_cpu(cache_kobject, cpu),
915 &sys_dev->kobj, "%s", "cache");
917 cpuid4_cache_sysfs_exit(cpu);
921 for (i = 0; i < num_cache_leaves; i++) {
922 this_object = INDEX_KOBJECT_PTR(cpu, i);
923 this_object->cpu = cpu;
924 this_object->index = i;
925 retval = kobject_init_and_add(&(this_object->kobj),
927 per_cpu(cache_kobject, cpu),
929 if (unlikely(retval)) {
/* Unwind the indexY kobjects already added, then the parent. */
930 for (j = 0; j < i; j++)
931 kobject_put(&(INDEX_KOBJECT_PTR(cpu, j)->kobj));
932 kobject_put(per_cpu(cache_kobject, cpu));
933 cpuid4_cache_sysfs_exit(cpu);
936 kobject_uevent(&(this_object->kobj), KOBJ_ADD);
/* Mark this CPU as registered so cache_remove_dev() knows to clean up. */
938 cpumask_set_cpu(cpu, to_cpumask(cache_dev_map));
940 kobject_uevent(per_cpu(cache_kobject, cpu), KOBJ_ADD);
/*
 * Remove the cpuX/cache sysfs tree added by cache_add_dev().  No-op if
 * the CPU was never initialized or never registered in cache_dev_map.
 */
944 static void __cpuinit cache_remove_dev(struct sys_device * sys_dev)
946 unsigned int cpu = sys_dev->id;
949 if (per_cpu(cpuid4_info, cpu) == NULL)
951 if (!cpumask_test_cpu(cpu, to_cpumask(cache_dev_map)))
953 cpumask_clear_cpu(cpu, to_cpumask(cache_dev_map));
955 for (i = 0; i < num_cache_leaves; i++)
956 kobject_put(&(INDEX_KOBJECT_PTR(cpu, i)->kobj));
957 kobject_put(per_cpu(cache_kobject, cpu));
958 cpuid4_cache_sysfs_exit(cpu);
/*
 * CPU hotplug notifier: add the cache sysfs tree when a CPU comes
 * online and remove it when the CPU dies (the _FROZEN variants cover
 * suspend/resume transitions).
 */
961 static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
962 unsigned long action, void *hcpu)
964 unsigned int cpu = (unsigned long)hcpu;
965 struct sys_device *sys_dev;
967 sys_dev = get_cpu_sysdev(cpu);
970 case CPU_ONLINE_FROZEN:
971 cache_add_dev(sys_dev);
974 case CPU_DEAD_FROZEN:
975 cache_remove_dev(sys_dev);
/* Hotplug notifier registered by cache_sysfs_init() below. */
981 static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
982 .notifier_call = cacheinfo_cpu_callback,
/*
 * Module init: create the cache sysfs tree for every CPU already
 * online, then register the hotplug notifier for CPUs that come and go
 * later.  Bails out early when no CPUID(4) leaves were detected.
 */
985 static int __cpuinit cache_sysfs_init(void)
989 if (num_cache_leaves == 0)
992 for_each_online_cpu(i) {
994 struct sys_device *sys_dev = get_cpu_sysdev(i);
996 err = cache_add_dev(sys_dev);
1000 register_hotcpu_notifier(&cacheinfo_cpu_notifier);
1004 device_initcall(cache_sysfs_init);