/*
 * arch/ia64/kernel/topology.c
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * This file contains NUMA specific variables and functions which can
 * be split away from DISCONTIGMEM and are used on NUMA machines with
 * contiguous memory.
 *              2002/08/07 Erich Focht <efocht@ess.nec.de>
 * Populate cpu entries in sysfs for non-numa systems as well
 *      Intel Corporation - Ashok Raj
 * 02/27/2006 Zhang, Yanmin
 *      Populate cpu cache entries in sysfs for cpu cache info
 */

#include <linux/config.h>
#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/node.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/nodemask.h>
#include <linux/notifier.h>
#include <asm/mmzone.h>
#include <asm/numa.h>
#include <asm/cpu.h>

static struct ia64_cpu *sysfs_cpus;

int arch_register_cpu(int num)
{
#if defined(CONFIG_ACPI) && defined(CONFIG_HOTPLUG_CPU)
        /*
         * If CPEI cannot be re-targeted and this CPU is the current
         * CPEI target, don't create the control file.
         */
        if (!can_cpei_retarget() && is_cpu_cpei_target(num))
                sysfs_cpus[num].cpu.no_control = 1;
#endif

        return register_cpu(&sysfs_cpus[num].cpu, num);
}

#ifdef CONFIG_HOTPLUG_CPU

void arch_unregister_cpu(int num)
{
        unregister_cpu(&sysfs_cpus[num].cpu);
}
EXPORT_SYMBOL(arch_register_cpu);
EXPORT_SYMBOL(arch_unregister_cpu);
#endif /* CONFIG_HOTPLUG_CPU */


static int __init topology_init(void)
{
        int i, err = 0;

#ifdef CONFIG_NUMA
        /*
         * MCD - Do we want to register all ONLINE nodes, or all POSSIBLE nodes?
         */
        for_each_online_node(i) {
                if ((err = register_one_node(i)))
                        goto out;
        }
#endif

        sysfs_cpus = kzalloc(sizeof(struct ia64_cpu) * NR_CPUS, GFP_KERNEL);
        if (!sysfs_cpus) {
                err = -ENOMEM;
                goto out;
        }

        for_each_present_cpu(i) {
                if ((err = arch_register_cpu(i)))
                        goto out;
        }
out:
        return err;
}

subsys_initcall(topology_init);

/*
 * Export cpu cache information through sysfs
 */

/*
 * String arrays for pretty-printing the cache attributes
 */
static const char *cache_types[] = {
        "",                     /* not used */
        "Instruction",
        "Data",
        "Unified"
};

static const char *cache_mattrib[] = {
        "WriteThrough",
        "WriteBack",
        "",                     /* reserved */
        ""                      /* reserved */
};

struct cache_info {
        pal_cache_config_info_t cci;
        cpumask_t shared_cpu_map;
        int level;
        int type;
        struct kobject kobj;
};

struct cpu_cache_info {
        struct cache_info *cache_leaves;
        int num_cache_leaves;
        struct kobject kobj;
};

static struct cpu_cache_info all_cpu_cache_info[NR_CPUS];
#define LEAF_KOBJECT_PTR(x, y)    (&all_cpu_cache_info[x].cache_leaves[y])

#ifdef CONFIG_SMP
static void cache_shared_cpu_map_setup(unsigned int cpu,
                struct cache_info *this_leaf)
{
        pal_cache_shared_info_t csi;
        int num_shared, i = 0;
        unsigned int j;

        if (cpu_data(cpu)->threads_per_core <= 1 &&
                cpu_data(cpu)->cores_per_socket <= 1) {
                cpu_set(cpu, this_leaf->shared_cpu_map);
                return;
        }

        if (ia64_pal_cache_shared_info(this_leaf->level,
                                        this_leaf->type,
                                        0,
                                        &csi) != PAL_STATUS_SUCCESS)
                return;

        num_shared = (int) csi.num_shared;
        do {
                for_each_possible_cpu(j)
                        if (cpu_data(cpu)->socket_id == cpu_data(j)->socket_id
                                && cpu_data(j)->core_id == csi.log1_cid
                                && cpu_data(j)->thread_id == csi.log1_tid)
                                cpu_set(j, this_leaf->shared_cpu_map);

                i++;
        } while (i < num_shared &&
                ia64_pal_cache_shared_info(this_leaf->level,
                                this_leaf->type,
                                i,
                                &csi) == PAL_STATUS_SUCCESS);
}
#else
static void cache_shared_cpu_map_setup(unsigned int cpu,
                struct cache_info *this_leaf)
{
        cpu_set(cpu, this_leaf->shared_cpu_map);
}
#endif
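
/*
 * Note on the SMP path above: each successful ia64_pal_cache_shared_info()
 * call describes one logical processor sharing this cache (csi.log1_cid and
 * csi.log1_tid give its core and thread IDs), while csi.num_shared is the
 * total number of sharers.  The do/while loop re-queries PAL with an
 * increasing processor index and sets the bit of every possible CPU on the
 * same socket whose core/thread IDs match the reported sharer.
 */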

static ssize_t show_coherency_line_size(struct cache_info *this_leaf,
                                        char *buf)
{
        return sprintf(buf, "%u\n", 1 << this_leaf->cci.pcci_line_size);
}

static ssize_t show_ways_of_associativity(struct cache_info *this_leaf,
                                        char *buf)
{
        return sprintf(buf, "%u\n", this_leaf->cci.pcci_assoc);
}

static ssize_t show_attributes(struct cache_info *this_leaf, char *buf)
{
        return sprintf(buf,
                        "%s\n",
                        cache_mattrib[this_leaf->cci.pcci_cache_attr]);
}

static ssize_t show_size(struct cache_info *this_leaf, char *buf)
{
        return sprintf(buf, "%uK\n", this_leaf->cci.pcci_cache_size / 1024);
}

static ssize_t show_number_of_sets(struct cache_info *this_leaf, char *buf)
{
        unsigned number_of_sets = this_leaf->cci.pcci_cache_size;
        number_of_sets /= this_leaf->cci.pcci_assoc;
        number_of_sets /= 1 << this_leaf->cci.pcci_line_size;

        return sprintf(buf, "%u\n", number_of_sets);
}
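
/*
 * Worked example with hypothetical values: pcci_cache_size = 262144 (256K),
 * pcci_assoc = 8, pcci_line_size = 7 (i.e. 2^7 = 128-byte lines) gives
 * 262144 / 8 / 128 = 256 sets.
 */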

static ssize_t show_shared_cpu_map(struct cache_info *this_leaf, char *buf)
{
        ssize_t len;
        cpumask_t shared_cpu_map;

        cpus_and(shared_cpu_map, this_leaf->shared_cpu_map, cpu_online_map);
        len = cpumask_scnprintf(buf, NR_CPUS + 1, shared_cpu_map);
        len += sprintf(buf + len, "\n");
        return len;
}
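
/*
 * shared_cpu_map is emitted as a hexadecimal bitmask restricted to online
 * CPUs.  For example (hypothetical configuration), if CPUs 0 and 1 share
 * this cache on a kernel built with NR_CPUS = 32, a read would return
 * "00000003"; the exact chunking and field width depend on NR_CPUS.
 */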

static ssize_t show_type(struct cache_info *this_leaf, char *buf)
{
        int type = this_leaf->type + this_leaf->cci.pcci_unified;
        return sprintf(buf, "%s\n", cache_types[type]);
}

static ssize_t show_level(struct cache_info *this_leaf, char *buf)
{
        return sprintf(buf, "%u\n", this_leaf->level);
}

struct cache_attr {
        struct attribute attr;
        ssize_t (*show)(struct cache_info *, char *);
        ssize_t (*store)(struct cache_info *, const char *, size_t count);
};

#ifdef define_one_ro
        #undef define_one_ro
#endif
#define define_one_ro(_name) \
        static struct cache_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)
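
/*
 * For illustration, define_one_ro(level) below expands to:
 *
 *      static struct cache_attr level =
 *              __ATTR(level, 0444, show_level, NULL);
 *
 * i.e. a read-only attribute named "level" backed by show_level().
 */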

define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(ways_of_associativity);
define_one_ro(size);
define_one_ro(number_of_sets);
define_one_ro(shared_cpu_map);
define_one_ro(attributes);

static struct attribute *cache_default_attrs[] = {
        &type.attr,
        &level.attr,
        &coherency_line_size.attr,
        &ways_of_associativity.attr,
        &attributes.attr,
        &size.attr,
        &number_of_sets.attr,
        &shared_cpu_map.attr,
        NULL
};

#define to_object(k) container_of(k, struct cache_info, kobj)
#define to_attr(a) container_of(a, struct cache_attr, attr)

static ssize_t cache_show(struct kobject *kobj, struct attribute *attr,
                char *buf)
{
        struct cache_attr *fattr = to_attr(attr);
        struct cache_info *this_leaf = to_object(kobj);
        ssize_t ret;

        ret = fattr->show ? fattr->show(this_leaf, buf) : 0;
        return ret;
}

static struct sysfs_ops cache_sysfs_ops = {
        .show   = cache_show
};
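
/*
 * Every read of an attribute file under a cache-leaf kobject funnels
 * through cache_show() above: sysfs invokes .show from cache_sysfs_ops,
 * which recovers the cache_attr from the generic attribute and dispatches
 * to the matching show_* routine.
 */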

static struct kobj_type cache_ktype = {
        .sysfs_ops      = &cache_sysfs_ops,
        .default_attrs  = cache_default_attrs,
};

static struct kobj_type cache_ktype_percpu_entry = {
        .sysfs_ops      = &cache_sysfs_ops,
};

static void __cpuinit cpu_cache_sysfs_exit(unsigned int cpu)
{
        kfree(all_cpu_cache_info[cpu].cache_leaves);
        all_cpu_cache_info[cpu].cache_leaves = NULL;
        all_cpu_cache_info[cpu].num_cache_leaves = 0;
        memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));
}

static int __cpuinit cpu_cache_sysfs_init(unsigned int cpu)
{
        u64 i, levels, unique_caches;
        pal_cache_config_info_t cci;
        int j;
        s64 status;
        struct cache_info *this_cache;
        int num_cache_leaves = 0;

        if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {
                printk(KERN_ERR "ia64_pal_cache_summary=%ld\n", status);
                return -1;
        }

        this_cache = kzalloc(sizeof(struct cache_info) * unique_caches,
                        GFP_KERNEL);
        if (this_cache == NULL)
                return -ENOMEM;

        for (i = 0; i < levels; i++) {
                /* type 2 = data/unified, type 1 = instruction;
                 * see the cache_types[] indexing in show_type() */
                for (j = 2; j > 0; j--) {
                        if ((status = ia64_pal_cache_config_info(i, j, &cci))
                                        != PAL_STATUS_SUCCESS)
                                continue;

                        this_cache[num_cache_leaves].cci = cci;
                        this_cache[num_cache_leaves].level = i + 1;
                        this_cache[num_cache_leaves].type = j;

                        cache_shared_cpu_map_setup(cpu,
                                        &this_cache[num_cache_leaves]);
                        num_cache_leaves++;
                }
        }

        all_cpu_cache_info[cpu].cache_leaves = this_cache;
        all_cpu_cache_info[cpu].num_cache_leaves = num_cache_leaves;

        memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));

        return 0;
}
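
/*
 * Hypothetical example: on a processor exposing three cache levels with
 * split L1 and unified L2/L3, the loops above would produce leaves for
 * L1 data, L1 instruction, L2 unified and L3 unified, in that order
 * (the inner loop queries the data/unified type before the instruction
 * type, and the instruction query simply fails for unified levels).
 */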

/* Add cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device *sys_dev)
{
        unsigned int cpu = sys_dev->id;
        unsigned long i, j;
        struct cache_info *this_object;
        int retval = 0;
        cpumask_t oldmask;

        if (all_cpu_cache_info[cpu].kobj.parent)
                return 0;

        oldmask = current->cpus_allowed;
        retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
        if (unlikely(retval))
                return retval;

        retval = cpu_cache_sysfs_init(cpu);
        set_cpus_allowed(current, oldmask);
        if (unlikely(retval < 0))
                return retval;

        all_cpu_cache_info[cpu].kobj.parent = &sys_dev->kobj;
        kobject_set_name(&all_cpu_cache_info[cpu].kobj, "%s", "cache");
        all_cpu_cache_info[cpu].kobj.ktype = &cache_ktype_percpu_entry;
        retval = kobject_register(&all_cpu_cache_info[cpu].kobj);

        for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++) {
                this_object = LEAF_KOBJECT_PTR(cpu, i);
                this_object->kobj.parent = &all_cpu_cache_info[cpu].kobj;
                kobject_set_name(&(this_object->kobj), "index%1lu", i);
                this_object->kobj.ktype = &cache_ktype;
                retval = kobject_register(&(this_object->kobj));
                if (unlikely(retval)) {
                        for (j = 0; j < i; j++) {
                                kobject_unregister(
                                        &(LEAF_KOBJECT_PTR(cpu, j)->kobj));
                        }
                        kobject_unregister(&all_cpu_cache_info[cpu].kobj);
                        cpu_cache_sysfs_exit(cpu);
                        break;
                }
        }
        return retval;
}
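
/*
 * Assuming the standard cpu sysdev path, the resulting sysfs layout is:
 *
 *      /sys/devices/system/cpu/cpuN/cache/indexM/
 *              type, level, size, ways_of_associativity, attributes,
 *              coherency_line_size, number_of_sets, shared_cpu_map
 */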

/* Remove cache interface for CPU device */
static int __cpuinit cache_remove_dev(struct sys_device *sys_dev)
{
        unsigned int cpu = sys_dev->id;
        unsigned long i;

        for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++)
                kobject_unregister(&(LEAF_KOBJECT_PTR(cpu, i)->kobj));

        if (all_cpu_cache_info[cpu].kobj.parent) {
                kobject_unregister(&all_cpu_cache_info[cpu].kobj);
                memset(&all_cpu_cache_info[cpu].kobj,
                        0,
                        sizeof(struct kobject));
        }

        cpu_cache_sysfs_exit(cpu);

        return 0;
}

/*
 * When a cpu is hot-plugged, create or tear down its cache
 * kobjects as appropriate.
 */
static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
                unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        struct sys_device *sys_dev;

        sys_dev = get_cpu_sysdev(cpu);
        switch (action) {
        case CPU_ONLINE:
                cache_add_dev(sys_dev);
                break;
        case CPU_DEAD:
                cache_remove_dev(sys_dev);
                break;
        }
        return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cache_cpu_notifier = {
        .notifier_call = cache_cpu_callback
};

static int __cpuinit cache_sysfs_init(void)
{
        int i;

        for_each_online_cpu(i) {
                cache_cpu_callback(&cache_cpu_notifier, CPU_ONLINE,
                                (void *)(long)i);
        }

        register_cpu_notifier(&cache_cpu_notifier);

        return 0;
}
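
/*
 * Ordering note: device_initcall() runs after subsys_initcall(), so by
 * the time cache_sysfs_init() executes, topology_init() has already
 * registered the per-cpu sysdevs that the cache kobjects attach to.
 */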

device_initcall(cache_sysfs_init);