arch/ia64/kernel/topology.c
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * This file contains NUMA specific variables and functions which can
 * be split away from DISCONTIGMEM and are used on NUMA machines with
 * contiguous memory.
 *		2002/08/07 Erich Focht <efocht@ess.nec.de>
 * Populate cpu entries in sysfs for non-numa systems as well
 *	Intel Corporation - Ashok Raj
 * 02/27/2006 Zhang, Yanmin
 *	Populate cpu cache entries in sysfs for cpu cache info
 */

#include <linux/cpu.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/node.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/nodemask.h>
#include <linux/notifier.h>
#include <asm/mmzone.h>
#include <asm/numa.h>
#include <asm/cpu.h>

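/*
 * Per-CPU sysfs objects.  The array is sized for NR_CPUS, allocated in
 * topology_init(), and one entry is registered per present CPU.
 */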
static struct ia64_cpu *sysfs_cpus;

int arch_register_cpu(int num)
{
#if defined (CONFIG_ACPI) && defined (CONFIG_HOTPLUG_CPU)
	/*
	 * If CPEI cannot be retargeted and this CPU is the CPEI
	 * target, don't create the hotplug control file.
	 */
	if (!can_cpei_retarget() && is_cpu_cpei_target(num))
		sysfs_cpus[num].cpu.no_control = 1;
#ifdef CONFIG_NUMA
	map_cpu_to_node(num, node_cpuid[num].nid);
#endif
#endif

	return register_cpu(&sysfs_cpus[num].cpu, num);
}

#ifdef CONFIG_HOTPLUG_CPU

void arch_unregister_cpu(int num)
{
	unregister_cpu(&sysfs_cpus[num].cpu);
	unmap_cpu_from_node(num, cpu_to_node(num));
}
EXPORT_SYMBOL(arch_register_cpu);
EXPORT_SYMBOL(arch_unregister_cpu);
#endif /* CONFIG_HOTPLUG_CPU */

static int __init topology_init(void)
{
	int i, err = 0;

#ifdef CONFIG_NUMA
	/*
	 * MCD - Do we want to register all ONLINE nodes, or all POSSIBLE nodes?
	 */
	for_each_online_node(i) {
		if ((err = register_one_node(i)))
			goto out;
	}
#endif

	sysfs_cpus = kzalloc(sizeof(struct ia64_cpu) * NR_CPUS, GFP_KERNEL);
	if (!sysfs_cpus)
		panic("kzalloc in topology_init failed - NR_CPUS too big?");

	for_each_present_cpu(i) {
		if ((err = arch_register_cpu(i)))
			goto out;
	}
out:
	return err;
}

subsys_initcall(topology_init);

/*
 * Export cpu cache information through sysfs; the entries appear
 * under /sys/devices/system/cpu/cpuN/cache/indexM/.
 */

/*
 * String arrays for pretty-printing cache types and attributes.
 */
static const char *cache_types[] = {
	"",			/* not used */
	"Instruction",
	"Data",
	"Unified"
};

static const char *cache_mattrib[] = {
	"WriteThrough",
	"WriteBack",
	"",			/* reserved */
	""			/* reserved */
};

struct cache_info {
	pal_cache_config_info_t cci;
	cpumask_t shared_cpu_map;
	int level;
	int type;
	struct kobject kobj;
};

struct cpu_cache_info {
	struct cache_info *cache_leaves;
	int num_cache_leaves;
	struct kobject kobj;
};

static struct cpu_cache_info all_cpu_cache_info[NR_CPUS];
#define LEAF_KOBJECT_PTR(x, y)	(&all_cpu_cache_info[x].cache_leaves[y])

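/*
 * all_cpu_cache_info[cpu] owns the per-CPU "cache" kobject; each entry
 * of its cache_leaves[] array owns one "indexN" child kobject whose
 * attributes are backed by the show_* helpers below.
 */

/*
 * Build this_leaf->shared_cpu_map.  On SMP, PAL reports one (core id,
 * thread id) pair per logical processor sharing this cache; each pair
 * is matched against every possible CPU in the same socket.
 */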
#ifdef CONFIG_SMP
static void cache_shared_cpu_map_setup(unsigned int cpu,
		struct cache_info *this_leaf)
{
	pal_cache_shared_info_t csi;
	int num_shared, i = 0;
	unsigned int j;

	if (cpu_data(cpu)->threads_per_core <= 1 &&
		cpu_data(cpu)->cores_per_socket <= 1) {
		cpu_set(cpu, this_leaf->shared_cpu_map);
		return;
	}

	if (ia64_pal_cache_shared_info(this_leaf->level,
					this_leaf->type,
					0,
					&csi) != PAL_STATUS_SUCCESS)
		return;

	num_shared = (int) csi.num_shared;
	do {
		for_each_possible_cpu(j)
			if (cpu_data(cpu)->socket_id == cpu_data(j)->socket_id
				&& cpu_data(j)->core_id == csi.log1_cid
				&& cpu_data(j)->thread_id == csi.log1_tid)
				cpu_set(j, this_leaf->shared_cpu_map);

		i++;
	} while (i < num_shared &&
		ia64_pal_cache_shared_info(this_leaf->level,
				this_leaf->type,
				i,
				&csi) == PAL_STATUS_SUCCESS);
}
#else
static void cache_shared_cpu_map_setup(unsigned int cpu,
		struct cache_info *this_leaf)
{
	cpu_set(cpu, this_leaf->shared_cpu_map);
}
#endif

static ssize_t show_coherency_line_size(struct cache_info *this_leaf,
					char *buf)
{
	return sprintf(buf, "%u\n", 1 << this_leaf->cci.pcci_line_size);
}

static ssize_t show_ways_of_associativity(struct cache_info *this_leaf,
					char *buf)
{
	return sprintf(buf, "%u\n", this_leaf->cci.pcci_assoc);
}

static ssize_t show_attributes(struct cache_info *this_leaf, char *buf)
{
	return sprintf(buf, "%s\n",
			cache_mattrib[this_leaf->cci.pcci_cache_attr]);
}

static ssize_t show_size(struct cache_info *this_leaf, char *buf)
{
	return sprintf(buf, "%uK\n", this_leaf->cci.pcci_cache_size / 1024);
}

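/*
 * The set count follows from the PAL geometry fields:
 * sets = size / (associativity * line size), with pcci_line_size being
 * log2 of the line size in bytes.  Worked example with hypothetical
 * numbers: a 256KB, 8-way cache with pcci_line_size = 7 (128-byte
 * lines) has 262144 / 8 / 128 = 256 sets.
 */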
static ssize_t show_number_of_sets(struct cache_info *this_leaf, char *buf)
{
	unsigned number_of_sets = this_leaf->cci.pcci_cache_size;
	number_of_sets /= this_leaf->cci.pcci_assoc;
	number_of_sets /= 1 << this_leaf->cci.pcci_line_size;

	return sprintf(buf, "%u\n", number_of_sets);
}

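/*
 * Only CPUs that are currently online are reported.  The mask is
 * printed in cpumask_scnprintf() hex bitmap form, e.g. "00000003" when
 * CPUs 0 and 1 share this cache (exact width and grouping depend on
 * NR_CPUS).
 */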
static ssize_t show_shared_cpu_map(struct cache_info *this_leaf, char *buf)
{
	ssize_t len;
	cpumask_t shared_cpu_map;

	cpus_and(shared_cpu_map, this_leaf->shared_cpu_map, cpu_online_map);
	len = cpumask_scnprintf(buf, NR_CPUS+1, shared_cpu_map);
	len += sprintf(buf+len, "\n");
	return len;
}

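/*
 * PAL reports a unified cache as the data type (2) with pcci_unified
 * set, so type + pcci_unified indexes cache_types[] correctly:
 * 1 -> "Instruction", 2 -> "Data", 2 + 1 -> "Unified".
 */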
static ssize_t show_type(struct cache_info *this_leaf, char *buf)
{
	int type = this_leaf->type + this_leaf->cci.pcci_unified;
	return sprintf(buf, "%s\n", cache_types[type]);
}

static ssize_t show_level(struct cache_info *this_leaf, char *buf)
{
	return sprintf(buf, "%u\n", this_leaf->level);
}

struct cache_attr {
	struct attribute attr;
	ssize_t (*show)(struct cache_info *, char *);
	ssize_t (*store)(struct cache_info *, const char *, size_t count);
};

#ifdef define_one_ro
	#undef define_one_ro
#endif
#define define_one_ro(_name) \
	static struct cache_attr _name = \
__ATTR(_name, 0444, show_##_name, NULL)

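/*
 * For example, define_one_ro(level) expands to:
 *
 *	static struct cache_attr level =
 *		__ATTR(level, 0444, show_level, NULL);
 *
 * i.e. a read-only sysfs attribute named "level" backed by show_level().
 */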
define_one_ro(level);
define_one_ro(type);
define_one_ro(coherency_line_size);
define_one_ro(ways_of_associativity);
define_one_ro(size);
define_one_ro(number_of_sets);
define_one_ro(shared_cpu_map);
define_one_ro(attributes);

static struct attribute *cache_default_attrs[] = {
	&type.attr,
	&level.attr,
	&coherency_line_size.attr,
	&ways_of_associativity.attr,
	&attributes.attr,
	&size.attr,
	&number_of_sets.attr,
	&shared_cpu_map.attr,
	NULL
};

#define to_object(k) container_of(k, struct cache_info, kobj)
#define to_attr(a) container_of(a, struct cache_attr, attr)

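/*
 * Generic show dispatcher: sysfs hands us the kobject and the
 * attribute being read; container_of() recovers the cache_info leaf
 * and the cache_attr, whose show helper formats the value.
 */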
static ssize_t cache_show(struct kobject *kobj, struct attribute *attr,
		char *buf)
{
	struct cache_attr *fattr = to_attr(attr);
	struct cache_info *this_leaf = to_object(kobj);
	ssize_t ret;

	ret = fattr->show ? fattr->show(this_leaf, buf) : 0;
	return ret;
}

static struct sysfs_ops cache_sysfs_ops = {
	.show	= cache_show
};

static struct kobj_type cache_ktype = {
	.sysfs_ops	= &cache_sysfs_ops,
	.default_attrs	= cache_default_attrs,
};

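/*
 * The per-CPU "cache" directory itself carries no attributes; it only
 * parents the indexN leaf kobjects, so this ktype has no default_attrs.
 */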
static struct kobj_type cache_ktype_percpu_entry = {
	.sysfs_ops	= &cache_sysfs_ops,
};

static void __cpuinit cpu_cache_sysfs_exit(unsigned int cpu)
{
	kfree(all_cpu_cache_info[cpu].cache_leaves);
	all_cpu_cache_info[cpu].cache_leaves = NULL;
	all_cpu_cache_info[cpu].num_cache_leaves = 0;
	memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));
}

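/*
 * Query PAL for every cache at every level and record one cache_info
 * leaf per reported cache.  This must run on the CPU being described;
 * cache_add_dev() binds the caller there before calling in.
 */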
static int __cpuinit cpu_cache_sysfs_init(unsigned int cpu)
{
	u64 i, levels, unique_caches;
	pal_cache_config_info_t cci;
	int j;
	s64 status;
	struct cache_info *this_cache;
	int num_cache_leaves = 0;

	if ((status = ia64_pal_cache_summary(&levels, &unique_caches)) != 0) {
		printk(KERN_ERR "ia64_pal_cache_summary=%ld\n", status);
		return -1;
	}

	this_cache = kzalloc(sizeof(struct cache_info) * unique_caches,
			GFP_KERNEL);
	if (this_cache == NULL)
		return -ENOMEM;

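	/*
	 * PAL enumerates caches by level and type: type 1 is
	 * instruction, type 2 is data (or unified, flagged via
	 * pcci_unified).  Walking j from 2 down to 1 records the
	 * data/unified leaf of each level before its instruction leaf.
	 */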
	for (i = 0; i < levels; i++) {
		for (j = 2; j > 0; j--) {
			if ((status = ia64_pal_cache_config_info(i, j, &cci)) !=
					PAL_STATUS_SUCCESS)
				continue;

			this_cache[num_cache_leaves].cci = cci;
			this_cache[num_cache_leaves].level = i + 1;
			this_cache[num_cache_leaves].type = j;

			cache_shared_cpu_map_setup(cpu,
					&this_cache[num_cache_leaves]);
			num_cache_leaves++;
		}
	}

	all_cpu_cache_info[cpu].cache_leaves = this_cache;
	all_cpu_cache_info[cpu].num_cache_leaves = num_cache_leaves;

	memset(&all_cpu_cache_info[cpu].kobj, 0, sizeof(struct kobject));

	return 0;
}

/* Add cache interface for CPU device */
static int __cpuinit cache_add_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i, j;
	struct cache_info *this_object;
	int retval = 0;
	cpumask_t oldmask;

	if (all_cpu_cache_info[cpu].kobj.parent)
		return 0;

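	/*
	 * PAL calls describe the CPU they execute on, so temporarily
	 * bind this task to the target CPU while cpu_cache_sysfs_init()
	 * queries the firmware.
	 */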
	oldmask = current->cpus_allowed;
	retval = set_cpus_allowed(current, cpumask_of_cpu(cpu));
	if (unlikely(retval))
		return retval;

	retval = cpu_cache_sysfs_init(cpu);
	set_cpus_allowed(current, oldmask);
	if (unlikely(retval < 0))
		return retval;

	all_cpu_cache_info[cpu].kobj.parent = &sys_dev->kobj;
	kobject_set_name(&all_cpu_cache_info[cpu].kobj, "%s", "cache");
	all_cpu_cache_info[cpu].kobj.ktype = &cache_ktype_percpu_entry;
	retval = kobject_register(&all_cpu_cache_info[cpu].kobj);
	if (unlikely(retval)) {
		/* Registering the parent failed; free the leaves again. */
		cpu_cache_sysfs_exit(cpu);
		return retval;
	}

	for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++) {
		this_object = LEAF_KOBJECT_PTR(cpu, i);
		this_object->kobj.parent = &all_cpu_cache_info[cpu].kobj;
		kobject_set_name(&(this_object->kobj), "index%1lu", i);
		this_object->kobj.ktype = &cache_ktype;
		retval = kobject_register(&(this_object->kobj));
		if (unlikely(retval)) {
			/* Unwind the leaves registered so far. */
			for (j = 0; j < i; j++)
				kobject_unregister(
					&(LEAF_KOBJECT_PTR(cpu, j)->kobj));
			kobject_unregister(&all_cpu_cache_info[cpu].kobj);
			cpu_cache_sysfs_exit(cpu);
			break;
		}
	}
	return retval;
}

/* Remove cache interface for CPU device */
static int __cpuinit cache_remove_dev(struct sys_device *sys_dev)
{
	unsigned int cpu = sys_dev->id;
	unsigned long i;

	for (i = 0; i < all_cpu_cache_info[cpu].num_cache_leaves; i++)
		kobject_unregister(&(LEAF_KOBJECT_PTR(cpu, i)->kobj));

	if (all_cpu_cache_info[cpu].kobj.parent) {
		kobject_unregister(&all_cpu_cache_info[cpu].kobj);
		memset(&all_cpu_cache_info[cpu].kobj,
			0,
			sizeof(struct kobject));
	}

	cpu_cache_sysfs_exit(cpu);

	return 0;
}

/*
 * When a CPU is hot-plugged, create or remove its cache sysfs entries
 * as appropriate.
 */
static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
		unsigned long action, void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;
	struct sys_device *sys_dev;

	sys_dev = get_cpu_sysdev(cpu);
	switch (action) {
	case CPU_ONLINE:
		cache_add_dev(sys_dev);
		break;
	case CPU_DEAD:
		cache_remove_dev(sys_dev);
		break;
	}
	return NOTIFY_OK;
}

static struct notifier_block __cpuinitdata cache_cpu_notifier =
{
	.notifier_call = cache_cpu_callback
};

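/*
 * Populate cache entries for the CPUs that are already online, then
 * register the hotplug notifier so later arrivals are covered too.
 */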
static int __cpuinit cache_sysfs_init(void)
{
	int i;

	for_each_online_cpu(i) {
		cache_cpu_callback(&cache_cpu_notifier, CPU_ONLINE,
				(void *)(long)i);
	}

	register_hotcpu_notifier(&cache_cpu_notifier);

	return 0;
}

device_initcall(cache_sysfs_init);
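
/*
 * Example (hypothetical values) of the resulting sysfs layout once
 * this initcall has run:
 *
 *	$ cat /sys/devices/system/cpu/cpu0/cache/index0/type
 *	Data
 *	$ cat /sys/devices/system/cpu/cpu0/cache/index0/size
 *	16K
 */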