/*
 * cacheinfo support - processor cache information via sysfs
 *
 * Based on arch/x86/kernel/cpu/intel_cacheinfo.c
 * Author: Sudeep Holla <sudeep.holla@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/bitops.h>
#include <linux/cacheinfo.h>
#include <linux/compiler.h>
#include <linux/cpu.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/sysfs.h>

/* pointer to per cpu cacheinfo */
static DEFINE_PER_CPU(struct cpu_cacheinfo, ci_cpu_cacheinfo);
#define ci_cacheinfo(cpu)       (&per_cpu(ci_cpu_cacheinfo, cpu))
#define cache_leaves(cpu)       (ci_cacheinfo(cpu)->num_leaves)
#define per_cpu_cacheinfo(cpu)  (ci_cacheinfo(cpu)->info_list)

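/*
 * Accessor for the per-cpu cacheinfo descriptor; callers reach the leaf
 * array through info_list and the leaf count through num_leaves.
 */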
struct cpu_cacheinfo *get_cpu_cacheinfo(unsigned int cpu)
{
        return ci_cacheinfo(cpu);
}

#ifdef CONFIG_OF
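/*
 * Walk the device tree starting from the cpu node, following the cache
 * node chain via of_find_next_cache_node(): each leaf above level 1 gets
 * the of_node of the next cache in the chain, while level 1 leaves use
 * the cpu node itself.
 */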
static int cache_setup_of_node(unsigned int cpu)
{
        struct device_node *np;
        struct cacheinfo *this_leaf;
        struct device *cpu_dev = get_cpu_device(cpu);
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
        unsigned int index = 0;

        /* skip if of_node is already populated */
        if (this_cpu_ci->info_list->of_node)
                return 0;

        if (!cpu_dev) {
                pr_err("No cpu device for CPU %d\n", cpu);
                return -ENODEV;
        }
        np = cpu_dev->of_node;
        if (!np) {
                pr_err("Failed to find cpu%d device node\n", cpu);
                return -ENOENT;
        }

        while (np && index < cache_leaves(cpu)) {
                this_leaf = this_cpu_ci->info_list + index;
                if (this_leaf->level != 1)
                        np = of_find_next_cache_node(np);
                else
                        np = of_node_get(np); /* cpu node itself */
                this_leaf->of_node = np;
                index++;
        }
        return 0;
}

static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
                                           struct cacheinfo *sib_leaf)
{
        return sib_leaf->of_node == this_leaf->of_node;
}
#else
static inline int cache_setup_of_node(unsigned int cpu) { return 0; }
static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
                                           struct cacheinfo *sib_leaf)
{
        /*
         * For non-DT systems, assume unique level 1 cache, system-wide
         * shared caches for all other levels. This will be used only if
         * arch specific code has not populated shared_cpu_map
         */
        return !(this_leaf->level == 1);
}
#endif

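/*
 * Build shared_cpu_map for every leaf of this cpu by comparing each leaf
 * against the same-index leaf of every other online cpu; both sides of a
 * match are updated so the masks stay symmetric.
 */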
static int cache_shared_cpu_map_setup(unsigned int cpu)
{
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
        struct cacheinfo *this_leaf, *sib_leaf;
        unsigned int index;
        int ret;

        ret = cache_setup_of_node(cpu);
        if (ret)
                return ret;

        for (index = 0; index < cache_leaves(cpu); index++) {
                unsigned int i;

                this_leaf = this_cpu_ci->info_list + index;
                /* skip if shared_cpu_map is already populated */
                if (!cpumask_empty(&this_leaf->shared_cpu_map))
                        continue;

                cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
                for_each_online_cpu(i) {
                        struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

                        if (i == cpu || !sib_cpu_ci->info_list)
                                continue; /* skip if itself or no cacheinfo */
                        sib_leaf = sib_cpu_ci->info_list + index;
                        if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
                                cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
                                cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
                        }
                }
        }

        return 0;
}

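/*
 * Undo cache_shared_cpu_map_setup() for a departing cpu: clear it from
 * every sibling's mask, clear the siblings from its own masks, and drop
 * the of_node reference taken for each leaf.
 */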
static void cache_shared_cpu_map_remove(unsigned int cpu)
{
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
        struct cacheinfo *this_leaf, *sib_leaf;
        unsigned int sibling, index;

        for (index = 0; index < cache_leaves(cpu); index++) {
                this_leaf = this_cpu_ci->info_list + index;
                for_each_cpu(sibling, &this_leaf->shared_cpu_map) {
                        struct cpu_cacheinfo *sib_cpu_ci;

                        if (sibling == cpu) /* skip itself */
                                continue;
                        sib_cpu_ci = get_cpu_cacheinfo(sibling);
                        sib_leaf = sib_cpu_ci->info_list + index;
                        cpumask_clear_cpu(cpu, &sib_leaf->shared_cpu_map);
                        cpumask_clear_cpu(sibling, &this_leaf->shared_cpu_map);
                }
                of_node_put(this_leaf->of_node);
        }
}

static void free_cache_attributes(unsigned int cpu)
{
        cache_shared_cpu_map_remove(cpu);

        kfree(per_cpu_cacheinfo(cpu));
        per_cpu_cacheinfo(cpu) = NULL;
}

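/*
 * Architecture code is expected to override these two weak stubs:
 * init_cache_level() must set num_leaves (and num_levels) in the cpu's
 * cpu_cacheinfo, and populate_cache_leaves() must fill the info_list
 * array allocated by detect_cache_attributes() below.
 */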
int __weak init_cache_level(unsigned int cpu)
{
        return -ENOENT;
}

int __weak populate_cache_leaves(unsigned int cpu)
{
        return -ENOENT;
}

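/*
 * Allocate and fill the cacheinfo leaves for a cpu: ask the arch for the
 * leaf count, kcalloc() the array, let the arch populate it, then derive
 * of_node and shared_cpu_map wherever the arch left them unset.
 */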
static int detect_cache_attributes(unsigned int cpu)
{
        int ret;

        if (init_cache_level(cpu))
                return -ENOENT;

        per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
                                         sizeof(struct cacheinfo), GFP_KERNEL);
        if (per_cpu_cacheinfo(cpu) == NULL)
                return -ENOMEM;

        ret = populate_cache_leaves(cpu);
        if (ret)
                goto free_ci;
        /*
         * For systems using DT for cache hierarchy, of_node and
         * shared_cpu_map will be set up here only if they are not
         * populated already
         */
        ret = cache_shared_cpu_map_setup(cpu);
        if (ret)
                goto free_ci;
        return 0;

free_ci:
        free_cache_attributes(cpu);
        return ret;
}

/* pointer to cpuX/cache device */
static DEFINE_PER_CPU(struct device *, ci_cache_dev);
#define per_cpu_cache_dev(cpu)  (per_cpu(ci_cache_dev, cpu))

static cpumask_t cache_dev_map;

/* pointer to array of devices for cpuX/cache/indexY */
static DEFINE_PER_CPU(struct device **, ci_index_dev);
#define per_cpu_index_dev(cpu)  (per_cpu(ci_index_dev, cpu))
#define per_cache_index_dev(cpu, idx)   ((per_cpu_index_dev(cpu))[idx])

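/*
 * show_one() stamps out a trivial sysfs show routine per field; for
 * example, show_one(level, level) below expands to:
 *
 *      static ssize_t level_show(struct device *dev,
 *                      struct device_attribute *attr, char *buf)
 *      {
 *              struct cacheinfo *this_leaf = dev_get_drvdata(dev);
 *              return sprintf(buf, "%u\n", this_leaf->level);
 *      }
 */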
#define show_one(file_name, object)                             \
static ssize_t file_name##_show(struct device *dev,             \
                struct device_attribute *attr, char *buf)       \
{                                                               \
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);     \
        return sprintf(buf, "%u\n", this_leaf->object);         \
}

show_one(level, level);
show_one(coherency_line_size, coherency_line_size);
show_one(number_of_sets, number_of_sets);
show_one(physical_line_partition, physical_line_partition);
show_one(ways_of_associativity, ways_of_associativity);

static ssize_t size_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
{
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);

        return sprintf(buf, "%uK\n", this_leaf->size >> 10);
}

static ssize_t shared_cpumap_show_func(struct device *dev, bool list, char *buf)
{
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);
        const struct cpumask *mask = &this_leaf->shared_cpu_map;

        return cpumap_print_to_pagebuf(list, buf, mask);
}

static ssize_t shared_cpu_map_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
{
        return shared_cpumap_show_func(dev, false, buf);
}

static ssize_t shared_cpu_list_show(struct device *dev,
                                    struct device_attribute *attr, char *buf)
{
        return shared_cpumap_show_func(dev, true, buf);
}

static ssize_t type_show(struct device *dev,
                         struct device_attribute *attr, char *buf)
{
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);

        switch (this_leaf->type) {
        case CACHE_TYPE_DATA:
                return sprintf(buf, "Data\n");
        case CACHE_TYPE_INST:
                return sprintf(buf, "Instruction\n");
        case CACHE_TYPE_UNIFIED:
                return sprintf(buf, "Unified\n");
        default:
                return -EINVAL;
        }
}

static ssize_t allocation_policy_show(struct device *dev,
                                      struct device_attribute *attr, char *buf)
{
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);
        unsigned int ci_attr = this_leaf->attributes;
        int n = 0;

        if ((ci_attr & CACHE_READ_ALLOCATE) && (ci_attr & CACHE_WRITE_ALLOCATE))
                n = sprintf(buf, "ReadWriteAllocate\n");
        else if (ci_attr & CACHE_READ_ALLOCATE)
                n = sprintf(buf, "ReadAllocate\n");
        else if (ci_attr & CACHE_WRITE_ALLOCATE)
                n = sprintf(buf, "WriteAllocate\n");
        return n;
}

static ssize_t write_policy_show(struct device *dev,
                                 struct device_attribute *attr, char *buf)
{
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);
        unsigned int ci_attr = this_leaf->attributes;
        int n = 0;

        if (ci_attr & CACHE_WRITE_THROUGH)
                n = sprintf(buf, "WriteThrough\n");
        else if (ci_attr & CACHE_WRITE_BACK)
                n = sprintf(buf, "WriteBack\n");
        return n;
}

static DEVICE_ATTR_RO(level);
static DEVICE_ATTR_RO(type);
static DEVICE_ATTR_RO(coherency_line_size);
static DEVICE_ATTR_RO(ways_of_associativity);
static DEVICE_ATTR_RO(number_of_sets);
static DEVICE_ATTR_RO(size);
static DEVICE_ATTR_RO(allocation_policy);
static DEVICE_ATTR_RO(write_policy);
static DEVICE_ATTR_RO(shared_cpu_map);
static DEVICE_ATTR_RO(shared_cpu_list);
static DEVICE_ATTR_RO(physical_line_partition);

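/*
 * Together these attributes surface each leaf under
 * /sys/devices/system/cpu/cpuX/cache/indexY/ (the "cache" and "indexY"
 * devices are created in cpu_cache_sysfs_init()/cache_add_dev() below);
 * for example (values illustrative only):
 *
 *      /sys/devices/system/cpu/cpu0/cache/index0/type  -> "Data"
 *      /sys/devices/system/cpu/cpu0/cache/index0/level -> "1"
 *      /sys/devices/system/cpu/cpu0/cache/index0/size  -> "32K"
 */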
static struct attribute *cache_default_attrs[] = {
        &dev_attr_type.attr,
        &dev_attr_level.attr,
        &dev_attr_shared_cpu_map.attr,
        &dev_attr_shared_cpu_list.attr,
        &dev_attr_coherency_line_size.attr,
        &dev_attr_ways_of_associativity.attr,
        &dev_attr_number_of_sets.attr,
        &dev_attr_size.attr,
        &dev_attr_allocation_policy.attr,
        &dev_attr_write_policy.attr,
        &dev_attr_physical_line_partition.attr,
        NULL
};

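/*
 * Only expose attributes whose backing fields were actually populated;
 * anything the arch left at zero (or an empty cpumask) stays hidden.
 */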
static umode_t
cache_default_attrs_is_visible(struct kobject *kobj,
                               struct attribute *attr, int unused)
{
        struct device *dev = kobj_to_dev(kobj);
        struct cacheinfo *this_leaf = dev_get_drvdata(dev);
        const struct cpumask *mask = &this_leaf->shared_cpu_map;
        umode_t mode = attr->mode;

        if ((attr == &dev_attr_type.attr) && this_leaf->type)
                return mode;
        if ((attr == &dev_attr_level.attr) && this_leaf->level)
                return mode;
        if ((attr == &dev_attr_shared_cpu_map.attr) && !cpumask_empty(mask))
                return mode;
        if ((attr == &dev_attr_shared_cpu_list.attr) && !cpumask_empty(mask))
                return mode;
        if ((attr == &dev_attr_coherency_line_size.attr) &&
            this_leaf->coherency_line_size)
                return mode;
        if ((attr == &dev_attr_ways_of_associativity.attr) &&
            this_leaf->size) /* allow 0 = full associativity */
                return mode;
        if ((attr == &dev_attr_number_of_sets.attr) &&
            this_leaf->number_of_sets)
                return mode;
        if ((attr == &dev_attr_size.attr) && this_leaf->size)
                return mode;
        if ((attr == &dev_attr_write_policy.attr) &&
            (this_leaf->attributes & CACHE_WRITE_POLICY_MASK))
                return mode;
        if ((attr == &dev_attr_allocation_policy.attr) &&
            (this_leaf->attributes & CACHE_ALLOCATE_POLICY_MASK))
                return mode;
        if ((attr == &dev_attr_physical_line_partition.attr) &&
            this_leaf->physical_line_partition)
                return mode;

        return 0;
}

static const struct attribute_group cache_default_group = {
        .attrs = cache_default_attrs,
        .is_visible = cache_default_attrs_is_visible,
};

static const struct attribute_group *cache_default_groups[] = {
        &cache_default_group,
        NULL,
};

static const struct attribute_group *cache_private_groups[] = {
        &cache_default_group,
        NULL, /* Place holder for private group */
        NULL,
};

const struct attribute_group *
__weak cache_get_priv_group(struct cacheinfo *this_leaf)
{
        return NULL;
}

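/*
 * Pick the attribute groups for a leaf: the defaults alone, or the
 * defaults plus the arch's private group patched into the reserved
 * slot of cache_private_groups[].
 */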
static const struct attribute_group **
cache_get_attribute_groups(struct cacheinfo *this_leaf)
{
        const struct attribute_group *priv_group =
                        cache_get_priv_group(this_leaf);

        if (!priv_group)
                return cache_default_groups;

        if (!cache_private_groups[1])
                cache_private_groups[1] = priv_group;

        return cache_private_groups;
}

/* Add/Remove cache interface for CPU device */
static void cpu_cache_sysfs_exit(unsigned int cpu)
{
        int i;
        struct device *ci_dev;

        if (per_cpu_index_dev(cpu)) {
                for (i = 0; i < cache_leaves(cpu); i++) {
                        ci_dev = per_cache_index_dev(cpu, i);
                        if (!ci_dev)
                                continue;
                        device_unregister(ci_dev);
                }
                kfree(per_cpu_index_dev(cpu));
                per_cpu_index_dev(cpu) = NULL;
        }
        device_unregister(per_cpu_cache_dev(cpu));
        per_cpu_cache_dev(cpu) = NULL;
}

static int cpu_cache_sysfs_init(unsigned int cpu)
{
        struct device *dev = get_cpu_device(cpu);

        if (per_cpu_cacheinfo(cpu) == NULL)
                return -ENOENT;

        per_cpu_cache_dev(cpu) = cpu_device_create(dev, NULL, NULL, "cache");
        if (IS_ERR(per_cpu_cache_dev(cpu)))
                return PTR_ERR(per_cpu_cache_dev(cpu));

        /* Allocate all required memory */
        per_cpu_index_dev(cpu) = kcalloc(cache_leaves(cpu),
                                         sizeof(struct device *), GFP_KERNEL);
        if (unlikely(per_cpu_index_dev(cpu) == NULL))
                goto err_out;

        return 0;

err_out:
        cpu_cache_sysfs_exit(cpu);
        return -ENOMEM;
}

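/*
 * Create one "indexN" child device per cache leaf under cpuX/cache,
 * with the leaf as drvdata and the (possibly arch-extended) attribute
 * groups attached; cache_dev_map tracks which cpus are registered.
 */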
static int cache_add_dev(unsigned int cpu)
{
        unsigned int i;
        int rc;
        struct device *ci_dev, *parent;
        struct cacheinfo *this_leaf;
        struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
        const struct attribute_group **cache_groups;

        rc = cpu_cache_sysfs_init(cpu);
        if (unlikely(rc < 0))
                return rc;

        parent = per_cpu_cache_dev(cpu);
        for (i = 0; i < cache_leaves(cpu); i++) {
                this_leaf = this_cpu_ci->info_list + i;
                if (this_leaf->disable_sysfs)
                        continue;
                cache_groups = cache_get_attribute_groups(this_leaf);
                ci_dev = cpu_device_create(parent, this_leaf, cache_groups,
                                           "index%1u", i);
                if (IS_ERR(ci_dev)) {
                        rc = PTR_ERR(ci_dev);
                        goto err;
                }
                per_cache_index_dev(cpu, i) = ci_dev;
        }
        cpumask_set_cpu(cpu, &cache_dev_map);

        return 0;
err:
        cpu_cache_sysfs_exit(cpu);
        return rc;
}

static void cache_remove_dev(unsigned int cpu)
{
        if (!cpumask_test_cpu(cpu, &cache_dev_map))
                return;
        cpumask_clear_cpu(cpu, &cache_dev_map);

        cpu_cache_sysfs_exit(cpu);
}

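/*
 * Hotplug callback: build cacheinfo and the sysfs nodes when a cpu comes
 * online, tear both down again on CPU_DEAD.
 */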
static int cacheinfo_cpu_callback(struct notifier_block *nfb,
                                  unsigned long action, void *hcpu)
{
        unsigned int cpu = (unsigned long)hcpu;
        int rc = 0;

        switch (action & ~CPU_TASKS_FROZEN) {
        case CPU_ONLINE:
                rc = detect_cache_attributes(cpu);
                if (!rc)
                        rc = cache_add_dev(cpu);
                break;
        case CPU_DEAD:
                cache_remove_dev(cpu);
                if (per_cpu_cacheinfo(cpu))
                        free_cache_attributes(cpu);
                break;
        }
        return notifier_from_errno(rc);
}

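/*
 * Initcall: populate cacheinfo and register the sysfs hierarchy for all
 * cpus already online, then install the hotplug notifier inside the
 * cpu_notifier_register_begin()/done() section so cpus cannot come up
 * unnoticed in between.
 */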
static int __init cacheinfo_sysfs_init(void)
{
        int cpu, rc = 0;

        cpu_notifier_register_begin();

        for_each_online_cpu(cpu) {
                rc = detect_cache_attributes(cpu);
                if (rc)
                        goto out;
                rc = cache_add_dev(cpu);
                if (rc) {
                        free_cache_attributes(cpu);
                        pr_err("error populating cacheinfo for cpu%d\n", cpu);
                        goto out;
                }
        }
        __hotcpu_notifier(cacheinfo_cpu_callback, 0);

out:
        cpu_notifier_register_done();
        return rc;
}

device_initcall(cacheinfo_sysfs_init);