// SPDX-License-Identifier: GPL-2.0
/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "sysfs.h"
#include "btree.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>

extern bool bcache_is_reboot;

/* Default is 0 ("writethrough") */
static const char * const bch_cache_modes[] = {
        "writethrough",
        "writeback",
        "writearound",
        "none",
        NULL
};

/* Default is 0 ("auto") */
static const char * const bch_stop_on_failure_modes[] = {
        "auto",
        "always",
        NULL
};

static const char * const cache_replacement_policies[] = {
        "lru",
        "fifo",
        "random",
        NULL
};

static const char * const error_actions[] = {
        "unregister",
        "panic",
        NULL
};

write_attribute(attach);
write_attribute(detach);
write_attribute(unregister);
write_attribute(stop);
write_attribute(clear_stats);
write_attribute(trigger_gc);
write_attribute(prune_cache);
write_attribute(flash_vol_create);

read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(nbuckets);
read_attribute(tree_depth);
read_attribute(root_usage_percent);
read_attribute(priority_stats);
read_attribute(btree_cache_size);
read_attribute(btree_cache_max_chain);
read_attribute(cache_available_percent);
read_attribute(written);
read_attribute(btree_written);
read_attribute(metadata_written);
read_attribute(active_journal_entries);
read_attribute(backing_dev_name);
read_attribute(backing_dev_uuid);

sysfs_time_stats_attribute(btree_gc,    sec, ms);
sysfs_time_stats_attribute(btree_split, sec, us);
sysfs_time_stats_attribute(btree_sort,  ms,  us);
sysfs_time_stats_attribute(btree_read,  ms,  us);

read_attribute(btree_nodes);
read_attribute(btree_used_percent);
read_attribute(average_key_size);
read_attribute(dirty_data);
read_attribute(bset_tree_stats);

read_attribute(state);
read_attribute(cache_read_races);
read_attribute(reclaim);
read_attribute(reclaimed_journal_buckets);
read_attribute(flush_write);
read_attribute(writeback_keys_done);
read_attribute(writeback_keys_failed);
read_attribute(io_errors);
read_attribute(congested);
read_attribute(cutoff_writeback);
read_attribute(cutoff_writeback_sync);
rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);

rw_attribute(sequential_cutoff);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(stop_when_cache_set_failed);
rw_attribute(writeback_metadata);
rw_attribute(writeback_running);
rw_attribute(writeback_percent);
rw_attribute(writeback_delay);
rw_attribute(writeback_rate);

rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_i_term_inverse);
rw_attribute(writeback_rate_p_term_inverse);
rw_attribute(writeback_rate_minimum);
read_attribute(writeback_rate_debug);

read_attribute(stripe_size);
read_attribute(partial_stripes_expensive);

rw_attribute(synchronous);
rw_attribute(journal_delay_ms);
rw_attribute(io_disable);
rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
rw_attribute(readahead);
rw_attribute(errors);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
rw_attribute(verify);
rw_attribute(bypass_torture_test);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
rw_attribute(expensive_debug_checks);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
rw_attribute(gc_after_writeback);
rw_attribute(size);

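/*
 * Print a NUL-terminated string list into buf with the currently selected
 * entry bracketed, e.g. "writethrough [writeback] writearound none\n".
 * Uses scnprintf() so that, even if output is truncated, 'out' can never
 * advance past the end of the buffer (snprintf() would return the full
 * untruncated length). Returns the number of bytes written.
 */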
static ssize_t bch_snprint_string_list(char *buf,
                                       size_t size,
                                       const char * const list[],
                                       size_t selected)
{
        char *out = buf;
        size_t i;

        for (i = 0; list[i]; i++)
                out += scnprintf(out, buf + size - out,
                                 i == selected ? "[%s] " : "%s ", list[i]);

        out[-1] = '\n';
        return out - buf;
}

SHOW(__bch_cached_dev)
{
        struct cached_dev *dc = container_of(kobj, struct cached_dev,
                                             disk.kobj);
        char const *states[] = { "no cache", "clean", "dirty", "inconsistent" };
        int wb = dc->writeback_running;

#define var(stat)               (dc->stat)

        if (attr == &sysfs_cache_mode)
                return bch_snprint_string_list(buf, PAGE_SIZE,
                                               bch_cache_modes,
                                               BDEV_CACHE_MODE(&dc->sb));

        if (attr == &sysfs_stop_when_cache_set_failed)
                return bch_snprint_string_list(buf, PAGE_SIZE,
                                               bch_stop_on_failure_modes,
                                               dc->stop_when_cache_set_failed);

        sysfs_printf(data_csum,         "%i", dc->disk.data_csum);
        var_printf(verify,              "%i");
        var_printf(bypass_torture_test, "%i");
        var_printf(writeback_metadata,  "%i");
        var_printf(writeback_running,   "%i");
        var_print(writeback_delay);
        var_print(writeback_percent);
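        /*
         * writeback_rate is kept in 512-byte sectors per second; "<< 9"
         * converts it to bytes per second for human-readable output.
         */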
        sysfs_hprint(writeback_rate,
                     wb ? atomic_long_read(&dc->writeback_rate.rate) << 9 : 0);
        sysfs_printf(io_errors,         "%i", atomic_read(&dc->io_errors));
        sysfs_printf(io_error_limit,    "%i", dc->error_limit);
        sysfs_printf(io_disable,        "%i", dc->io_disable);
        var_print(writeback_rate_update_seconds);
        var_print(writeback_rate_i_term_inverse);
        var_print(writeback_rate_p_term_inverse);
        var_print(writeback_rate_minimum);

        if (attr == &sysfs_writeback_rate_debug) {
                char rate[20];
                char dirty[20];
                char target[20];
                char proportional[20];
                char integral[20];
                char change[20];
                s64 next_io;

                /*
                 * Except for dirty and target, other values should
                 * be 0 if writeback is not running.
                 */
                bch_hprint(rate,
                           wb ? atomic_long_read(&dc->writeback_rate.rate) << 9
                              : 0);
                bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9);
                bch_hprint(target, dc->writeback_rate_target << 9);
                bch_hprint(proportional,
                           wb ? dc->writeback_rate_proportional << 9 : 0);
                bch_hprint(integral,
                           wb ? dc->writeback_rate_integral_scaled << 9 : 0);
                bch_hprint(change, wb ? dc->writeback_rate_change << 9 : 0);
                next_io = wb ? div64_s64(dc->writeback_rate.next - local_clock(),
                                         NSEC_PER_MSEC) : 0;

                return sprintf(buf,
                               "rate:\t\t%s/sec\n"
                               "dirty:\t\t%s\n"
                               "target:\t\t%s\n"
                               "proportional:\t%s\n"
                               "integral:\t%s\n"
                               "change:\t\t%s/sec\n"
                               "next io:\t%llims\n",
                               rate, dirty, target, proportional,
                               integral, change, next_io);
        }

        sysfs_hprint(dirty_data,
                     bcache_dev_sectors_dirty(&dc->disk) << 9);

        sysfs_hprint(stripe_size,        ((uint64_t)dc->disk.stripe_size) << 9);
        var_printf(partial_stripes_expensive,   "%u");

        var_hprint(sequential_cutoff);
        var_hprint(readahead);

        sysfs_print(running,            atomic_read(&dc->running));
        sysfs_print(state,              states[BDEV_STATE(&dc->sb)]);

        if (attr == &sysfs_label) {
                memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
                /* label may fill all SB_LABEL_SIZE bytes; terminate it */
                buf[SB_LABEL_SIZE] = '\0';
                strcat(buf, "\n");
                return strlen(buf);
        }

        if (attr == &sysfs_backing_dev_name) {
                snprintf(buf, BDEVNAME_SIZE + 1, "%s", dc->backing_dev_name);
                strcat(buf, "\n");
                return strlen(buf);
        }

        if (attr == &sysfs_backing_dev_uuid) {
                /* convert binary uuid into 36-byte string plus '\0' */
                snprintf(buf, 36 + 1, "%pU", dc->sb.uuid);
                strcat(buf, "\n");
                return strlen(buf);
        }

#undef var
        return 0;
}
SHOW_LOCKED(bch_cached_dev)

STORE(__cached_dev)
{
        struct cached_dev *dc = container_of(kobj, struct cached_dev,
                                             disk.kobj);
        ssize_t v;
        struct cache_set *c;
        struct kobj_uevent_env *env;

        /* no user space access if system is rebooting */
        if (bcache_is_reboot)
                return -EBUSY;

#define d_strtoul(var)          sysfs_strtoul(var, dc->var)
#define d_strtoul_nonzero(var)  sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
#define d_strtoi_h(var)         sysfs_hatoi(var, dc->var)

        sysfs_strtoul(data_csum,        dc->disk.data_csum);
        d_strtoul(verify);
        sysfs_strtoul_bool(bypass_torture_test, dc->bypass_torture_test);
        sysfs_strtoul_bool(writeback_metadata, dc->writeback_metadata);
        sysfs_strtoul_bool(writeback_running, dc->writeback_running);
        sysfs_strtoul_clamp(writeback_delay, dc->writeback_delay, 0, UINT_MAX);

        sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent,
                            0, bch_cutoff_writeback);

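        /*
         * Note: a rate written here takes effect immediately, but the PI
         * controller in update_writeback_rate() periodically recomputes
         * the rate while writeback_percent is nonzero, so a manually
         * written value may not stick. Example (standard bcache sysfs
         * path assumed):
         *
         *   echo 4096 > /sys/block/bcache0/bcache/writeback_rate
         */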
        if (attr == &sysfs_writeback_rate) {
                ssize_t ret;
                long int v = atomic_long_read(&dc->writeback_rate.rate);

                ret = strtoul_safe_clamp(buf, v, 1, INT_MAX);

                if (!ret) {
                        atomic_long_set(&dc->writeback_rate.rate, v);
                        ret = size;
                }

                return ret;
        }

        sysfs_strtoul_clamp(writeback_rate_update_seconds,
                            dc->writeback_rate_update_seconds,
                            1, WRITEBACK_RATE_UPDATE_SECS_MAX);
        sysfs_strtoul_clamp(writeback_rate_i_term_inverse,
                            dc->writeback_rate_i_term_inverse,
                            1, UINT_MAX);
        sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
                            dc->writeback_rate_p_term_inverse,
                            1, UINT_MAX);
        sysfs_strtoul_clamp(writeback_rate_minimum,
                            dc->writeback_rate_minimum,
                            1, UINT_MAX);

        sysfs_strtoul_clamp(io_error_limit, dc->error_limit, 0, INT_MAX);

        if (attr == &sysfs_io_disable) {
                int v = strtoul_or_return(buf);

                dc->io_disable = v ? 1 : 0;
        }

        sysfs_strtoul_clamp(sequential_cutoff,
                            dc->sequential_cutoff,
                            0, UINT_MAX);
        d_strtoi_h(readahead);

        if (attr == &sysfs_clear_stats)
                bch_cache_accounting_clear(&dc->accounting);

        if (attr == &sysfs_running &&
            strtoul_or_return(buf)) {
                v = bch_cached_dev_run(dc);
                if (v)
                        return v;
        }

        if (attr == &sysfs_cache_mode) {
                v = __sysfs_match_string(bch_cache_modes, -1, buf);
                if (v < 0)
                        return v;

                if ((unsigned int) v != BDEV_CACHE_MODE(&dc->sb)) {
                        SET_BDEV_CACHE_MODE(&dc->sb, v);
                        bch_write_bdev_super(dc, NULL);
                }
        }

        if (attr == &sysfs_stop_when_cache_set_failed) {
                v = __sysfs_match_string(bch_stop_on_failure_modes, -1, buf);
                if (v < 0)
                        return v;

                dc->stop_when_cache_set_failed = v;
        }

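        /*
         * Changing the label rewrites the backing device superblock,
         * updates the uuid entry in the cache set (if attached), and
         * emits a KOBJ_CHANGE uevent so user space (e.g. udev rules)
         * can react to the new label.
         */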
        if (attr == &sysfs_label) {
                if (size > SB_LABEL_SIZE)
                        return -EINVAL;
                memcpy(dc->sb.label, buf, size);
                if (size < SB_LABEL_SIZE)
                        dc->sb.label[size] = '\0';
                if (size && dc->sb.label[size - 1] == '\n')
                        dc->sb.label[size - 1] = '\0';
                bch_write_bdev_super(dc, NULL);
                if (dc->disk.c) {
                        memcpy(dc->disk.c->uuids[dc->disk.id].label,
                               buf, SB_LABEL_SIZE);
                        bch_uuid_write(dc->disk.c);
                }
                env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
                if (!env)
                        return -ENOMEM;
                add_uevent_var(env, "DRIVER=bcache");
                add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid);
                add_uevent_var(env, "CACHED_LABEL=%s", buf);
                kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj,
                                   KOBJ_CHANGE,
                                   env->envp);
                kfree(env);
        }

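        /*
         * The value written to "attach" is the cache set's UUID; walk the
         * registered cache sets and attach to the one that matches.
         */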
        if (attr == &sysfs_attach) {
                uint8_t         set_uuid[16];

                if (bch_parse_uuid(buf, set_uuid) < 16)
                        return -EINVAL;

                v = -ENOENT;
                list_for_each_entry(c, &bch_cache_sets, list) {
                        v = bch_cached_dev_attach(dc, c, set_uuid);
                        if (!v)
                                return size;
                }
                if (v == -ENOENT)
                        pr_err("Can't attach %s: cache set not found\n", buf);
                return v;
        }

        if (attr == &sysfs_detach && dc->disk.c)
                bch_cached_dev_detach(dc);

        if (attr == &sysfs_stop)
                bcache_device_stop(&dc->disk);

        return size;
}

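/*
 * Locked wrapper around __cached_dev_store(): takes bch_register_lock and
 * handles the side effects (kicking the writeback thread, scheduling rate
 * updates) that must happen after the new values are visible.
 */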
STORE(bch_cached_dev)
{
        struct cached_dev *dc = container_of(kobj, struct cached_dev,
                                             disk.kobj);

        /* no user space access if system is rebooting */
        if (bcache_is_reboot)
                return -EBUSY;

        mutex_lock(&bch_register_lock);
        size = __cached_dev_store(kobj, attr, buf, size);

        if (attr == &sysfs_writeback_running) {
                /* dc->writeback_running changed in __cached_dev_store() */
                if (IS_ERR_OR_NULL(dc->writeback_thread)) {
                        /*
                         * reject setting it to 1 via sysfs if writeback
                         * kthread is not created yet.
                         */
                        if (dc->writeback_running) {
                                dc->writeback_running = false;
                                pr_err("%s: failed to run non-existent writeback thread\n",
                                       dc->disk.disk->disk_name);
                        }
                } else
                        /*
                         * writeback kthread will check if dc->writeback_running
                         * is true or false.
                         */
                        bch_writeback_queue(dc);
        }

        /*
         * Only set BCACHE_DEV_WB_RUNNING when cached device attached to
         * a cache set, otherwise it doesn't make sense.
         */
        if (attr == &sysfs_writeback_percent)
                if ((dc->disk.c != NULL) &&
                    (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags)))
                        schedule_delayed_work(&dc->writeback_rate_update,
                                      dc->writeback_rate_update_seconds * HZ);

        mutex_unlock(&bch_register_lock);
        return size;
}

static struct attribute *bch_cached_dev_files[] = {
        &sysfs_attach,
        &sysfs_detach,
        &sysfs_stop,
#if 0
        &sysfs_data_csum,
#endif
        &sysfs_cache_mode,
        &sysfs_stop_when_cache_set_failed,
        &sysfs_writeback_metadata,
        &sysfs_writeback_running,
        &sysfs_writeback_delay,
        &sysfs_writeback_percent,
        &sysfs_writeback_rate,
        &sysfs_writeback_rate_update_seconds,
        &sysfs_writeback_rate_i_term_inverse,
        &sysfs_writeback_rate_p_term_inverse,
        &sysfs_writeback_rate_minimum,
        &sysfs_writeback_rate_debug,
        &sysfs_io_errors,
        &sysfs_io_error_limit,
        &sysfs_io_disable,
        &sysfs_dirty_data,
        &sysfs_stripe_size,
        &sysfs_partial_stripes_expensive,
        &sysfs_sequential_cutoff,
        &sysfs_clear_stats,
        &sysfs_running,
        &sysfs_state,
        &sysfs_label,
        &sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG
        &sysfs_verify,
        &sysfs_bypass_torture_test,
#endif
        &sysfs_backing_dev_name,
        &sysfs_backing_dev_uuid,
        NULL
};
KTYPE(bch_cached_dev);

SHOW(bch_flash_dev)
{
        struct bcache_device *d = container_of(kobj, struct bcache_device,
                                               kobj);
        struct uuid_entry *u = &d->c->uuids[d->id];

        sysfs_printf(data_csum, "%i", d->data_csum);
        sysfs_hprint(size,      u->sectors << 9);

        if (attr == &sysfs_label) {
                memcpy(buf, u->label, SB_LABEL_SIZE);
                /* label may fill all SB_LABEL_SIZE bytes; terminate it */
                buf[SB_LABEL_SIZE] = '\0';
                strcat(buf, "\n");
                return strlen(buf);
        }

        return 0;
}

STORE(__bch_flash_dev)
{
        struct bcache_device *d = container_of(kobj, struct bcache_device,
                                               kobj);
        struct uuid_entry *u = &d->c->uuids[d->id];

        /* no user space access if system is rebooting */
        if (bcache_is_reboot)
                return -EBUSY;

        sysfs_strtoul(data_csum,        d->data_csum);

        if (attr == &sysfs_size) {
                uint64_t v;

                strtoi_h_or_return(buf, v);

                u->sectors = v >> 9;
                bch_uuid_write(d->c);
                set_capacity(d->disk, u->sectors);
        }

        if (attr == &sysfs_label) {
                memcpy(u->label, buf, SB_LABEL_SIZE);
                bch_uuid_write(d->c);
        }

        if (attr == &sysfs_unregister) {
                set_bit(BCACHE_DEV_DETACHING, &d->flags);
                bcache_device_stop(d);
        }

        return size;
}
STORE_LOCKED(bch_flash_dev)

static struct attribute *bch_flash_dev_files[] = {
        &sysfs_unregister,
#if 0
        &sysfs_data_csum,
#endif
        &sysfs_label,
        &sysfs_size,
        NULL
};
KTYPE(bch_flash_dev);

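/*
 * bset_tree_stats: walk every btree node with bch_btree_map_nodes() and
 * accumulate per-bset statistics into a single struct for printing.
 */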
struct bset_stats_op {
        struct btree_op op;
        size_t nodes;
        struct bset_stats stats;
};

static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b)
{
        struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op);

        op->nodes++;
        bch_btree_keys_stats(&b->keys, &op->stats);

        return MAP_CONTINUE;
}

static int bch_bset_print_stats(struct cache_set *c, char *buf)
{
        struct bset_stats_op op;
        int ret;

        memset(&op, 0, sizeof(op));
        bch_btree_op_init(&op.op, -1);

        ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats);
        if (ret < 0)
                return ret;

        return snprintf(buf, PAGE_SIZE,
                        "btree nodes:           %zu\n"
                        "written sets:          %zu\n"
                        "unwritten sets:        %zu\n"
                        "written key bytes:     %zu\n"
                        "unwritten key bytes:   %zu\n"
                        "floats:                %zu\n"
                        "failed:                %zu\n",
                        op.nodes,
                        op.stats.sets_written, op.stats.sets_unwritten,
                        op.stats.bytes_written, op.stats.bytes_unwritten,
                        op.stats.floats, op.stats.failed);
}

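/*
 * Compute how full the root btree node is. The root can be replaced while
 * we sleep waiting for its lock, hence the retry loop: lock the node, then
 * verify that c->root still points at the node we locked.
 */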
static unsigned int bch_root_usage(struct cache_set *c)
{
        unsigned int bytes = 0;
        struct bkey *k;
        struct btree *b;
        struct btree_iter iter;

        goto lock_root;

        do {
                rw_unlock(false, b);
lock_root:
                b = c->root;
                rw_lock(false, b, b->level);
        } while (b != c->root);

        for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
                bytes += bkey_bytes(k);

        rw_unlock(false, b);

        return (bytes * 100) / btree_bytes(c);
}

static size_t bch_cache_size(struct cache_set *c)
{
        size_t ret = 0;
        struct btree *b;

        mutex_lock(&c->bucket_lock);
        list_for_each_entry(b, &c->btree_cache, list)
                ret += 1 << (b->keys.page_order + PAGE_SHIFT);

        mutex_unlock(&c->bucket_lock);
        return ret;
}

static unsigned int bch_cache_max_chain(struct cache_set *c)
{
        unsigned int ret = 0;
        struct hlist_head *h;

        mutex_lock(&c->bucket_lock);

        for (h = c->bucket_hash;
             h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
             h++) {
                unsigned int i = 0;
                struct hlist_node *p;

                hlist_for_each(p, h)
                        i++;

                ret = max(ret, i);
        }

        mutex_unlock(&c->bucket_lock);
        return ret;
}

static unsigned int bch_btree_used(struct cache_set *c)
{
        return div64_u64(c->gc_stats.key_bytes * 100,
                         (c->gc_stats.nodes ?: 1) * btree_bytes(c));
}

static unsigned int bch_average_key_size(struct cache_set *c)
{
        return c->gc_stats.nkeys
                ? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
                : 0;
}

SHOW(__bch_cache_set)
{
        struct cache_set *c = container_of(kobj, struct cache_set, kobj);

        sysfs_print(synchronous,                CACHE_SYNC(&c->sb));
        sysfs_print(journal_delay_ms,           c->journal_delay_ms);
        sysfs_hprint(bucket_size,               bucket_bytes(c));
        sysfs_hprint(block_size,                block_bytes(c));
        sysfs_print(tree_depth,                 c->root->level);
        sysfs_print(root_usage_percent,         bch_root_usage(c));

        sysfs_hprint(btree_cache_size,          bch_cache_size(c));
        sysfs_print(btree_cache_max_chain,      bch_cache_max_chain(c));
        sysfs_print(cache_available_percent,    100 - c->gc_stats.in_use);

        sysfs_print_time_stats(&c->btree_gc_time,       btree_gc, sec, ms);
        sysfs_print_time_stats(&c->btree_split_time,    btree_split, sec, us);
        sysfs_print_time_stats(&c->sort.time,           btree_sort, ms, us);
        sysfs_print_time_stats(&c->btree_read_time,     btree_read, ms, us);

        sysfs_print(btree_used_percent, bch_btree_used(c));
        sysfs_print(btree_nodes,        c->gc_stats.nodes);
        sysfs_hprint(average_key_size,  bch_average_key_size(c));

        sysfs_print(cache_read_races,
                    atomic_long_read(&c->cache_read_races));

        sysfs_print(reclaim,
                    atomic_long_read(&c->reclaim));

        sysfs_print(reclaimed_journal_buckets,
                    atomic_long_read(&c->reclaimed_journal_buckets));

        sysfs_print(flush_write,
                    atomic_long_read(&c->flush_write));

        sysfs_print(writeback_keys_done,
                    atomic_long_read(&c->writeback_keys_done));
        sysfs_print(writeback_keys_failed,
                    atomic_long_read(&c->writeback_keys_failed));

        if (attr == &sysfs_errors)
                return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
                                               c->on_error);

        /* See count_io_errors for why 88 */
        sysfs_print(io_error_halflife,  c->error_decay * 88);
        sysfs_print(io_error_limit,     c->error_limit);

        sysfs_hprint(congested,
                     ((uint64_t) bch_get_congested(c)) << 9);
        sysfs_print(congested_read_threshold_us,
                    c->congested_read_threshold_us);
        sysfs_print(congested_write_threshold_us,
                    c->congested_write_threshold_us);

        sysfs_print(cutoff_writeback, bch_cutoff_writeback);
        sysfs_print(cutoff_writeback_sync, bch_cutoff_writeback_sync);

        sysfs_print(active_journal_entries,     fifo_used(&c->journal.pin));
        sysfs_printf(verify,                    "%i", c->verify);
        sysfs_printf(key_merging_disabled,      "%i", c->key_merging_disabled);
        sysfs_printf(expensive_debug_checks,
                     "%i", c->expensive_debug_checks);
        sysfs_printf(gc_always_rewrite,         "%i", c->gc_always_rewrite);
        sysfs_printf(btree_shrinker_disabled,   "%i", c->shrinker_disabled);
        sysfs_printf(copy_gc_enabled,           "%i", c->copy_gc_enabled);
        sysfs_printf(gc_after_writeback,        "%i", c->gc_after_writeback);
        sysfs_printf(io_disable,                "%i",
                     test_bit(CACHE_SET_IO_DISABLE, &c->flags));

        if (attr == &sysfs_bset_tree_stats)
                return bch_bset_print_stats(c, buf);

        return 0;
}
SHOW_LOCKED(bch_cache_set)

STORE(__bch_cache_set)
{
        struct cache_set *c = container_of(kobj, struct cache_set, kobj);
        ssize_t v;

        /* no user space access if system is rebooting */
        if (bcache_is_reboot)
                return -EBUSY;

        if (attr == &sysfs_unregister)
                bch_cache_set_unregister(c);

        if (attr == &sysfs_stop)
                bch_cache_set_stop(c);

        if (attr == &sysfs_synchronous) {
                bool sync = strtoul_or_return(buf);

                if (sync != CACHE_SYNC(&c->sb)) {
                        SET_CACHE_SYNC(&c->sb, sync);
                        bcache_write_super(c);
                }
        }

        if (attr == &sysfs_flash_vol_create) {
                int r;
                uint64_t v;

                strtoi_h_or_return(buf, v);

                r = bch_flash_dev_create(c, v);
                if (r)
                        return r;
        }

        if (attr == &sysfs_clear_stats) {
                atomic_long_set(&c->writeback_keys_done,        0);
                atomic_long_set(&c->writeback_keys_failed,      0);

                memset(&c->gc_stats, 0, sizeof(struct gc_stat));
                bch_cache_accounting_clear(&c->accounting);
        }

        if (attr == &sysfs_trigger_gc)
                force_wake_up_gc(c);

        if (attr == &sysfs_prune_cache) {
                struct shrink_control sc;

                sc.gfp_mask = GFP_KERNEL;
                sc.nr_to_scan = strtoul_or_return(buf);
                c->shrink.scan_objects(&c->shrink, &sc);
        }

        sysfs_strtoul_clamp(congested_read_threshold_us,
                            c->congested_read_threshold_us,
                            0, UINT_MAX);
        sysfs_strtoul_clamp(congested_write_threshold_us,
                            c->congested_write_threshold_us,
                            0, UINT_MAX);

        if (attr == &sysfs_errors) {
                v = __sysfs_match_string(error_actions, -1, buf);
                if (v < 0)
                        return v;

                c->on_error = v;
        }

        sysfs_strtoul_clamp(io_error_limit, c->error_limit, 0, UINT_MAX);

        /* See count_io_errors() for why 88 */
        if (attr == &sysfs_io_error_halflife) {
                unsigned long v = 0;
                ssize_t ret;

                ret = strtoul_safe_clamp(buf, v, 0, UINT_MAX);
                if (!ret) {
                        c->error_decay = v / 88;
                        return size;
                }
                return ret;
        }

        if (attr == &sysfs_io_disable) {
                v = strtoul_or_return(buf);
                if (v) {
                        if (test_and_set_bit(CACHE_SET_IO_DISABLE,
                                             &c->flags))
                                pr_warn("CACHE_SET_IO_DISABLE already set\n");
                } else {
                        if (!test_and_clear_bit(CACHE_SET_IO_DISABLE,
                                                &c->flags))
                                pr_warn("CACHE_SET_IO_DISABLE already cleared\n");
                }
        }

        sysfs_strtoul_clamp(journal_delay_ms,
                            c->journal_delay_ms,
                            0, USHRT_MAX);
        sysfs_strtoul_bool(verify,              c->verify);
        sysfs_strtoul_bool(key_merging_disabled, c->key_merging_disabled);
        sysfs_strtoul(expensive_debug_checks,   c->expensive_debug_checks);
        sysfs_strtoul_bool(gc_always_rewrite,   c->gc_always_rewrite);
        sysfs_strtoul_bool(btree_shrinker_disabled, c->shrinker_disabled);
        sysfs_strtoul_bool(copy_gc_enabled,     c->copy_gc_enabled);
        /*
         * Writing gc_after_writeback here may clear an already-set
         * BCH_DO_AUTO_GC flag; that is harmless, since the flag will
         * simply be set again at the next opportunity.
         */
        sysfs_strtoul_clamp(gc_after_writeback, c->gc_after_writeback, 0, 1);

        return size;
}
STORE_LOCKED(bch_cache_set)

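/*
 * The "internal" kobject is a separate sysfs directory for debugging
 * attributes; its show/store simply forward to the cache set's main
 * kobject handlers.
 */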
SHOW(bch_cache_set_internal)
{
        struct cache_set *c = container_of(kobj, struct cache_set, internal);

        return bch_cache_set_show(&c->kobj, attr, buf);
}

STORE(bch_cache_set_internal)
{
        struct cache_set *c = container_of(kobj, struct cache_set, internal);

        /* no user space access if system is rebooting */
        if (bcache_is_reboot)
                return -EBUSY;

        return bch_cache_set_store(&c->kobj, attr, buf, size);
}

static void bch_cache_set_internal_release(struct kobject *k)
{
}

static struct attribute *bch_cache_set_files[] = {
        &sysfs_unregister,
        &sysfs_stop,
        &sysfs_synchronous,
        &sysfs_journal_delay_ms,
        &sysfs_flash_vol_create,

        &sysfs_bucket_size,
        &sysfs_block_size,
        &sysfs_tree_depth,
        &sysfs_root_usage_percent,
        &sysfs_btree_cache_size,
        &sysfs_cache_available_percent,

        &sysfs_average_key_size,

        &sysfs_errors,
        &sysfs_io_error_limit,
        &sysfs_io_error_halflife,
        &sysfs_congested,
        &sysfs_congested_read_threshold_us,
        &sysfs_congested_write_threshold_us,
        &sysfs_clear_stats,
        NULL
};
KTYPE(bch_cache_set);

static struct attribute *bch_cache_set_internal_files[] = {
        &sysfs_active_journal_entries,

        sysfs_time_stats_attribute_list(btree_gc, sec, ms)
        sysfs_time_stats_attribute_list(btree_split, sec, us)
        sysfs_time_stats_attribute_list(btree_sort, ms, us)
        sysfs_time_stats_attribute_list(btree_read, ms, us)

        &sysfs_btree_nodes,
        &sysfs_btree_used_percent,
        &sysfs_btree_cache_max_chain,

        &sysfs_bset_tree_stats,
        &sysfs_cache_read_races,
        &sysfs_reclaim,
        &sysfs_reclaimed_journal_buckets,
        &sysfs_flush_write,
        &sysfs_writeback_keys_done,
        &sysfs_writeback_keys_failed,

        &sysfs_trigger_gc,
        &sysfs_prune_cache,
#ifdef CONFIG_BCACHE_DEBUG
        &sysfs_verify,
        &sysfs_key_merging_disabled,
        &sysfs_expensive_debug_checks,
#endif
        &sysfs_gc_always_rewrite,
        &sysfs_btree_shrinker_disabled,
        &sysfs_copy_gc_enabled,
        &sysfs_gc_after_writeback,
        &sysfs_io_disable,
        &sysfs_cutoff_writeback,
        &sysfs_cutoff_writeback_sync,
        NULL
};
KTYPE(bch_cache_set_internal);

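/* Comparator for sort(): orders bucket priorities in descending order. */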
static int __bch_cache_cmp(const void *l, const void *r)
{
        return *((uint16_t *)r) - *((uint16_t *)l);
}

SHOW(__bch_cache)
{
        struct cache *ca = container_of(kobj, struct cache, kobj);

        sysfs_hprint(bucket_size,       bucket_bytes(ca));
        sysfs_hprint(block_size,        block_bytes(ca));
        sysfs_print(nbuckets,           ca->sb.nbuckets);
        sysfs_print(discard,            ca->discard);
        sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
        sysfs_hprint(btree_written,
                     atomic_long_read(&ca->btree_sectors_written) << 9);
        sysfs_hprint(metadata_written,
                     (atomic_long_read(&ca->meta_sectors_written) +
                      atomic_long_read(&ca->btree_sectors_written)) << 9);

        sysfs_print(io_errors,
                    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);

        if (attr == &sysfs_cache_replacement_policy)
                return bch_snprint_string_list(buf, PAGE_SIZE,
                                               cache_replacement_policies,
                                               CACHE_REPLACEMENT(&ca->sb));

        if (attr == &sysfs_priority_stats) {
                struct bucket *b;
                size_t n = ca->sb.nbuckets, i;
                size_t unused = 0, available = 0, dirty = 0, meta = 0;
                uint64_t sum = 0;
                /* Compute 31 quantiles */
                uint16_t q[31], *p, *cached;
                ssize_t ret;

                cached = p = vmalloc(array_size(sizeof(uint16_t),
                                                ca->sb.nbuckets));
                if (!p)
                        return -ENOMEM;

                mutex_lock(&ca->set->bucket_lock);
                for_each_bucket(b, ca) {
                        if (!GC_SECTORS_USED(b))
                                unused++;
                        if (GC_MARK(b) == GC_MARK_RECLAIMABLE)
                                available++;
                        if (GC_MARK(b) == GC_MARK_DIRTY)
                                dirty++;
                        if (GC_MARK(b) == GC_MARK_METADATA)
                                meta++;
                }

                for (i = ca->sb.first_bucket; i < n; i++)
                        p[i] = ca->buckets[i].prio;
                mutex_unlock(&ca->set->bucket_lock);

                sort(p, n, sizeof(uint16_t), __bch_cache_cmp, NULL);

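                /*
                 * The sort is descending, so unused buckets (prio 0) end
                 * up at the tail and btree buckets (BTREE_PRIO) at the
                 * head; trim both so the quantiles cover only cached
                 * data buckets.
                 */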
                while (n &&
                       !cached[n - 1])
                        --n;

                while (cached < p + n &&
                       *cached == BTREE_PRIO)
                        cached++, n--;

                for (i = 0; i < n; i++)
                        sum += INITIAL_PRIO - cached[i];

                if (n)
                        do_div(sum, n);

                for (i = 0; i < ARRAY_SIZE(q); i++)
                        q[i] = INITIAL_PRIO - cached[n * (i + 1) /
                                (ARRAY_SIZE(q) + 1)];

                vfree(p);

                ret = scnprintf(buf, PAGE_SIZE,
                                "Unused:        %zu%%\n"
                                "Clean:         %zu%%\n"
                                "Dirty:         %zu%%\n"
                                "Metadata:      %zu%%\n"
                                "Average:       %llu\n"
                                "Sectors per Q: %zu\n"
                                "Quantiles:     [",
                                unused * 100 / (size_t) ca->sb.nbuckets,
                                available * 100 / (size_t) ca->sb.nbuckets,
                                dirty * 100 / (size_t) ca->sb.nbuckets,
                                meta * 100 / (size_t) ca->sb.nbuckets, sum,
                                n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));

                for (i = 0; i < ARRAY_SIZE(q); i++)
                        ret += scnprintf(buf + ret, PAGE_SIZE - ret,
                                         "%u ", q[i]);
                ret--;

                ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");

                return ret;
        }

        return 0;
}
SHOW_LOCKED(bch_cache)

STORE(__bch_cache)
{
        struct cache *ca = container_of(kobj, struct cache, kobj);
        ssize_t v;

        /* no user space access if system is rebooting */
        if (bcache_is_reboot)
                return -EBUSY;

        if (attr == &sysfs_discard) {
                bool v = strtoul_or_return(buf);

                if (blk_queue_discard(bdev_get_queue(ca->bdev)))
                        ca->discard = v;

                if (v != CACHE_DISCARD(&ca->sb)) {
                        SET_CACHE_DISCARD(&ca->sb, v);
                        bcache_write_super(ca->set);
                }
        }

        if (attr == &sysfs_cache_replacement_policy) {
                v = __sysfs_match_string(cache_replacement_policies, -1, buf);
                if (v < 0)
                        return v;

                if ((unsigned int) v != CACHE_REPLACEMENT(&ca->sb)) {
                        mutex_lock(&ca->set->bucket_lock);
                        SET_CACHE_REPLACEMENT(&ca->sb, v);
                        mutex_unlock(&ca->set->bucket_lock);

                        bcache_write_super(ca->set);
                }
        }

        if (attr == &sysfs_clear_stats) {
                atomic_long_set(&ca->sectors_written, 0);
                atomic_long_set(&ca->btree_sectors_written, 0);
                atomic_long_set(&ca->meta_sectors_written, 0);
                atomic_set(&ca->io_count, 0);
                atomic_set(&ca->io_errors, 0);
        }

        return size;
}
STORE_LOCKED(bch_cache)

static struct attribute *bch_cache_files[] = {
        &sysfs_bucket_size,
        &sysfs_block_size,
        &sysfs_nbuckets,
        &sysfs_priority_stats,
        &sysfs_discard,
        &sysfs_written,
        &sysfs_btree_written,
        &sysfs_metadata_written,
        &sysfs_io_errors,
        &sysfs_clear_stats,
        &sysfs_cache_replacement_policy,
        NULL
};
KTYPE(bch_cache);