drivers/md/bcache/sysfs.c
// SPDX-License-Identifier: GPL-2.0
/*
 * bcache sysfs interfaces
 *
 * Copyright 2010, 2011 Kent Overstreet <kent.overstreet@gmail.com>
 * Copyright 2012 Google, Inc.
 */

#include "bcache.h"
#include "sysfs.h"
#include "btree.h"
#include "request.h"
#include "writeback.h"

#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/sched/clock.h>

/* Default is 0 ("writethrough") */
static const char * const bch_cache_modes[] = {
        "writethrough",
        "writeback",
        "writearound",
        "none",
        NULL
};

/* Default is 0 ("auto") */
static const char * const bch_stop_on_failure_modes[] = {
        "auto",
        "always",
        NULL
};

static const char * const cache_replacement_policies[] = {
        "lru",
        "fifo",
        "random",
        NULL
};

static const char * const error_actions[] = {
        "unregister",
        "panic",
        NULL
};

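/*
 * The write_attribute()/read_attribute()/rw_attribute() macros (from
 * sysfs.h in this directory) declare the sysfs_<name> attribute objects;
 * the SHOW()/STORE() bodies below dispatch on those attribute pointers.
 */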
write_attribute(attach);
write_attribute(detach);
write_attribute(unregister);
write_attribute(stop);
write_attribute(clear_stats);
write_attribute(trigger_gc);
write_attribute(prune_cache);
write_attribute(flash_vol_create);

read_attribute(bucket_size);
read_attribute(block_size);
read_attribute(nbuckets);
read_attribute(tree_depth);
read_attribute(root_usage_percent);
read_attribute(priority_stats);
read_attribute(btree_cache_size);
read_attribute(btree_cache_max_chain);
read_attribute(cache_available_percent);
read_attribute(written);
read_attribute(btree_written);
read_attribute(metadata_written);
read_attribute(active_journal_entries);
read_attribute(backing_dev_name);
read_attribute(backing_dev_uuid);

sysfs_time_stats_attribute(btree_gc,    sec, ms);
sysfs_time_stats_attribute(btree_split, sec, us);
sysfs_time_stats_attribute(btree_sort,  ms,  us);
sysfs_time_stats_attribute(btree_read,  ms,  us);

read_attribute(btree_nodes);
read_attribute(btree_used_percent);
read_attribute(average_key_size);
read_attribute(dirty_data);
read_attribute(bset_tree_stats);

read_attribute(state);
read_attribute(cache_read_races);
read_attribute(reclaim);
read_attribute(flush_write);
read_attribute(retry_flush_write);
read_attribute(writeback_keys_done);
read_attribute(writeback_keys_failed);
read_attribute(io_errors);
read_attribute(congested);
read_attribute(cutoff_writeback);
read_attribute(cutoff_writeback_sync);
rw_attribute(congested_read_threshold_us);
rw_attribute(congested_write_threshold_us);

rw_attribute(sequential_cutoff);
rw_attribute(data_csum);
rw_attribute(cache_mode);
rw_attribute(stop_when_cache_set_failed);
rw_attribute(writeback_metadata);
rw_attribute(writeback_running);
rw_attribute(writeback_percent);
rw_attribute(writeback_delay);
rw_attribute(writeback_rate);

rw_attribute(writeback_rate_update_seconds);
rw_attribute(writeback_rate_i_term_inverse);
rw_attribute(writeback_rate_p_term_inverse);
rw_attribute(writeback_rate_minimum);
read_attribute(writeback_rate_debug);

read_attribute(stripe_size);
read_attribute(partial_stripes_expensive);

rw_attribute(synchronous);
rw_attribute(journal_delay_ms);
rw_attribute(io_disable);
rw_attribute(discard);
rw_attribute(running);
rw_attribute(label);
rw_attribute(readahead);
rw_attribute(errors);
rw_attribute(io_error_limit);
rw_attribute(io_error_halflife);
rw_attribute(verify);
rw_attribute(bypass_torture_test);
rw_attribute(key_merging_disabled);
rw_attribute(gc_always_rewrite);
rw_attribute(expensive_debug_checks);
rw_attribute(cache_replacement_policy);
rw_attribute(btree_shrinker_disabled);
rw_attribute(copy_gc_enabled);
rw_attribute(gc_after_writeback);
rw_attribute(size);

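/*
 * Print each entry of @list separated by spaces, with the selected entry
 * wrapped in square brackets, e.g. "writethrough [writeback] ...".
 * Assumes @list is non-empty: the trailing space emitted after the last
 * entry is overwritten with a newline.
 */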
static ssize_t bch_snprint_string_list(char *buf,
                                       size_t size,
                                       const char * const list[],
                                       size_t selected)
{
        char *out = buf;
        size_t i;

        for (i = 0; list[i]; i++)
                out += snprintf(out, buf + size - out,
                                i == selected ? "[%s] " : "%s ", list[i]);

        out[-1] = '\n';
        return out - buf;
}

SHOW(__bch_cached_dev)
{
        struct cached_dev *dc = container_of(kobj, struct cached_dev,
                                             disk.kobj);
        char const *states[] = { "no cache", "clean", "dirty", "inconsistent" };
        int wb = dc->writeback_running;

#define var(stat)               (dc->stat)

        if (attr == &sysfs_cache_mode)
                return bch_snprint_string_list(buf, PAGE_SIZE,
                                               bch_cache_modes,
                                               BDEV_CACHE_MODE(&dc->sb));

        if (attr == &sysfs_stop_when_cache_set_failed)
                return bch_snprint_string_list(buf, PAGE_SIZE,
                                               bch_stop_on_failure_modes,
                                               dc->stop_when_cache_set_failed);

        sysfs_printf(data_csum,         "%i", dc->disk.data_csum);
        var_printf(verify,              "%i");
        var_printf(bypass_torture_test, "%i");
        var_printf(writeback_metadata,  "%i");
        var_printf(writeback_running,   "%i");
        var_print(writeback_delay);
        var_print(writeback_percent);
        sysfs_hprint(writeback_rate,
                     wb ? atomic_long_read(&dc->writeback_rate.rate) << 9 : 0);
        sysfs_hprint(io_errors,         atomic_read(&dc->io_errors));
        sysfs_printf(io_error_limit,    "%i", dc->error_limit);
        sysfs_printf(io_disable,        "%i", dc->io_disable);
        var_print(writeback_rate_update_seconds);
        var_print(writeback_rate_i_term_inverse);
        var_print(writeback_rate_p_term_inverse);
        var_print(writeback_rate_minimum);

        if (attr == &sysfs_writeback_rate_debug) {
                char rate[20];
                char dirty[20];
                char target[20];
                char proportional[20];
                char integral[20];
                char change[20];
                s64 next_io;

                /*
                 * Except for dirty and target, other values should
                 * be 0 if writeback is not running.
                 */
                bch_hprint(rate,
                           wb ? atomic_long_read(&dc->writeback_rate.rate) << 9
                              : 0);
                bch_hprint(dirty, bcache_dev_sectors_dirty(&dc->disk) << 9);
                bch_hprint(target, dc->writeback_rate_target << 9);
                bch_hprint(proportional,
                           wb ? dc->writeback_rate_proportional << 9 : 0);
                bch_hprint(integral,
                           wb ? dc->writeback_rate_integral_scaled << 9 : 0);
                bch_hprint(change, wb ? dc->writeback_rate_change << 9 : 0);
                next_io = wb ? div64_s64(dc->writeback_rate.next-local_clock(),
                                         NSEC_PER_MSEC) : 0;

                return sprintf(buf,
                               "rate:\t\t%s/sec\n"
                               "dirty:\t\t%s\n"
                               "target:\t\t%s\n"
                               "proportional:\t%s\n"
                               "integral:\t%s\n"
                               "change:\t\t%s/sec\n"
                               "next io:\t%llims\n",
                               rate, dirty, target, proportional,
                               integral, change, next_io);
        }

        sysfs_hprint(dirty_data,
                     bcache_dev_sectors_dirty(&dc->disk) << 9);

        sysfs_hprint(stripe_size,        ((uint64_t)dc->disk.stripe_size) << 9);
        var_printf(partial_stripes_expensive,   "%u");

        var_hprint(sequential_cutoff);
        var_hprint(readahead);

        sysfs_print(running,            atomic_read(&dc->running));
        sysfs_print(state,              states[BDEV_STATE(&dc->sb)]);

        if (attr == &sysfs_label) {
                memcpy(buf, dc->sb.label, SB_LABEL_SIZE);
                buf[SB_LABEL_SIZE] = '\0';
                strcat(buf, "\n");
                return strlen(buf);
        }

        if (attr == &sysfs_backing_dev_name) {
                snprintf(buf, BDEVNAME_SIZE + 1, "%s", dc->backing_dev_name);
                strcat(buf, "\n");
                return strlen(buf);
        }

        if (attr == &sysfs_backing_dev_uuid) {
                /* convert binary uuid into 36-byte string plus '\0' */
                snprintf(buf, 36+1, "%pU", dc->sb.uuid);
                strcat(buf, "\n");
                return strlen(buf);
        }

#undef var
        return 0;
}
SHOW_LOCKED(bch_cached_dev)

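/*
 * __cached_dev_store() does the actual work; bch_cached_dev_store()
 * further below wraps it with bch_register_lock held and kicks the
 * writeback thread and rate-update worker once the new values are
 * visible.
 */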
STORE(__cached_dev)
{
        struct cached_dev *dc = container_of(kobj, struct cached_dev,
                                             disk.kobj);
        ssize_t v;
        struct cache_set *c;
        struct kobj_uevent_env *env;

#define d_strtoul(var)          sysfs_strtoul(var, dc->var)
#define d_strtoul_nonzero(var)  sysfs_strtoul_clamp(var, dc->var, 1, INT_MAX)
#define d_strtoi_h(var)         sysfs_hatoi(var, dc->var)

        sysfs_strtoul(data_csum,        dc->disk.data_csum);
        d_strtoul(verify);
        sysfs_strtoul_bool(bypass_torture_test, dc->bypass_torture_test);
        sysfs_strtoul_bool(writeback_metadata, dc->writeback_metadata);
        sysfs_strtoul_bool(writeback_running, dc->writeback_running);
        sysfs_strtoul_clamp(writeback_delay, dc->writeback_delay, 0, UINT_MAX);

        sysfs_strtoul_clamp(writeback_percent, dc->writeback_percent,
                            0, bch_cutoff_writeback);

        if (attr == &sysfs_writeback_rate) {
                ssize_t ret;
                long int v = atomic_long_read(&dc->writeback_rate.rate);

                ret = strtoul_safe_clamp(buf, v, 1, INT_MAX);

                if (!ret) {
                        atomic_long_set(&dc->writeback_rate.rate, v);
                        ret = size;
                }

                return ret;
        }

        sysfs_strtoul_clamp(writeback_rate_update_seconds,
                            dc->writeback_rate_update_seconds,
                            1, WRITEBACK_RATE_UPDATE_SECS_MAX);
        sysfs_strtoul_clamp(writeback_rate_i_term_inverse,
                            dc->writeback_rate_i_term_inverse,
                            1, UINT_MAX);
        sysfs_strtoul_clamp(writeback_rate_p_term_inverse,
                            dc->writeback_rate_p_term_inverse,
                            1, UINT_MAX);
        sysfs_strtoul_clamp(writeback_rate_minimum,
                            dc->writeback_rate_minimum,
                            1, UINT_MAX);

        sysfs_strtoul_clamp(io_error_limit, dc->error_limit, 0, INT_MAX);

        if (attr == &sysfs_io_disable) {
                int v = strtoul_or_return(buf);

                dc->io_disable = v ? 1 : 0;
        }

        sysfs_strtoul_clamp(sequential_cutoff,
                            dc->sequential_cutoff,
                            0, UINT_MAX);
        d_strtoi_h(readahead);

        if (attr == &sysfs_clear_stats)
                bch_cache_accounting_clear(&dc->accounting);

        if (attr == &sysfs_running &&
            strtoul_or_return(buf))
                bch_cached_dev_run(dc);

        if (attr == &sysfs_cache_mode) {
                v = __sysfs_match_string(bch_cache_modes, -1, buf);
                if (v < 0)
                        return v;

                if ((unsigned int) v != BDEV_CACHE_MODE(&dc->sb)) {
                        SET_BDEV_CACHE_MODE(&dc->sb, v);
                        bch_write_bdev_super(dc, NULL);
                }
        }

        if (attr == &sysfs_stop_when_cache_set_failed) {
                v = __sysfs_match_string(bch_stop_on_failure_modes, -1, buf);
                if (v < 0)
                        return v;

                dc->stop_when_cache_set_failed = v;
        }

        if (attr == &sysfs_label) {
                if (size > SB_LABEL_SIZE)
                        return -EINVAL;
                memcpy(dc->sb.label, buf, size);
                if (size < SB_LABEL_SIZE)
                        dc->sb.label[size] = '\0';
                if (size && dc->sb.label[size - 1] == '\n')
                        dc->sb.label[size - 1] = '\0';
                bch_write_bdev_super(dc, NULL);
                if (dc->disk.c) {
                        memcpy(dc->disk.c->uuids[dc->disk.id].label,
                               buf, SB_LABEL_SIZE);
                        bch_uuid_write(dc->disk.c);
                }
                env = kzalloc(sizeof(struct kobj_uevent_env), GFP_KERNEL);
                if (!env)
                        return -ENOMEM;
                add_uevent_var(env, "DRIVER=bcache");
                add_uevent_var(env, "CACHED_UUID=%pU", dc->sb.uuid);
                add_uevent_var(env, "CACHED_LABEL=%s", buf);
                kobject_uevent_env(&disk_to_dev(dc->disk.disk)->kobj,
                                   KOBJ_CHANGE,
                                   env->envp);
                kfree(env);
        }

        if (attr == &sysfs_attach) {
                uint8_t         set_uuid[16];

                if (bch_parse_uuid(buf, set_uuid) < 16)
                        return -EINVAL;

                v = -ENOENT;
                list_for_each_entry(c, &bch_cache_sets, list) {
                        v = bch_cached_dev_attach(dc, c, set_uuid);
                        if (!v)
                                return size;
                }
                if (v == -ENOENT)
                        pr_err("Can't attach %s: cache set not found", buf);
                return v;
        }

        if (attr == &sysfs_detach && dc->disk.c)
                bch_cached_dev_detach(dc);

        if (attr == &sysfs_stop)
                bcache_device_stop(&dc->disk);

        return size;
}

STORE(bch_cached_dev)
{
        struct cached_dev *dc = container_of(kobj, struct cached_dev,
                                             disk.kobj);

        mutex_lock(&bch_register_lock);
        size = __cached_dev_store(kobj, attr, buf, size);

        if (attr == &sysfs_writeback_running) {
                /* dc->writeback_running changed in __cached_dev_store() */
                if (IS_ERR_OR_NULL(dc->writeback_thread)) {
                        /*
                         * Reject setting it to 1 via sysfs if the writeback
                         * kthread has not been created yet.
                         */
                        if (dc->writeback_running) {
                                dc->writeback_running = false;
                                pr_err("%s: failed to run non-existent writeback thread",
                                                dc->disk.disk->disk_name);
                        }
                } else
                        /*
                         * The writeback kthread itself checks whether
                         * dc->writeback_running is true or false.
                         */
                        bch_writeback_queue(dc);
        }

        if (attr == &sysfs_writeback_percent)
                if (!test_and_set_bit(BCACHE_DEV_WB_RUNNING, &dc->disk.flags))
                        schedule_delayed_work(&dc->writeback_rate_update,
                                      dc->writeback_rate_update_seconds * HZ);

        mutex_unlock(&bch_register_lock);
        return size;
}

static struct attribute *bch_cached_dev_files[] = {
        &sysfs_attach,
        &sysfs_detach,
        &sysfs_stop,
#if 0
        &sysfs_data_csum,
#endif
        &sysfs_cache_mode,
        &sysfs_stop_when_cache_set_failed,
        &sysfs_writeback_metadata,
        &sysfs_writeback_running,
        &sysfs_writeback_delay,
        &sysfs_writeback_percent,
        &sysfs_writeback_rate,
        &sysfs_writeback_rate_update_seconds,
        &sysfs_writeback_rate_i_term_inverse,
        &sysfs_writeback_rate_p_term_inverse,
        &sysfs_writeback_rate_minimum,
        &sysfs_writeback_rate_debug,
        &sysfs_errors,
        &sysfs_io_error_limit,
        &sysfs_io_disable,
        &sysfs_dirty_data,
        &sysfs_stripe_size,
        &sysfs_partial_stripes_expensive,
        &sysfs_sequential_cutoff,
        &sysfs_clear_stats,
        &sysfs_running,
        &sysfs_state,
        &sysfs_label,
        &sysfs_readahead,
#ifdef CONFIG_BCACHE_DEBUG
        &sysfs_verify,
        &sysfs_bypass_torture_test,
#endif
        &sysfs_backing_dev_name,
        &sysfs_backing_dev_uuid,
        NULL
};
KTYPE(bch_cached_dev);

SHOW(bch_flash_dev)
{
        struct bcache_device *d = container_of(kobj, struct bcache_device,
                                               kobj);
        struct uuid_entry *u = &d->c->uuids[d->id];

        sysfs_printf(data_csum, "%i", d->data_csum);
        sysfs_hprint(size,      u->sectors << 9);

        if (attr == &sysfs_label) {
                memcpy(buf, u->label, SB_LABEL_SIZE);
                buf[SB_LABEL_SIZE] = '\0';
                strcat(buf, "\n");
                return strlen(buf);
        }

        return 0;
}

STORE(__bch_flash_dev)
{
        struct bcache_device *d = container_of(kobj, struct bcache_device,
                                               kobj);
        struct uuid_entry *u = &d->c->uuids[d->id];

        sysfs_strtoul(data_csum,        d->data_csum);

        if (attr == &sysfs_size) {
                uint64_t v;

                strtoi_h_or_return(buf, v);

                u->sectors = v >> 9;
                bch_uuid_write(d->c);
                set_capacity(d->disk, u->sectors);
        }

        if (attr == &sysfs_label) {
                memcpy(u->label, buf, SB_LABEL_SIZE);
                bch_uuid_write(d->c);
        }

        if (attr == &sysfs_unregister) {
                set_bit(BCACHE_DEV_DETACHING, &d->flags);
                bcache_device_stop(d);
        }

        return size;
}
STORE_LOCKED(bch_flash_dev)

static struct attribute *bch_flash_dev_files[] = {
        &sysfs_unregister,
#if 0
        &sysfs_data_csum,
#endif
        &sysfs_label,
        &sysfs_size,
        NULL
};
KTYPE(bch_flash_dev);

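/*
 * bset statistics are gathered by walking the whole btree:
 * bch_btree_map_nodes() calls bch_btree_bset_stats() on every node,
 * which accumulates that node's key statistics into the embedded op.
 */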
struct bset_stats_op {
        struct btree_op op;
        size_t nodes;
        struct bset_stats stats;
};

static int bch_btree_bset_stats(struct btree_op *b_op, struct btree *b)
{
        struct bset_stats_op *op = container_of(b_op, struct bset_stats_op, op);

        op->nodes++;
        bch_btree_keys_stats(&b->keys, &op->stats);

        return MAP_CONTINUE;
}

static int bch_bset_print_stats(struct cache_set *c, char *buf)
{
        struct bset_stats_op op;
        int ret;

        memset(&op, 0, sizeof(op));
        bch_btree_op_init(&op.op, -1);

        ret = bch_btree_map_nodes(&op.op, c, &ZERO_KEY, bch_btree_bset_stats);
        if (ret < 0)
                return ret;

        return snprintf(buf, PAGE_SIZE,
                        "btree nodes:           %zu\n"
                        "written sets:          %zu\n"
                        "unwritten sets:        %zu\n"
                        "written key bytes:     %zu\n"
                        "unwritten key bytes:   %zu\n"
                        "floats:                %zu\n"
                        "failed:                %zu\n",
                        op.nodes,
                        op.stats.sets_written, op.stats.sets_unwritten,
                        op.stats.bytes_written, op.stats.bytes_unwritten,
                        op.stats.floats, op.stats.failed);
}

static unsigned int bch_root_usage(struct cache_set *c)
{
        unsigned int bytes = 0;
        struct bkey *k;
        struct btree *b;
        struct btree_iter iter;

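        /*
         * The root node may be replaced (e.g. by a split) while we wait
         * for its lock, so retry until the node we locked is still the
         * current root.
         */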
        goto lock_root;

        do {
                rw_unlock(false, b);
lock_root:
                b = c->root;
                rw_lock(false, b, b->level);
        } while (b != c->root);

        for_each_key_filter(&b->keys, k, &iter, bch_ptr_bad)
                bytes += bkey_bytes(k);

        rw_unlock(false, b);

        return (bytes * 100) / btree_bytes(c);
}

static size_t bch_cache_size(struct cache_set *c)
{
        size_t ret = 0;
        struct btree *b;

        mutex_lock(&c->bucket_lock);
        list_for_each_entry(b, &c->btree_cache, list)
                ret += 1 << (b->keys.page_order + PAGE_SHIFT);

        mutex_unlock(&c->bucket_lock);
        return ret;
}

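/*
 * Length of the longest chain in the btree node hash table, a rough
 * measure of how evenly the cached nodes are distributed.
 */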
static unsigned int bch_cache_max_chain(struct cache_set *c)
{
        unsigned int ret = 0;
        struct hlist_head *h;

        mutex_lock(&c->bucket_lock);

        for (h = c->bucket_hash;
             h < c->bucket_hash + (1 << BUCKET_HASH_BITS);
             h++) {
                unsigned int i = 0;
                struct hlist_node *p;

                hlist_for_each(p, h)
                        i++;

                ret = max(ret, i);
        }

        mutex_unlock(&c->bucket_lock);
        return ret;
}

static unsigned int bch_btree_used(struct cache_set *c)
{
        return div64_u64(c->gc_stats.key_bytes * 100,
                         (c->gc_stats.nodes ?: 1) * btree_bytes(c));
}

static unsigned int bch_average_key_size(struct cache_set *c)
{
        return c->gc_stats.nkeys
                ? div64_u64(c->gc_stats.data, c->gc_stats.nkeys)
                : 0;
}

SHOW(__bch_cache_set)
{
        struct cache_set *c = container_of(kobj, struct cache_set, kobj);

        sysfs_print(synchronous,                CACHE_SYNC(&c->sb));
        sysfs_print(journal_delay_ms,           c->journal_delay_ms);
        sysfs_hprint(bucket_size,               bucket_bytes(c));
        sysfs_hprint(block_size,                block_bytes(c));
        sysfs_print(tree_depth,                 c->root->level);
        sysfs_print(root_usage_percent,         bch_root_usage(c));

        sysfs_hprint(btree_cache_size,          bch_cache_size(c));
        sysfs_print(btree_cache_max_chain,      bch_cache_max_chain(c));
        sysfs_print(cache_available_percent,    100 - c->gc_stats.in_use);

        sysfs_print_time_stats(&c->btree_gc_time,       btree_gc, sec, ms);
        sysfs_print_time_stats(&c->btree_split_time,    btree_split, sec, us);
        sysfs_print_time_stats(&c->sort.time,           btree_sort, ms, us);
        sysfs_print_time_stats(&c->btree_read_time,     btree_read, ms, us);

        sysfs_print(btree_used_percent, bch_btree_used(c));
        sysfs_print(btree_nodes,        c->gc_stats.nodes);
        sysfs_hprint(average_key_size,  bch_average_key_size(c));

        sysfs_print(cache_read_races,
                    atomic_long_read(&c->cache_read_races));

        sysfs_print(reclaim,
                    atomic_long_read(&c->reclaim));

        sysfs_print(flush_write,
                    atomic_long_read(&c->flush_write));

        sysfs_print(retry_flush_write,
                    atomic_long_read(&c->retry_flush_write));

        sysfs_print(writeback_keys_done,
                    atomic_long_read(&c->writeback_keys_done));
        sysfs_print(writeback_keys_failed,
                    atomic_long_read(&c->writeback_keys_failed));

        if (attr == &sysfs_errors)
                return bch_snprint_string_list(buf, PAGE_SIZE, error_actions,
                                               c->on_error);

        /* See count_io_errors() for why 88 */
        sysfs_print(io_error_halflife,  c->error_decay * 88);
        sysfs_print(io_error_limit,     c->error_limit);

        sysfs_hprint(congested,
                     ((uint64_t) bch_get_congested(c)) << 9);
        sysfs_print(congested_read_threshold_us,
                    c->congested_read_threshold_us);
        sysfs_print(congested_write_threshold_us,
                    c->congested_write_threshold_us);

        sysfs_print(cutoff_writeback, bch_cutoff_writeback);
        sysfs_print(cutoff_writeback_sync, bch_cutoff_writeback_sync);

        sysfs_print(active_journal_entries,     fifo_used(&c->journal.pin));
        sysfs_printf(verify,                    "%i", c->verify);
        sysfs_printf(key_merging_disabled,      "%i", c->key_merging_disabled);
        sysfs_printf(expensive_debug_checks,
                     "%i", c->expensive_debug_checks);
        sysfs_printf(gc_always_rewrite,         "%i", c->gc_always_rewrite);
        sysfs_printf(btree_shrinker_disabled,   "%i", c->shrinker_disabled);
        sysfs_printf(copy_gc_enabled,           "%i", c->copy_gc_enabled);
        sysfs_printf(gc_after_writeback,        "%i", c->gc_after_writeback);
        sysfs_printf(io_disable,                "%i",
                     test_bit(CACHE_SET_IO_DISABLE, &c->flags));

        if (attr == &sysfs_bset_tree_stats)
                return bch_bset_print_stats(c, buf);

        return 0;
}
SHOW_LOCKED(bch_cache_set)

STORE(__bch_cache_set)
{
        struct cache_set *c = container_of(kobj, struct cache_set, kobj);
        ssize_t v;

        if (attr == &sysfs_unregister)
                bch_cache_set_unregister(c);

        if (attr == &sysfs_stop)
                bch_cache_set_stop(c);

        if (attr == &sysfs_synchronous) {
                bool sync = strtoul_or_return(buf);

                if (sync != CACHE_SYNC(&c->sb)) {
                        SET_CACHE_SYNC(&c->sb, sync);
                        bcache_write_super(c);
                }
        }

        if (attr == &sysfs_flash_vol_create) {
                int r;
                uint64_t v;

                strtoi_h_or_return(buf, v);

                r = bch_flash_dev_create(c, v);
                if (r)
                        return r;
        }

        if (attr == &sysfs_clear_stats) {
                atomic_long_set(&c->writeback_keys_done,        0);
                atomic_long_set(&c->writeback_keys_failed,      0);

                memset(&c->gc_stats, 0, sizeof(struct gc_stat));
                bch_cache_accounting_clear(&c->accounting);
        }

        if (attr == &sysfs_trigger_gc)
                force_wake_up_gc(c);

        if (attr == &sysfs_prune_cache) {
                struct shrink_control sc;

                sc.gfp_mask = GFP_KERNEL;
                sc.nr_to_scan = strtoul_or_return(buf);
                c->shrink.scan_objects(&c->shrink, &sc);
        }

        sysfs_strtoul_clamp(congested_read_threshold_us,
                            c->congested_read_threshold_us,
                            0, UINT_MAX);
        sysfs_strtoul_clamp(congested_write_threshold_us,
                            c->congested_write_threshold_us,
                            0, UINT_MAX);

        if (attr == &sysfs_errors) {
                v = __sysfs_match_string(error_actions, -1, buf);
                if (v < 0)
                        return v;

                c->on_error = v;
        }

        sysfs_strtoul_clamp(io_error_limit, c->error_limit, 0, UINT_MAX);

        /* See count_io_errors() for why 88 */
        if (attr == &sysfs_io_error_halflife) {
                unsigned long v = 0;
                ssize_t ret;

                ret = strtoul_safe_clamp(buf, v, 0, UINT_MAX);
                if (!ret) {
                        c->error_decay = v / 88;
                        return size;
                }
                return ret;
        }

        if (attr == &sysfs_io_disable) {
                v = strtoul_or_return(buf);
                if (v) {
                        if (test_and_set_bit(CACHE_SET_IO_DISABLE,
                                             &c->flags))
                                pr_warn("CACHE_SET_IO_DISABLE already set");
                } else {
                        if (!test_and_clear_bit(CACHE_SET_IO_DISABLE,
                                                &c->flags))
                                pr_warn("CACHE_SET_IO_DISABLE already cleared");
                }
        }

        sysfs_strtoul_clamp(journal_delay_ms,
                            c->journal_delay_ms,
                            0, USHRT_MAX);
        sysfs_strtoul_bool(verify,              c->verify);
        sysfs_strtoul_bool(key_merging_disabled, c->key_merging_disabled);
        sysfs_strtoul(expensive_debug_checks,   c->expensive_debug_checks);
        sysfs_strtoul_bool(gc_always_rewrite,   c->gc_always_rewrite);
        sysfs_strtoul_bool(btree_shrinker_disabled, c->shrinker_disabled);
        sysfs_strtoul_bool(copy_gc_enabled,     c->copy_gc_enabled);
        /*
         * Writing gc_after_writeback here may overwrite an already-set
         * BCH_DO_AUTO_GC; that is harmless, because the flag will be set
         * again at the next opportunity.
         */
        sysfs_strtoul_clamp(gc_after_writeback, c->gc_after_writeback, 0, 1);

        return size;
}
STORE_LOCKED(bch_cache_set)

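/*
 * The "internal" kobject is a subdirectory of the cache set's kobject;
 * its show/store handlers simply redirect to the parent's, so both
 * directories share one implementation.
 */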
SHOW(bch_cache_set_internal)
{
        struct cache_set *c = container_of(kobj, struct cache_set, internal);

        return bch_cache_set_show(&c->kobj, attr, buf);
}

STORE(bch_cache_set_internal)
{
        struct cache_set *c = container_of(kobj, struct cache_set, internal);

        return bch_cache_set_store(&c->kobj, attr, buf, size);
}

static void bch_cache_set_internal_release(struct kobject *k)
{
}

static struct attribute *bch_cache_set_files[] = {
        &sysfs_unregister,
        &sysfs_stop,
        &sysfs_synchronous,
        &sysfs_journal_delay_ms,
        &sysfs_flash_vol_create,

        &sysfs_bucket_size,
        &sysfs_block_size,
        &sysfs_tree_depth,
        &sysfs_root_usage_percent,
        &sysfs_btree_cache_size,
        &sysfs_cache_available_percent,

        &sysfs_average_key_size,

        &sysfs_errors,
        &sysfs_io_error_limit,
        &sysfs_io_error_halflife,
        &sysfs_congested,
        &sysfs_congested_read_threshold_us,
        &sysfs_congested_write_threshold_us,
        &sysfs_clear_stats,
        NULL
};
KTYPE(bch_cache_set);

static struct attribute *bch_cache_set_internal_files[] = {
        &sysfs_active_journal_entries,

        sysfs_time_stats_attribute_list(btree_gc, sec, ms)
        sysfs_time_stats_attribute_list(btree_split, sec, us)
        sysfs_time_stats_attribute_list(btree_sort, ms, us)
        sysfs_time_stats_attribute_list(btree_read, ms, us)

        &sysfs_btree_nodes,
        &sysfs_btree_used_percent,
        &sysfs_btree_cache_max_chain,

        &sysfs_bset_tree_stats,
        &sysfs_cache_read_races,
        &sysfs_reclaim,
        &sysfs_flush_write,
        &sysfs_retry_flush_write,
        &sysfs_writeback_keys_done,
        &sysfs_writeback_keys_failed,

        &sysfs_trigger_gc,
        &sysfs_prune_cache,
#ifdef CONFIG_BCACHE_DEBUG
        &sysfs_verify,
        &sysfs_key_merging_disabled,
        &sysfs_expensive_debug_checks,
#endif
        &sysfs_gc_always_rewrite,
        &sysfs_btree_shrinker_disabled,
        &sysfs_copy_gc_enabled,
        &sysfs_gc_after_writeback,
        &sysfs_io_disable,
        &sysfs_cutoff_writeback,
        &sysfs_cutoff_writeback_sync,
        NULL
};
KTYPE(bch_cache_set_internal);

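/* Comparator for sort(): descending order of bucket priorities. */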
static int __bch_cache_cmp(const void *l, const void *r)
{
        return *((uint16_t *)r) - *((uint16_t *)l);
}

SHOW(__bch_cache)
{
        struct cache *ca = container_of(kobj, struct cache, kobj);

        sysfs_hprint(bucket_size,       bucket_bytes(ca));
        sysfs_hprint(block_size,        block_bytes(ca));
        sysfs_print(nbuckets,           ca->sb.nbuckets);
        sysfs_print(discard,            ca->discard);
        sysfs_hprint(written, atomic_long_read(&ca->sectors_written) << 9);
        sysfs_hprint(btree_written,
                     atomic_long_read(&ca->btree_sectors_written) << 9);
        sysfs_hprint(metadata_written,
                     (atomic_long_read(&ca->meta_sectors_written) +
                      atomic_long_read(&ca->btree_sectors_written)) << 9);

        sysfs_print(io_errors,
                    atomic_read(&ca->io_errors) >> IO_ERROR_SHIFT);

        if (attr == &sysfs_cache_replacement_policy)
                return bch_snprint_string_list(buf, PAGE_SIZE,
                                               cache_replacement_policies,
                                               CACHE_REPLACEMENT(&ca->sb));

        if (attr == &sysfs_priority_stats) {
                struct bucket *b;
                size_t n = ca->sb.nbuckets, i;
                size_t unused = 0, available = 0, dirty = 0, meta = 0;
                uint64_t sum = 0;
                /* Compute 31 quantiles */
                uint16_t q[31], *p, *cached;
                ssize_t ret;

                cached = p = vmalloc(array_size(sizeof(uint16_t),
                                                ca->sb.nbuckets));
                if (!p)
                        return -ENOMEM;

                mutex_lock(&ca->set->bucket_lock);
                for_each_bucket(b, ca) {
                        if (!GC_SECTORS_USED(b))
                                unused++;
                        if (GC_MARK(b) == GC_MARK_RECLAIMABLE)
                                available++;
                        if (GC_MARK(b) == GC_MARK_DIRTY)
                                dirty++;
                        if (GC_MARK(b) == GC_MARK_METADATA)
                                meta++;
                }

                for (i = ca->sb.first_bucket; i < n; i++)
                        p[i] = ca->buckets[i].prio;
                mutex_unlock(&ca->set->bucket_lock);

                sort(p, n, sizeof(uint16_t), __bch_cache_cmp, NULL);

                while (n &&
                       !cached[n - 1])
                        --n;

                while (cached < p + n &&
                       *cached == BTREE_PRIO)
                        cached++, n--;

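                /*
                 * p[] is now sorted in descending order; trailing zero
                 * priorities (unused buckets) have been trimmed and
                 * leading BTREE_PRIO entries (metadata buckets) skipped,
                 * so [cached, cached + n) holds the cached data buckets.
                 */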
                for (i = 0; i < n; i++)
                        sum += INITIAL_PRIO - cached[i];

                if (n)
                        do_div(sum, n);

                for (i = 0; i < ARRAY_SIZE(q); i++)
                        q[i] = INITIAL_PRIO - cached[n * (i + 1) /
                                (ARRAY_SIZE(q) + 1)];

                vfree(p);

                ret = scnprintf(buf, PAGE_SIZE,
                                "Unused:        %zu%%\n"
                                "Clean:         %zu%%\n"
                                "Dirty:         %zu%%\n"
                                "Metadata:      %zu%%\n"
                                "Average:       %llu\n"
                                "Sectors per Q: %zu\n"
                                "Quantiles:     [",
                                unused * 100 / (size_t) ca->sb.nbuckets,
                                available * 100 / (size_t) ca->sb.nbuckets,
                                dirty * 100 / (size_t) ca->sb.nbuckets,
                                meta * 100 / (size_t) ca->sb.nbuckets, sum,
                                n * ca->sb.bucket_size / (ARRAY_SIZE(q) + 1));

                for (i = 0; i < ARRAY_SIZE(q); i++)
                        ret += scnprintf(buf + ret, PAGE_SIZE - ret,
                                         "%u ", q[i]);
                ret--;

                ret += scnprintf(buf + ret, PAGE_SIZE - ret, "]\n");

                return ret;
        }

        return 0;
}
SHOW_LOCKED(bch_cache)

STORE(__bch_cache)
{
        struct cache *ca = container_of(kobj, struct cache, kobj);
        ssize_t v;

        if (attr == &sysfs_discard) {
                bool v = strtoul_or_return(buf);

                if (blk_queue_discard(bdev_get_queue(ca->bdev)))
                        ca->discard = v;

                if (v != CACHE_DISCARD(&ca->sb)) {
                        SET_CACHE_DISCARD(&ca->sb, v);
                        bcache_write_super(ca->set);
                }
        }

        if (attr == &sysfs_cache_replacement_policy) {
                v = __sysfs_match_string(cache_replacement_policies, -1, buf);
                if (v < 0)
                        return v;

                if ((unsigned int) v != CACHE_REPLACEMENT(&ca->sb)) {
                        mutex_lock(&ca->set->bucket_lock);
                        SET_CACHE_REPLACEMENT(&ca->sb, v);
                        mutex_unlock(&ca->set->bucket_lock);

                        bcache_write_super(ca->set);
                }
        }

        if (attr == &sysfs_clear_stats) {
                atomic_long_set(&ca->sectors_written, 0);
                atomic_long_set(&ca->btree_sectors_written, 0);
                atomic_long_set(&ca->meta_sectors_written, 0);
                atomic_set(&ca->io_count, 0);
                atomic_set(&ca->io_errors, 0);
        }

        return size;
}
STORE_LOCKED(bch_cache)

static struct attribute *bch_cache_files[] = {
        &sysfs_bucket_size,
        &sysfs_block_size,
        &sysfs_nbuckets,
        &sysfs_priority_stats,
        &sysfs_discard,
        &sysfs_written,
        &sysfs_btree_written,
        &sysfs_metadata_written,
        &sysfs_io_errors,
        &sysfs_clear_stats,
        &sysfs_cache_replacement_policy,
        NULL
};
KTYPE(bch_cache);