// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/workqueue.h>
#include <linux/smp.h>

#include <linux/blk-mq.h>
#include "blk-mq.h"
#include "blk-mq-tag.h"

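/*
 * kobject release callbacks.  blk_mq_sysfs_release() frees the per-queue
 * blk_mq_ctxs container (and its percpu queue_ctx array) once the last
 * reference on the queue's mq kobject is dropped.
 */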
static void blk_mq_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_ctxs *ctxs = container_of(kobj, struct blk_mq_ctxs, kobj);

	free_percpu(ctxs->queue_ctx);
	kfree(ctxs);
}

static void blk_mq_ctx_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_ctx *ctx = container_of(kobj, struct blk_mq_ctx, kobj);

	/* ctx->ctxs won't be released until all ctx are freed */
	kobject_put(&ctx->ctxs->kobj);
}

static void blk_mq_hw_sysfs_release(struct kobject *kobj)
{
	struct blk_mq_hw_ctx *hctx = container_of(kobj, struct blk_mq_hw_ctx,
						  kobj);
	free_cpumask_var(hctx->cpumask);
	kfree(hctx->ctxs);
	kfree(hctx);
}

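/*
 * Attribute descriptors: each sysfs file is backed by one of these
 * entries, pairing the attribute with typed show/store callbacks for
 * either a software context (ctx) or a hardware context (hctx).
 */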
struct blk_mq_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_ctx *, char *);
	ssize_t (*store)(struct blk_mq_ctx *, const char *, size_t);
};

struct blk_mq_hw_ctx_sysfs_entry {
	struct attribute attr;
	ssize_t (*show)(struct blk_mq_hw_ctx *, char *);
	ssize_t (*store)(struct blk_mq_hw_ctx *, const char *, size_t);
};

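/*
 * Generic show/store dispatchers for the sysfs_ops below.  They recover
 * the entry and the ctx/hctx from the attribute and kobject, then call
 * the typed callback under q->sysfs_lock, returning -ENOENT once the
 * queue is dying.
 */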
static ssize_t blk_mq_sysfs_show(struct kobject *kobj, struct attribute *attr,
				 char *page)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->show)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(ctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_sysfs_store(struct kobject *kobj, struct attribute *attr,
				  const char *page, size_t length)
{
	struct blk_mq_ctx_sysfs_entry *entry;
	struct blk_mq_ctx *ctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_ctx_sysfs_entry, attr);
	ctx = container_of(kobj, struct blk_mq_ctx, kobj);
	q = ctx->queue;

	if (!entry->store)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(ctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_show(struct kobject *kobj,
				    struct attribute *attr, char *page)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->show)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->show(hctx, page);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
				     struct attribute *attr, const char *page,
				     size_t length)
{
	struct blk_mq_hw_ctx_sysfs_entry *entry;
	struct blk_mq_hw_ctx *hctx;
	struct request_queue *q;
	ssize_t res;

	entry = container_of(attr, struct blk_mq_hw_ctx_sysfs_entry, attr);
	hctx = container_of(kobj, struct blk_mq_hw_ctx, kobj);
	q = hctx->queue;

	if (!entry->store)
		return -EIO;

	res = -ENOENT;
	mutex_lock(&q->sysfs_lock);
	if (!blk_queue_dying(q))
		res = entry->store(hctx, page, length);
	mutex_unlock(&q->sysfs_lock);
	return res;
}

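/*
 * Per-hctx attribute show helpers: the tag counts come straight from
 * the hctx's tag set, and cpu_list prints the CPUs mapped to this
 * hardware queue as a comma-separated list.
 */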
static ssize_t blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx,
					    char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_tags);
}

static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx,
						     char *page)
{
	return sprintf(page, "%u\n", hctx->tags->nr_reserved_tags);
}

static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
{
	unsigned int i, first = 1;
	ssize_t ret = 0;

	for_each_cpu(i, hctx->cpumask) {
		if (first)
			ret += sprintf(ret + page, "%u", i);
		else
			ret += sprintf(ret + page, ", %u", i);

		first = 0;
	}

	ret += sprintf(ret + page, "\n");
	return ret;
}

static struct attribute *default_ctx_attrs[] = {
	NULL,
};

static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = {
	.attr = {.name = "nr_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_reserved_tags = {
	.attr = {.name = "nr_reserved_tags", .mode = 0444 },
	.show = blk_mq_hw_sysfs_nr_reserved_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
	.attr = {.name = "cpu_list", .mode = 0444 },
	.show = blk_mq_hw_sysfs_cpus_show,
};

static struct attribute *default_hw_ctx_attrs[] = {
	&blk_mq_hw_sysfs_nr_tags.attr,
	&blk_mq_hw_sysfs_nr_reserved_tags.attr,
	&blk_mq_hw_sysfs_cpus.attr,
	NULL,
};

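/*
 * kobject plumbing: the sysfs_ops route show/store through the
 * dispatchers above, and the kobj_types tie default attributes and
 * release handlers to the queue, ctx and hctx kobjects.
 */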
static const struct sysfs_ops blk_mq_sysfs_ops = {
	.show = blk_mq_sysfs_show,
	.store = blk_mq_sysfs_store,
};

static const struct sysfs_ops blk_mq_hw_sysfs_ops = {
	.show = blk_mq_hw_sysfs_show,
	.store = blk_mq_hw_sysfs_store,
};

static struct kobj_type blk_mq_ktype = {
	.sysfs_ops = &blk_mq_sysfs_ops,
	.release = blk_mq_sysfs_release,
};

static struct kobj_type blk_mq_ctx_ktype = {
	.sysfs_ops = &blk_mq_sysfs_ops,
	.default_attrs = default_ctx_attrs,
	.release = blk_mq_ctx_sysfs_release,
};

static struct kobj_type blk_mq_hw_ktype = {
	.sysfs_ops = &blk_mq_hw_sysfs_ops,
	.default_attrs = default_hw_ctx_attrs,
	.release = blk_mq_hw_sysfs_release,
};

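/*
 * Register/unregister one hardware context under the queue's mq
 * directory: a directory named after the hctx number, with one cpu<N>
 * child kobject per mapped software context.  Hardware contexts with
 * no mapped software context are skipped.
 */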
static void blk_mq_unregister_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_ctx *ctx;
	int i;

	if (!hctx->nr_ctx)
		return;

	hctx_for_each_ctx(hctx, ctx, i)
		kobject_del(&ctx->kobj);

	kobject_del(&hctx->kobj);
}

static int blk_mq_register_hctx(struct blk_mq_hw_ctx *hctx)
{
	struct request_queue *q = hctx->queue;
	struct blk_mq_ctx *ctx;
	int i, ret;

	if (!hctx->nr_ctx)
		return 0;

	ret = kobject_add(&hctx->kobj, q->mq_kobj, "%u", hctx->queue_num);
	if (ret)
		return ret;

	hctx_for_each_ctx(hctx, ctx, i) {
		ret = kobject_add(&ctx->kobj, &hctx->kobj, "cpu%u", ctx->cpu);
		if (ret)
			break;
	}

	return ret;
}

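/*
 * Tear down the whole mq sysfs hierarchy for a queue that is going
 * away, dropping the device reference taken at registration time.
 */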
void blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	lockdep_assert_held(&q->sysfs_lock);

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);

	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
	kobject_del(q->mq_kobj);
	kobject_put(&dev->kobj);

	q->mq_sysfs_init_done = false;
}

void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx)
{
	kobject_init(&hctx->kobj, &blk_mq_hw_ktype);
}

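/*
 * Init/deinit of the kobjects themselves, done once per queue: every
 * per-cpu ctx kobject holds a reference on the queue's mq_kobj, so the
 * blk_mq_ctxs container is only released after all ctx kobjects are.
 */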
void blk_mq_sysfs_deinit(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);
		kobject_put(&ctx->kobj);
	}
	kobject_put(q->mq_kobj);
}

void blk_mq_sysfs_init(struct request_queue *q)
{
	struct blk_mq_ctx *ctx;
	int cpu;

	kobject_init(q->mq_kobj, &blk_mq_ktype);

	for_each_possible_cpu(cpu) {
		ctx = per_cpu_ptr(q->queue_ctx, cpu);

		kobject_get(q->mq_kobj);
		kobject_init(&ctx->kobj, &blk_mq_ctx_ktype);
	}
}

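/*
 * Add the "mq" directory under the device's sysfs directory and
 * register every hardware context beneath it.  On failure the already
 * registered hctxs are unwound and the device reference is dropped.
 */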
int __blk_mq_register_dev(struct device *dev, struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int ret, i;

	WARN_ON_ONCE(!q->kobj.parent);
	lockdep_assert_held(&q->sysfs_lock);

	ret = kobject_add(q->mq_kobj, kobject_get(&dev->kobj), "%s", "mq");
	if (ret < 0)
		goto out;

	kobject_uevent(q->mq_kobj, KOBJ_ADD);

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			goto unreg;
	}

	q->mq_sysfs_init_done = true;

out:
	return ret;

unreg:
	while (--i >= 0)
		blk_mq_unregister_hctx(q->queue_hw_ctx[i]);

	kobject_uevent(q->mq_kobj, KOBJ_REMOVE);
	kobject_del(q->mq_kobj);
	kobject_put(&dev->kobj);
	return ret;
}

int blk_mq_register_dev(struct device *dev, struct request_queue *q)
{
	int ret;

	mutex_lock(&q->sysfs_lock);
	ret = __blk_mq_register_dev(dev, q);
	mutex_unlock(&q->sysfs_lock);

	return ret;
}

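/*
 * Drop and re-create the per-hctx sysfs entries for an already
 * registered queue (e.g. when the number of hardware queues changes).
 * Both are no-ops until __blk_mq_register_dev() has completed.
 */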
void blk_mq_sysfs_unregister(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	mutex_lock(&q->sysfs_lock);
	if (!q->mq_sysfs_init_done)
		goto unlock;

	queue_for_each_hw_ctx(q, hctx, i)
		blk_mq_unregister_hctx(hctx);

unlock:
	mutex_unlock(&q->sysfs_lock);
}

int blk_mq_sysfs_register(struct request_queue *q)
{
	struct blk_mq_hw_ctx *hctx;
	int i, ret = 0;

	mutex_lock(&q->sysfs_lock);
	if (!q->mq_sysfs_init_done)
		goto unlock;

	queue_for_each_hw_ctx(q, hctx, i) {
		ret = blk_mq_register_hctx(hctx);
		if (ret)
			break;
	}

unlock:
	mutex_unlock(&q->sysfs_lock);

	return ret;
}