/* block/blk-mq-tag.c */
/*
 * Tag allocation using scalable bitmaps. Uses active queue tracking to support
 * fairer distribution of tags between multiple submitters when a shared tag map
 * is used.
 *
 * Copyright (C) 2013-2014 Jens Axboe
 */
#include <linux/kernel.h>
#include <linux/module.h>

#include <linux/blk-mq.h>
#include "blk.h"
#include "blk-mq.h"
#include "blk-mq-tag.h"

bool blk_mq_has_free_tags(struct blk_mq_tags *tags)
{
	if (!tags)
		return true;

	return sbitmap_any_bit_clear(&tags->bitmap_tags.sb);
}

/*
 * If a previously inactive queue goes active, bump the active user count.
 */
bool __blk_mq_tag_busy(struct blk_mq_hw_ctx *hctx)
{
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state) &&
	    !test_and_set_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		atomic_inc(&hctx->tags->active_queues);

	return true;
}

/*
 * Wake up all tasks potentially sleeping on tags
 */
void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool include_reserve)
{
	sbitmap_queue_wake_all(&tags->bitmap_tags);
	if (include_reserve)
		sbitmap_queue_wake_all(&tags->breserved_tags);
}

/*
 * If a previously busy queue goes inactive, potential waiters could now
 * be allowed to queue. Wake them up and check.
 */
void __blk_mq_tag_idle(struct blk_mq_hw_ctx *hctx)
{
	struct blk_mq_tags *tags = hctx->tags;

	if (!test_and_clear_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return;

	atomic_dec(&tags->active_queues);

	blk_mq_tag_wakeup_all(tags, false);
}

/*
 * For shared tag users, we track the number of currently active users
 * and attempt to provide a fair share of the tag depth for each of them.
 */
static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
				  struct sbitmap_queue *bt)
{
	unsigned int depth, users;

	if (!hctx || !(hctx->flags & BLK_MQ_F_TAG_SHARED))
		return true;
	if (!test_bit(BLK_MQ_S_TAG_ACTIVE, &hctx->state))
		return true;

	/*
	 * Don't try dividing an ant: a depth of 1 can't be split.
	 */
	if (bt->sb.depth == 1)
		return true;

	users = atomic_read(&hctx->tags->active_queues);
	if (!users)
		return true;

	/*
	 * Give each user a fair share of the depth, rounded up, but
	 * always allow at least 4 tags so progress can be made.
	 */
	depth = max((bt->sb.depth + users - 1) / users, 4U);
	return atomic_read(&hctx->nr_active) < depth;
}
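
/*
 * Worked example (illustrative, not part of the original file): with a
 * shared bitmap depth of 128 and 3 active queues, each queue may use up
 * to (128 + 3 - 1) / 3 = 43 tags. With 64 active queues the rounded-up
 * share would be 2, but the max(..., 4U) floor still permits 4 tags per
 * queue, so the sum of per-queue limits can exceed the real depth; the
 * limit is a fairness heuristic, not a hard partition.
 */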

static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
			    struct sbitmap_queue *bt)
{
	if (!(data->flags & BLK_MQ_REQ_INTERNAL) &&
	    !hctx_may_queue(data->hctx, bt))
		return -1;
	if (data->shallow_depth)
		return __sbitmap_queue_get_shallow(bt, data->shallow_depth);
	else
		return __sbitmap_queue_get(bt);
}

unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
	struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
	struct sbitmap_queue *bt;
	struct sbq_wait_state *ws;
	DEFINE_WAIT(wait);
	unsigned int tag_offset;
	bool drop_ctx;
	int tag;

	if (data->flags & BLK_MQ_REQ_RESERVED) {
		if (unlikely(!tags->nr_reserved_tags)) {
			WARN_ON_ONCE(1);
			return BLK_MQ_TAG_FAIL;
		}
		bt = &tags->breserved_tags;
		tag_offset = 0;
	} else {
		bt = &tags->bitmap_tags;
		tag_offset = tags->nr_reserved_tags;
	}

	tag = __blk_mq_get_tag(data, bt);
	if (tag != -1)
		goto found_tag;

	if (data->flags & BLK_MQ_REQ_NOWAIT)
		return BLK_MQ_TAG_FAIL;

	ws = bt_wait_ptr(bt, data->hctx);
	drop_ctx = data->ctx == NULL;
	do {
		prepare_to_wait(&ws->wait, &wait, TASK_UNINTERRUPTIBLE);

		tag = __blk_mq_get_tag(data, bt);
		if (tag != -1)
			break;

		/*
		 * We're out of tags on this hardware queue, kick any
		 * pending IO submits before going to sleep waiting for
		 * some to complete.
		 */
		blk_mq_run_hw_queue(data->hctx, false);

		/*
		 * Retry tag allocation after running the hardware queue,
		 * as running the queue may also have found completions.
		 */
		tag = __blk_mq_get_tag(data, bt);
		if (tag != -1)
			break;

		if (data->ctx)
			blk_mq_put_ctx(data->ctx);

		io_schedule();

		data->ctx = blk_mq_get_ctx(data->q);
		data->hctx = blk_mq_map_queue(data->q, data->ctx->cpu);
		tags = blk_mq_tags_from_data(data);
		if (data->flags & BLK_MQ_REQ_RESERVED)
			bt = &tags->breserved_tags;
		else
			bt = &tags->bitmap_tags;

		finish_wait(&ws->wait, &wait);
		ws = bt_wait_ptr(bt, data->hctx);
	} while (1);

	if (drop_ctx && data->ctx)
		blk_mq_put_ctx(data->ctx);

	finish_wait(&ws->wait, &wait);

found_tag:
	return tag + tag_offset;
}
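
/*
 * Illustrative note (not in the original file): the returned value is the
 * bitmap index plus tag_offset, so with nr_reserved_tags == 1 reserved
 * allocations return tag 0 while a normal allocation that grabs bit 5 of
 * bitmap_tags returns 6. A minimal caller sketch, assuming a filled-in
 * struct blk_mq_alloc_data named "data":
 *
 *	unsigned int tag = blk_mq_get_tag(&data);
 *
 *	if (tag == BLK_MQ_TAG_FAIL)
 *		return NULL;	(only possible with BLK_MQ_REQ_NOWAIT set
 *				 or a bogus reserved-tag request)
 */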

void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
		    struct blk_mq_ctx *ctx, unsigned int tag)
{
	if (!blk_mq_tag_is_reserved(tags, tag)) {
		const int real_tag = tag - tags->nr_reserved_tags;

		BUG_ON(real_tag >= tags->nr_tags);
		sbitmap_queue_clear(&tags->bitmap_tags, real_tag, ctx->cpu);
	} else {
		BUG_ON(tag >= tags->nr_reserved_tags);
		sbitmap_queue_clear(&tags->breserved_tags, tag, ctx->cpu);
	}
}
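
/*
 * Worked example (illustrative, not part of the original file): this is
 * the inverse of the offset applied in blk_mq_get_tag(). With
 * nr_reserved_tags == 2, freeing tag 5 clears bit 5 - 2 = 3 in
 * bitmap_tags, while freeing tag 1 (a reserved tag) clears bit 1 in
 * breserved_tags directly.
 */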

struct bt_iter_data {
	struct blk_mq_hw_ctx *hctx;
	busy_iter_fn *fn;
	void *data;
	bool reserved;
};

static bool bt_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_iter_data *iter_data = data;
	struct blk_mq_hw_ctx *hctx = iter_data->hctx;
	struct blk_mq_tags *tags = hctx->tags;
	bool reserved = iter_data->reserved;
	struct request *rq;

	if (!reserved)
		bitnr += tags->nr_reserved_tags;
	rq = tags->rqs[bitnr];

	/*
	 * The allocator sets the bit before assigning ->rqs[], so a
	 * concurrent allocation can leave rq NULL here; skip it. Only
	 * invoke the callback for requests on this hardware queue.
	 */
	if (rq && rq->q == hctx->queue)
		iter_data->fn(hctx, rq, iter_data->data, reserved);
	return true;
}

static void bt_for_each(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt,
			busy_iter_fn *fn, void *data, bool reserved)
{
	struct bt_iter_data iter_data = {
		.hctx = hctx,
		.fn = fn,
		.data = data,
		.reserved = reserved,
	};

	sbitmap_for_each_set(&bt->sb, bt_iter, &iter_data);
}

struct bt_tags_iter_data {
	struct blk_mq_tags *tags;
	busy_tag_iter_fn *fn;
	void *data;
	bool reserved;
};

static bool bt_tags_iter(struct sbitmap *bitmap, unsigned int bitnr, void *data)
{
	struct bt_tags_iter_data *iter_data = data;
	struct blk_mq_tags *tags = iter_data->tags;
	bool reserved = iter_data->reserved;
	struct request *rq;

	if (!reserved)
		bitnr += tags->nr_reserved_tags;

	/*
	 * We can hit rq == NULL here, because the tagging functions
	 * test and set the bit before assigning ->rqs[].
	 */
	rq = tags->rqs[bitnr];
	if (rq)
		iter_data->fn(rq, iter_data->data, reserved);
	return true;
}

static void bt_tags_for_each(struct blk_mq_tags *tags, struct sbitmap_queue *bt,
			     busy_tag_iter_fn *fn, void *data, bool reserved)
{
	struct bt_tags_iter_data iter_data = {
		.tags = tags,
		.fn = fn,
		.data = data,
		.reserved = reserved,
	};

	if (tags->rqs)
		sbitmap_for_each_set(&bt->sb, bt_tags_iter, &iter_data);
}

static void blk_mq_all_tag_busy_iter(struct blk_mq_tags *tags,
		busy_tag_iter_fn *fn, void *priv)
{
	if (tags->nr_reserved_tags)
		bt_tags_for_each(tags, &tags->breserved_tags, fn, priv, true);
	bt_tags_for_each(tags, &tags->bitmap_tags, fn, priv, false);
}

void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
		busy_tag_iter_fn *fn, void *priv)
{
	int i;

	for (i = 0; i < tagset->nr_hw_queues; i++) {
		if (tagset->tags && tagset->tags[i])
			blk_mq_all_tag_busy_iter(tagset->tags[i], fn, priv);
	}
}
EXPORT_SYMBOL(blk_mq_tagset_busy_iter);
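
/*
 * Illustrative only (not in the original file): a minimal driver-side
 * sketch of this export, e.g. inspecting all in-flight requests during
 * a controller teardown. The callback and tag_set names are
 * hypothetical.
 *
 *	static void my_show_busy(struct request *rq, void *data,
 *				 bool reserved)
 *	{
 *		pr_info("tag %d busy (reserved: %d)\n", rq->tag, reserved);
 *	}
 *
 *	blk_mq_tagset_busy_iter(&my_drv->tag_set, my_show_busy, NULL);
 */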

int blk_mq_reinit_tagset(struct blk_mq_tag_set *set)
{
	int i, j, ret = 0;

	if (!set->ops->reinit_request)
		goto out;

	for (i = 0; i < set->nr_hw_queues; i++) {
		struct blk_mq_tags *tags = set->tags[i];

		if (!tags)
			continue;

		for (j = 0; j < tags->nr_tags; j++) {
			if (!tags->static_rqs[j])
				continue;

			ret = set->ops->reinit_request(set->driver_data,
						tags->static_rqs[j]);
			if (ret)
				goto out;
		}
	}

out:
	return ret;
}
EXPORT_SYMBOL_GPL(blk_mq_reinit_tagset);

void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
		void *priv)
{
	struct blk_mq_hw_ctx *hctx;
	int i;

	queue_for_each_hw_ctx(q, hctx, i) {
		struct blk_mq_tags *tags = hctx->tags;

		/*
		 * If no software queues are currently mapped to this
		 * hardware queue, there's nothing to check
		 */
		if (!blk_mq_hw_queue_mapped(hctx))
			continue;

		if (tags->nr_reserved_tags)
			bt_for_each(hctx, &tags->breserved_tags, fn, priv, true);
		bt_for_each(hctx, &tags->bitmap_tags, fn, priv, false);
	}
}

static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
		    bool round_robin, int node)
{
	return sbitmap_queue_init_node(bt, depth, -1, round_robin, GFP_KERNEL,
				       node);
}

static struct blk_mq_tags *blk_mq_init_bitmap_tags(struct blk_mq_tags *tags,
						   int node, int alloc_policy)
{
	unsigned int depth = tags->nr_tags - tags->nr_reserved_tags;
	bool round_robin = alloc_policy == BLK_TAG_ALLOC_RR;

	if (bt_alloc(&tags->bitmap_tags, depth, round_robin, node))
		goto free_tags;
	if (bt_alloc(&tags->breserved_tags, tags->nr_reserved_tags, round_robin,
		     node))
		goto free_bitmap_tags;

	return tags;
free_bitmap_tags:
	sbitmap_queue_free(&tags->bitmap_tags);
free_tags:
	kfree(tags);
	return NULL;
}

struct blk_mq_tags *blk_mq_init_tags(unsigned int total_tags,
				     unsigned int reserved_tags,
				     int node, int alloc_policy)
{
	struct blk_mq_tags *tags;

	if (total_tags > BLK_MQ_TAG_MAX) {
		pr_err("blk-mq: tag depth too large\n");
		return NULL;
	}

	tags = kzalloc_node(sizeof(*tags), GFP_KERNEL, node);
	if (!tags)
		return NULL;

	tags->nr_tags = total_tags;
	tags->nr_reserved_tags = reserved_tags;

	return blk_mq_init_bitmap_tags(tags, node, alloc_policy);
}
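
/*
 * Illustrative only (not in the original file): a minimal sketch of
 * creating a tag map with 256 total tags, 1 of them reserved, FIFO
 * allocation, and no NUMA preference:
 *
 *	struct blk_mq_tags *tags;
 *
 *	tags = blk_mq_init_tags(256, 1, NUMA_NO_NODE, BLK_TAG_ALLOC_FIFO);
 *	if (!tags)
 *		return -ENOMEM;
 *
 * The bitmap for normal tags then has a depth of 256 - 1 = 255; normal
 * allocations return tags 1..255 while reserved ones return 0.
 */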

void blk_mq_free_tags(struct blk_mq_tags *tags)
{
	sbitmap_queue_free(&tags->bitmap_tags);
	sbitmap_queue_free(&tags->breserved_tags);
	kfree(tags);
}

int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
			    struct blk_mq_tags **tagsptr, unsigned int tdepth,
			    bool can_grow)
{
	struct blk_mq_tags *tags = *tagsptr;

	if (tdepth <= tags->nr_reserved_tags)
		return -EINVAL;

	tdepth -= tags->nr_reserved_tags;

	/*
	 * If we are allowed to grow beyond the original size, allocate
	 * a new set of tags before freeing the old one.
	 */
	if (tdepth > tags->nr_tags) {
		struct blk_mq_tag_set *set = hctx->queue->tag_set;
		struct blk_mq_tags *new;
		int ret;

		if (!can_grow)
			return -EINVAL;

		/*
		 * We need some sort of upper limit, set it high enough that
		 * no valid use cases should require more.
		 */
		if (tdepth > 16 * BLKDEV_MAX_RQ)
			return -EINVAL;

		new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth, 0);
		if (!new)
			return -ENOMEM;
		ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
		if (ret) {
			blk_mq_free_rq_map(new);
			return -ENOMEM;
		}

		blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
		blk_mq_free_rq_map(*tagsptr);
		*tagsptr = new;
	} else {
		/*
		 * We don't need to (and can't) update reserved tags here;
		 * they remain static and should never need resizing.
		 */
		sbitmap_queue_resize(&tags->bitmap_tags, tdepth);
	}

	return 0;
}
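
/*
 * Worked example (illustrative, not part of the original file): with
 * nr_reserved_tags == 2 and nr_tags == 64, a request for tdepth = 32
 * first becomes 32 - 2 = 30, which is <= 64, so only bitmap_tags is
 * resized to 30. A request for tdepth = 128 becomes 126 > 64 and, if
 * can_grow is set, triggers allocation of a whole new rq map. This path
 * is typically reached when the queue depth is retuned at runtime, e.g.
 * via the nr_requests queue attribute in sysfs.
 */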

/**
 * blk_mq_unique_tag() - return a tag that is unique queue-wide
 * @rq: request for which to compute a unique tag
 *
 * The tag field in struct request is unique per hardware queue but not over
 * all hardware queues. Hence this function, which returns a tag with the
 * hardware context index in the upper bits and the per-hardware-queue tag in
 * the lower bits.
 *
 * Note: When called for a request that is queued on a non-multiqueue request
 * queue, the hardware context index is set to zero.
 */
u32 blk_mq_unique_tag(struct request *rq)
{
	struct request_queue *q = rq->q;
	struct blk_mq_hw_ctx *hctx;
	int hwq = 0;

	if (q->mq_ops) {
		hctx = blk_mq_map_queue(q, rq->mq_ctx->cpu);
		hwq = hctx->queue_num;
	}

	return (hwq << BLK_MQ_UNIQUE_TAG_BITS) |
		(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
}
EXPORT_SYMBOL(blk_mq_unique_tag);
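
/*
 * Worked example (illustrative, not part of the original file): with
 * BLK_MQ_UNIQUE_TAG_BITS == 16, a request with tag 5 on hardware queue 2
 * yields (2 << 16) | 5 == 0x20005. The counterpart helpers in
 * <linux/blk-mq.h> recover the two parts:
 *
 *	u32 unique = blk_mq_unique_tag(rq);
 *	u16 hwq = blk_mq_unique_tag_to_hwq(unique);	(2)
 *	u16 tag = blk_mq_unique_tag_to_tag(unique);	(5)
 */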