/*
 * Copyright (C) 2016 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */
#include "dm-core.h"
#include "dm-rq.h"

#include <linux/elevator.h> /* for rq_end_sector() */
#include <linux/blk-mq.h>

#define DM_MSG_PREFIX "core-rq"
#define DM_MQ_NR_HW_QUEUES 1
#define DM_MQ_QUEUE_DEPTH 2048

static unsigned dm_mq_nr_hw_queues = DM_MQ_NR_HW_QUEUES;
static unsigned dm_mq_queue_depth = DM_MQ_QUEUE_DEPTH;
/*
 * Request-based DM's mempools' reserved IOs set by the user.
 */
#define RESERVED_REQUEST_BASED_IOS	256
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
unsigned dm_get_reserved_rq_based_ios(void)
{
	return __dm_get_module_param(&reserved_rq_based_ios,
				     RESERVED_REQUEST_BASED_IOS, DM_RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);
static unsigned dm_get_blk_mq_nr_hw_queues(void)
{
	return __dm_get_module_param(&dm_mq_nr_hw_queues, 1, 32);
}
static unsigned dm_get_blk_mq_queue_depth(void)
{
	return __dm_get_module_param(&dm_mq_queue_depth,
				     DM_MQ_QUEUE_DEPTH, BLK_MQ_MAX_DEPTH);
}
int dm_request_based(struct mapped_device *md)
{
	return queue_is_mq(md->queue);
}
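
/*
 * Queue start/stop helpers used around device-mapper suspend/resume:
 * dm_stop_queue() quiesces the blk-mq queue so no new ->queue_rq calls
 * are made, and dm_start_queue() unquiesces it and kicks the requeue list.
 */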
void dm_start_queue(struct request_queue *q)
{
	blk_mq_unquiesce_queue(q);
	blk_mq_kick_requeue_list(q);
}
void dm_stop_queue(struct request_queue *q)
{
	if (blk_mq_queue_stopped(q))
		return;

	blk_mq_quiesce_queue(q);
}
/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone)
{
	struct dm_rq_clone_bio_info *info =
		container_of(clone, struct dm_rq_clone_bio_info, clone);
	struct dm_rq_target_io *tio = info->tio;
	unsigned int nr_bytes = info->orig->bi_iter.bi_size;
	blk_status_t error = clone->bi_status;
	bool is_last = !clone->bi_next;

	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once an error has occurred, just let clone->end_io()
		 * handle the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Do not report the error to the upper layer yet.
		 * The error handling decision is made by the target driver
		 * when the request is completed.
		 */
		tio->error = error;
		goto exit;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Report the data completion to the upper layer.
	 */
	tio->completed += nr_bytes;

	/*
	 * Update the original request.
	 * Do not use blk_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	if (is_last)
 exit:
		blk_update_request(tio->orig, BLK_STS_OK, tio->completed);
}
static struct dm_rq_target_io *tio_from_request(struct request *rq)
{
	return blk_mq_rq_to_pdu(rq);
}
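
/*
 * Finish dm-stats accounting for the original request: convert the
 * stored start time into a duration and record the completed I/O.
 */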
static void rq_end_stats(struct mapped_device *md, struct request *orig)
{
	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);

		tio->duration_jiffies = jiffies - tio->duration_jiffies;
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors, true,
				    tio->duration_jiffies, &tio->stats_aux);
	}
}
/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md)
{
	/* nudge anyone waiting on suspend queue */
	if (unlikely(wq_has_sleeper(&md->wait)))
		wake_up(&md->wait);

	/*
	 * dm_put() must be at the end of this function. See the comment above.
	 */
	dm_put(md);
}
/*
 * Complete the clone and the original request.
 * Must be called without clone's queue lock held,
 * see end_clone_request() for more details.
 */
static void dm_end_request(struct request *clone, blk_status_t error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	blk_rq_unprep_clone(clone);
	tio->ti->type->release_clone_rq(clone);

	rq_end_stats(md, rq);
	blk_mq_end_request(rq, error);
	rq_completed(md);
}
static void __dm_mq_kick_requeue_list(struct request_queue *q, unsigned long msecs)
{
	blk_mq_delay_kick_requeue_list(q, msecs);
}
void dm_mq_kick_requeue_list(struct mapped_device *md)
{
	__dm_mq_kick_requeue_list(dm_get_md_queue(md), 0);
}
EXPORT_SYMBOL(dm_mq_kick_requeue_list);
static void dm_mq_delay_requeue_request(struct request *rq, unsigned long msecs)
{
	blk_mq_requeue_request(rq, false);
	__dm_mq_kick_requeue_list(rq->q, msecs);
}
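
/*
 * Unwind any clone that was prepared for this request and put the
 * original request back on the requeue list, optionally after a short
 * delay (used for DM_ENDIO_DELAY_REQUEUE / DM_MAPIO_DELAY_REQUEUE).
 */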
static void dm_requeue_original_request(struct dm_rq_target_io *tio, bool delay_requeue)
{
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	unsigned long delay_ms = delay_requeue ? 100 : 0;

	rq_end_stats(md, rq);
	if (tio->clone) {
		blk_rq_unprep_clone(tio->clone);
		tio->ti->type->release_clone_rq(tio->clone);
	}

	dm_mq_delay_requeue_request(rq, delay_ms);
	rq_completed(md);
}
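
/*
 * Decide how to finish a completed clone: give the target's rq_end_io()
 * hook a chance to inspect the result, then complete, requeue, or leave
 * the original request pending based on the DM_ENDIO_* value returned.
 */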
static void dm_done(struct request *clone, blk_status_t error, bool mapped)
{
	int r = DM_ENDIO_DONE;
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = NULL;

	if (tio->ti) {
		rq_end_io = tio->ti->type->rq_end_io;

		if (mapped && rq_end_io)
			r = rq_end_io(tio->ti, clone, error, &tio->info);
	}

	if (unlikely(error == BLK_STS_TARGET)) {
		if (req_op(clone) == REQ_OP_WRITE_SAME &&
		    !clone->q->limits.max_write_same_sectors)
			disable_write_same(tio->md);
		if (req_op(clone) == REQ_OP_WRITE_ZEROES &&
		    !clone->q->limits.max_write_zeroes_sectors)
			disable_write_zeroes(tio->md);
	}

	switch (r) {
	case DM_ENDIO_DONE:
		/* The target wants to complete the I/O */
		dm_end_request(clone, error);
		break;
	case DM_ENDIO_INCOMPLETE:
		/* The target will handle the I/O */
		return;
	case DM_ENDIO_REQUEUE:
		/* The target wants to requeue the I/O */
		dm_requeue_original_request(tio, false);
		break;
	case DM_ENDIO_DELAY_REQUEUE:
		/* The target wants to requeue the I/O after a delay */
		dm_requeue_original_request(tio, true);
		break;
	default:
		DMWARN("unimplemented target endio return value: %d", r);
		BUG();
	}
}
/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
	bool mapped = true;
	struct dm_rq_target_io *tio = tio_from_request(rq);
	struct request *clone = tio->clone;

	if (!clone) {
		struct mapped_device *md = tio->md;

		rq_end_stats(md, rq);
		blk_mq_end_request(rq, tio->error);
		rq_completed(md);
		return;
	}

	if (rq->rq_flags & RQF_FAILED)
		mapped = false;

	dm_done(clone, tio->error, mapped);
}
/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *rq, blk_status_t error)
{
	struct dm_rq_target_io *tio = tio_from_request(rq);

	tio->error = error;
	blk_mq_complete_request(rq);
}
/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * Target's rq_end_io() function isn't called.
 * This may be used when the target's clone_and_map_rq() function fails.
 */
static void dm_kill_unmapped_request(struct request *rq, blk_status_t error)
{
	rq->rq_flags |= RQF_FAILED;
	dm_complete_request(rq, error);
}
static void end_clone_request(struct request *clone, blk_status_t error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	dm_complete_request(tio->orig, error);
}
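
/*
 * Hand the fully set-up clone to the underlying device's request queue.
 * Resource-shortage results are passed back to the caller so the original
 * request can be requeued; any other failure completes the original
 * request with the error.
 */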
static blk_status_t dm_dispatch_clone_request(struct request *clone, struct request *rq)
{
	blk_status_t r;

	if (blk_queue_io_stat(clone->q))
		clone->rq_flags |= RQF_IO_STAT;

	clone->start_time_ns = ktime_get_ns();
	r = blk_insert_cloned_request(clone->q, clone);
	if (r != BLK_STS_OK && r != BLK_STS_RESOURCE && r != BLK_STS_DEV_RESOURCE)
		/* must complete clone in terms of original request */
		dm_complete_request(rq, r);
	return r;
}
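
/*
 * blk_rq_prep_clone() callback: record which original bio a cloned bio
 * shadows and route its completion through end_clone_bio() so partial
 * completions reach the original request.
 */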
static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
				 void *data)
{
	struct dm_rq_target_io *tio = data;
	struct dm_rq_clone_bio_info *info =
		container_of(bio, struct dm_rq_clone_bio_info, clone);

	info->orig = bio_orig;
	info->tio = tio;
	bio->bi_end_io = end_clone_bio;

	return 0;
}
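
/*
 * Prepare a clone of the original request: copy its bios through
 * dm_rq_bio_constructor() and set the clone's completion callback.
 */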
static int setup_clone(struct request *clone, struct request *rq,
		       struct dm_rq_target_io *tio, gfp_t gfp_mask)
{
	int r;

	r = blk_rq_prep_clone(clone, rq, &tio->md->bs, gfp_mask,
			      dm_rq_bio_constructor, tio);
	if (r)
		return r;

	clone->end_io = end_clone_request;
	clone->end_io_data = tio;

	tio->clone = clone;

	return 0;
}
static void init_tio(struct dm_rq_target_io *tio, struct request *rq,
		     struct mapped_device *md)
{
	tio->md = md;
	tio->ti = NULL;
	tio->clone = NULL;
	tio->orig = rq;
	tio->error = 0;
	tio->completed = 0;
	/*
	 * Avoid initializing info for blk-mq; it passes
	 * target-specific data through info.ptr
	 * (see: dm_mq_init_request)
	 */
	if (!md->init_tio_pdu)
		memset(&tio->info, 0, sizeof(tio->info));
}
/*
 * Returns:
 * DM_MAPIO_*       : the request has been processed as indicated
 * DM_MAPIO_REQUEUE : the original request needs to be immediately requeued
 * < 0              : the request was completed due to failure
 */
static int map_request(struct dm_rq_target_io *tio)
{
	int r;
	struct dm_target *ti = tio->ti;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	struct request *clone = NULL;
	blk_status_t ret;

	r = ti->type->clone_and_map_rq(ti, rq, &tio->info, &clone);
	switch (r) {
	case DM_MAPIO_SUBMITTED:
		/* The target has taken the I/O to submit by itself later */
		break;
	case DM_MAPIO_REMAPPED:
		if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
			/* -ENOMEM */
			ti->type->release_clone_rq(clone);
			return DM_MAPIO_REQUEUE;
		}

		/* The target has remapped the I/O so dispatch it */
		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
				     blk_rq_pos(rq));
		ret = dm_dispatch_clone_request(clone, rq);
		if (ret == BLK_STS_RESOURCE || ret == BLK_STS_DEV_RESOURCE) {
			blk_rq_unprep_clone(clone);
			tio->ti->type->release_clone_rq(clone);
			tio->clone = NULL;
			return DM_MAPIO_REQUEUE;
		}
		break;
	case DM_MAPIO_REQUEUE:
		/* The target wants to requeue the I/O */
		break;
	case DM_MAPIO_DELAY_REQUEUE:
		/* The target wants to requeue the I/O after a delay */
		dm_requeue_original_request(tio, true);
		break;
	case DM_MAPIO_KILL:
		/* The target wants to complete the I/O */
		dm_kill_unmapped_request(rq, BLK_STS_IOERR);
		break;
	default:
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}

	return r;
}
/* DEPRECATED: previously used for request-based merge heuristic in dm_request_fn() */
ssize_t dm_attr_rq_based_seq_io_merge_deadline_show(struct mapped_device *md, char *buf)
{
	return sprintf(buf, "%u\n", 0);
}

ssize_t dm_attr_rq_based_seq_io_merge_deadline_store(struct mapped_device *md,
						     const char *buf, size_t count)
{
	return count;
}
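
/*
 * Begin processing of an original request: mark it started for blk-mq,
 * start dm-stats accounting, and take a reference on the md that is
 * held for the lifetime of the in-flight I/O (dropped in rq_completed()).
 */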
static void dm_start_request(struct mapped_device *md, struct request *orig)
{
	blk_mq_start_request(orig);

	if (unlikely(dm_stats_used(&md->stats))) {
		struct dm_rq_target_io *tio = tio_from_request(orig);

		tio->duration_jiffies = jiffies;
		tio->n_sectors = blk_rq_sectors(orig);
		dm_stats_account_io(&md->stats, rq_data_dir(orig),
				    blk_rq_pos(orig), tio->n_sectors, false, 0,
				    &tio->stats_aux);
	}

	/*
	 * Hold the md reference here for the in-flight I/O.
	 * We can't rely on the reference count held by the device opener,
	 * because the device may be closed during the request completion
	 * when all bios are completed.
	 * See the comment in rq_completed() too.
	 */
	dm_get(md);
}
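
/*
 * blk-mq .init_request callback: invoked once for each preallocated
 * request when the tag set is set up, before any I/O is issued.
 */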
static int dm_mq_init_request(struct blk_mq_tag_set *set, struct request *rq,
			      unsigned int hctx_idx, unsigned int numa_node)
{
	struct mapped_device *md = set->driver_data;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);

	/*
	 * Must initialize md member of tio, otherwise it won't
	 * be available in dm_mq_queue_rq.
	 */
	tio->md = md;

	if (md->init_tio_pdu) {
		/* target-specific per-io data is immediately after the tio */
		tio->info.ptr = tio + 1;
	}

	return 0;
}
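
/*
 * blk-mq .queue_rq callback: resolve the target for this device (using
 * the immutable target when there is one), initialize the per-request
 * tio, and map/dispatch the request. Returns BLK_STS_RESOURCE so blk-mq
 * retries later when the target is busy or the mapping must be requeued.
 */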
static blk_status_t dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
				   const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct dm_rq_target_io *tio = blk_mq_rq_to_pdu(rq);
	struct mapped_device *md = tio->md;
	struct dm_target *ti = md->immutable_target;

	if (unlikely(!ti)) {
		int srcu_idx;
		struct dm_table *map = dm_get_live_table(md, &srcu_idx);

		ti = dm_table_find_target(map, 0);
		dm_put_live_table(md, srcu_idx);
	}

	if (ti->type->busy && ti->type->busy(ti))
		return BLK_STS_RESOURCE;

	dm_start_request(md, rq);

	/* Init tio using md established in .init_request */
	init_tio(tio, rq, md);

	/*
	 * Establish tio->ti before calling map_request().
	 */
	tio->ti = ti;

	/* Direct call is fine since .queue_rq allows allocations */
	if (map_request(tio) == DM_MAPIO_REQUEUE) {
		/* Undo dm_start_request() before requeuing */
		rq_end_stats(md, rq);
		rq_completed(md);
		return BLK_STS_RESOURCE;
	}

	return BLK_STS_OK;
}
static const struct blk_mq_ops dm_mq_ops = {
	.queue_rq = dm_mq_queue_rq,
	.complete = dm_softirq_done,
	.init_request = dm_mq_init_request,
};
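
/*
 * Allocate and register the blk-mq tag set for this mapped device and
 * bind it to md->queue.  Per-request space for the tio (and any
 * immutable target's per-io data) is reserved via cmd_size.
 */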
int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t)
{
	struct request_queue *q;
	struct dm_target *immutable_tgt;
	int err;

	md->tag_set = kzalloc_node(sizeof(struct blk_mq_tag_set), GFP_KERNEL, md->numa_node_id);
	if (!md->tag_set)
		return -ENOMEM;

	md->tag_set->ops = &dm_mq_ops;
	md->tag_set->queue_depth = dm_get_blk_mq_queue_depth();
	md->tag_set->numa_node = md->numa_node_id;
	md->tag_set->flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
	md->tag_set->nr_hw_queues = dm_get_blk_mq_nr_hw_queues();
	md->tag_set->driver_data = md;

	md->tag_set->cmd_size = sizeof(struct dm_rq_target_io);
	immutable_tgt = dm_table_get_immutable_target(t);
	if (immutable_tgt && immutable_tgt->per_io_data_size) {
		/* any target-specific per-io data is immediately after the tio */
		md->tag_set->cmd_size += immutable_tgt->per_io_data_size;
		md->init_tio_pdu = true;
	}

	err = blk_mq_alloc_tag_set(md->tag_set);
	if (err)
		goto out_kfree_tag_set;

	q = blk_mq_init_allocated_queue(md->tag_set, md->queue);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_tag_set;
	}

	return 0;

out_tag_set:
	blk_mq_free_tag_set(md->tag_set);
out_kfree_tag_set:
	kfree(md->tag_set);

	return err;
}
void dm_mq_cleanup_mapped_device(struct mapped_device *md)
{
	if (md->tag_set) {
		blk_mq_free_tag_set(md->tag_set);
		kfree(md->tag_set);
	}
}
module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");

/* Unused, but preserved for userspace compatibility */
static bool use_blk_mq = true;
module_param(use_blk_mq, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(use_blk_mq, "Use block multiqueue for request-based DM devices");

module_param(dm_mq_nr_hw_queues, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_nr_hw_queues, "Number of hardware queues for request-based dm-mq devices");

module_param(dm_mq_queue_depth, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dm_mq_queue_depth, "Queue depth for request-based dm-mq devices");
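
/*
 * Note: this code is built into dm_mod, so the parameters above are
 * typically set as "dm_mod.<param>=<value>" on the kernel command line,
 * or adjusted at runtime under /sys/module/dm_mod/parameters/ (only the
 * S_IWUSR ones are writable, by root).
 */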