/*
 * Functions related to setting various queue properties from drivers
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/memblock.h>	/* for max_pfn/max_low_pfn */
#include <linux/gcd.h>
#include <linux/lcm.h>
#include <linux/jiffies.h>
#include <linux/gfp.h>

#include "blk.h"
#include "blk-wbt.h"

unsigned long blk_max_low_pfn;
EXPORT_SYMBOL(blk_max_low_pfn);

unsigned long blk_max_pfn;
void blk_queue_softirq_done(struct request_queue *q, softirq_done_fn *fn)
{
	q->softirq_done_fn = fn;
}
EXPORT_SYMBOL(blk_queue_softirq_done);
void blk_queue_rq_timeout(struct request_queue *q, unsigned int timeout)
{
	q->rq_timeout = timeout;
}
EXPORT_SYMBOL_GPL(blk_queue_rq_timeout);
/**
 * blk_set_default_limits - reset limits to default values
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limit struct to its default state.
 */
void blk_set_default_limits(struct queue_limits *lim)
{
	lim->max_segments = BLK_MAX_SEGMENTS;
	lim->max_discard_segments = 1;
	lim->max_integrity_segments = 0;
	lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
	lim->virt_boundary_mask = 0;
	lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
	lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
	lim->max_dev_sectors = 0;
	lim->chunk_sectors = 0;
	lim->max_write_same_sectors = 0;
	lim->max_write_zeroes_sectors = 0;
	lim->max_discard_sectors = 0;
	lim->max_hw_discard_sectors = 0;
	lim->discard_granularity = 0;
	lim->discard_alignment = 0;
	lim->discard_misaligned = 0;
	lim->logical_block_size = lim->physical_block_size = lim->io_min = 512;
	lim->bounce_pfn = (unsigned long)(BLK_BOUNCE_ANY >> PAGE_SHIFT);
	lim->alignment_offset = 0;
	lim->io_opt = 0;
	lim->misaligned = 0;
	lim->cluster = 1;
	lim->zoned = BLK_ZONED_NONE;
}
EXPORT_SYMBOL(blk_set_default_limits);
/**
 * blk_set_stacking_limits - set default limits for stacking devices
 * @lim:  the queue_limits structure to reset
 *
 * Description:
 *   Returns a queue_limit struct to its default state. Should be used
 *   by stacking drivers like DM that have no internal limits.
 */
void blk_set_stacking_limits(struct queue_limits *lim)
{
	blk_set_default_limits(lim);

	/* Inherit limits from component devices */
	lim->max_segments = USHRT_MAX;
	lim->max_discard_segments = USHRT_MAX;
	lim->max_hw_sectors = UINT_MAX;
	lim->max_segment_size = UINT_MAX;
	lim->max_sectors = UINT_MAX;
	lim->max_dev_sectors = UINT_MAX;
	lim->max_write_same_sectors = UINT_MAX;
	lim->max_write_zeroes_sectors = UINT_MAX;
}
EXPORT_SYMBOL(blk_set_stacking_limits);
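
/*
 * Editor's illustrative sketch (not part of the original file): a
 * stacking driver typically starts from blk_set_stacking_limits() and
 * then folds in each component with blk_stack_limits(), defined below.
 * example_build_limits and the parts[] array are hypothetical names.
 */
#if 0
static int example_build_limits(struct queue_limits *lim,
				struct queue_limits **parts, int nr)
{
	int i, ret = 0;

	blk_set_stacking_limits(lim);		/* most permissive start */

	for (i = 0; i < nr; i++)		/* fold in each component */
		ret |= blk_stack_limits(lim, parts[i], 0);

	return ret;	/* non-zero if any component ended up misaligned */
}
#endif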
/**
 * blk_queue_make_request - define an alternate make_request function for a device
 * @q:  the request queue for the device to be affected
 * @mfn: the alternate make_request function
 *
 * Description:
 *    The normal way for &struct bios to be passed to a device
 *    driver is for them to be collected into requests on a request
 *    queue, and then to allow the device driver to select requests
 *    off that queue when it is ready.  This works well for many block
 *    devices. However some block devices (typically virtual devices
 *    such as md or lvm) do not benefit from the processing on the
 *    request queue, and are served best by having the requests passed
 *    directly to them.  This can be achieved by providing a function
 *    to blk_queue_make_request().
 *
 * Caveat:
 *    The driver that does this *must* be able to deal appropriately
 *    with buffers in "highmemory". This can be accomplished by either calling
 *    kmap_atomic() to get a temporary kernel mapping, or by calling
 *    blk_queue_bounce() to create a buffer in normal memory.
 **/
void blk_queue_make_request(struct request_queue *q, make_request_fn *mfn)
{
	/*
	 * set defaults
	 */
	q->nr_requests = BLKDEV_MAX_RQ;

	q->make_request_fn = mfn;
	blk_queue_dma_alignment(q, 511);

	blk_set_default_limits(&q->limits);
}
EXPORT_SYMBOL(blk_queue_make_request);
/**
 * blk_queue_bounce_limit - set bounce buffer limit for queue
 * @q: the request queue for the device
 * @max_addr: the maximum address the device can handle
 *
 * Description:
 *    Different hardware can have different requirements as to what pages
 *    it can do I/O directly to. A low level driver can call
 *    blk_queue_bounce_limit to have lower memory pages allocated as bounce
 *    buffers for doing I/O to pages residing above @max_addr.
 **/
void blk_queue_bounce_limit(struct request_queue *q, u64 max_addr)
{
	unsigned long b_pfn = max_addr >> PAGE_SHIFT;
	int dma = 0;

	q->bounce_gfp = GFP_NOIO;
#if BITS_PER_LONG == 64
	/*
	 * Assume anything <= 4GB can be handled by IOMMU.  Actually
	 * some IOMMUs can handle everything, but I don't know of a
	 * way to test this here.
	 */
	if (b_pfn < (min_t(u64, 0xffffffffUL, BLK_BOUNCE_HIGH) >> PAGE_SHIFT))
		dma = 1;
	q->limits.bounce_pfn = max(max_low_pfn, b_pfn);
#else
	if (b_pfn < blk_max_low_pfn)
		dma = 1;
	q->limits.bounce_pfn = b_pfn;
#endif
	if (dma) {
		init_emergency_isa_pool();
		q->bounce_gfp = GFP_NOIO | GFP_DMA;
		q->limits.bounce_pfn = b_pfn;
	}
}
EXPORT_SYMBOL(blk_queue_bounce_limit);
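
/*
 * Editor's illustrative sketch (not part of the original file): a
 * driver whose DMA engine can only address the low 4 GiB might apply a
 * bounce limit at queue setup; pages above the limit are then copied
 * through bounce buffers. example_init_bounce is a hypothetical name.
 */
#if 0
static void example_init_bounce(struct request_queue *q)
{
	blk_queue_bounce_limit(q, 0xffffffffULL);	/* bounce above 4 GiB */
}
#endif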
/**
 * blk_queue_max_hw_sectors - set max sectors for a request for this queue
 * @q:  the request queue for the device
 * @max_hw_sectors:  max hardware sectors in the usual 512b unit
 *
 * Description:
 *    Enables a low level driver to set a hard upper limit,
 *    max_hw_sectors, on the size of requests.  max_hw_sectors is set by
 *    the device driver based upon the capabilities of the I/O
 *    controller.
 *
 *    max_dev_sectors is a hard limit imposed by the storage device for
 *    READ/WRITE requests.  It is set by the disk driver.
 *
 *    max_sectors is a soft limit imposed by the block layer for
 *    filesystem type requests.  This value can be overridden on a
 *    per-device basis in /sys/block/<device>/queue/max_sectors_kb.
 *    The soft limit can not exceed max_hw_sectors.
 **/
void blk_queue_max_hw_sectors(struct request_queue *q, unsigned int max_hw_sectors)
{
	struct queue_limits *limits = &q->limits;
	unsigned int max_sectors;

	if ((max_hw_sectors << 9) < PAGE_SIZE) {
		max_hw_sectors = 1 << (PAGE_SHIFT - 9);
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_hw_sectors);
	}

	limits->max_hw_sectors = max_hw_sectors;
	max_sectors = min_not_zero(max_hw_sectors, limits->max_dev_sectors);
	max_sectors = min_t(unsigned int, max_sectors, BLK_DEF_MAX_SECTORS);
	limits->max_sectors = max_sectors;
	q->backing_dev_info->io_pages = max_sectors >> (PAGE_SHIFT - 9);
}
EXPORT_SYMBOL(blk_queue_max_hw_sectors);
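
/*
 * Example (editor's note): a controller limited to 128 KiB per request
 * would call
 *
 *	blk_queue_max_hw_sectors(q, 256);
 *
 * since 256 * 512 B = 128 KiB; max_sectors is then clamped to
 * min(256, max_dev_sectors, BLK_DEF_MAX_SECTORS).
 */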
/**
 * blk_queue_chunk_sectors - set size of the chunk for this queue
 * @q:  the request queue for the device
 * @chunk_sectors:  chunk sectors in the usual 512b unit
 *
 * Description:
 *    If a driver doesn't want IOs to cross a given chunk size, it can set
 *    this limit and prevent merging across chunks. Note that the chunk size
 *    must currently be a power-of-2 in sectors. Also note that the block
 *    layer must accept a page worth of data at any offset. So if the
 *    crossing of chunks is a hard limitation in the driver, it must still be
 *    prepared to split single page bios.
 **/
void blk_queue_chunk_sectors(struct request_queue *q, unsigned int chunk_sectors)
{
	BUG_ON(!is_power_of_2(chunk_sectors));
	q->limits.chunk_sectors = chunk_sectors;
}
EXPORT_SYMBOL(blk_queue_chunk_sectors);
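
/*
 * Example (editor's note): a device that cannot service I/O crossing
 * its 128 KiB internal boundaries would call
 *
 *	blk_queue_chunk_sectors(q, 256);
 *
 * where 256 sectors is a power of two, as the BUG_ON above requires.
 */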
/**
 * blk_queue_max_discard_sectors - set max sectors for a single discard
 * @q:  the request queue for the device
 * @max_discard_sectors: maximum number of sectors to discard
 **/
void blk_queue_max_discard_sectors(struct request_queue *q,
		unsigned int max_discard_sectors)
{
	q->limits.max_hw_discard_sectors = max_discard_sectors;
	q->limits.max_discard_sectors = max_discard_sectors;
}
EXPORT_SYMBOL(blk_queue_max_discard_sectors);
/**
 * blk_queue_max_write_same_sectors - set max sectors for a single write same
 * @q:  the request queue for the device
 * @max_write_same_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_write_same_sectors(struct request_queue *q,
				      unsigned int max_write_same_sectors)
{
	q->limits.max_write_same_sectors = max_write_same_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_same_sectors);
/**
 * blk_queue_max_write_zeroes_sectors - set max sectors for a single
 *                                      write zeroes
 * @q:  the request queue for the device
 * @max_write_zeroes_sectors: maximum number of sectors to write per command
 **/
void blk_queue_max_write_zeroes_sectors(struct request_queue *q,
		unsigned int max_write_zeroes_sectors)
{
	q->limits.max_write_zeroes_sectors = max_write_zeroes_sectors;
}
EXPORT_SYMBOL(blk_queue_max_write_zeroes_sectors);
/**
 * blk_queue_max_segments - set max hw segments for a request for this queue
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    hw data segments in a request.
 **/
void blk_queue_max_segments(struct request_queue *q, unsigned short max_segments)
{
	if (!max_segments) {
		max_segments = 1;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_segments);
	}

	q->limits.max_segments = max_segments;
}
EXPORT_SYMBOL(blk_queue_max_segments);
/**
 * blk_queue_max_discard_segments - set max segments for discard requests
 * @q:  the request queue for the device
 * @max_segments:  max number of segments
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the number of
 *    segments in a discard request.
 **/
void blk_queue_max_discard_segments(struct request_queue *q,
		unsigned short max_segments)
{
	q->limits.max_discard_segments = max_segments;
}
EXPORT_SYMBOL_GPL(blk_queue_max_discard_segments);
/**
 * blk_queue_max_segment_size - set max segment size for blk_rq_map_sg
 * @q:  the request queue for the device
 * @max_size:  max size of segment in bytes
 *
 * Description:
 *    Enables a low level driver to set an upper limit on the size of a
 *    coalesced segment.
 **/
void blk_queue_max_segment_size(struct request_queue *q, unsigned int max_size)
{
	if (max_size < PAGE_SIZE) {
		max_size = PAGE_SIZE;
		printk(KERN_INFO "%s: set to minimum %d\n",
		       __func__, max_size);
	}

	q->limits.max_segment_size = max_size;
}
EXPORT_SYMBOL(blk_queue_max_segment_size);
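
/*
 * Editor's illustrative sketch (not part of the original file): a
 * driver for a DMA engine with a 32-entry scatter/gather table and a
 * 64 KiB cap per entry might pair the two segment limits as below.
 * example_set_sg_limits is a hypothetical name.
 */
#if 0
static void example_set_sg_limits(struct request_queue *q)
{
	blk_queue_max_segments(q, 32);		/* S/G table entries */
	blk_queue_max_segment_size(q, 65536);	/* bytes per entry */
}
#endif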
/**
 * blk_queue_logical_block_size - set logical block size for the queue
 * @q:  the request queue for the device
 * @size:  the logical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible block size that the
 *   storage device can address.  The default of 512 covers most
 *   hardware.
 **/
void blk_queue_logical_block_size(struct request_queue *q, unsigned short size)
{
	q->limits.logical_block_size = size;

	if (q->limits.physical_block_size < size)
		q->limits.physical_block_size = size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_logical_block_size);
/**
 * blk_queue_physical_block_size - set physical block size for the queue
 * @q:  the request queue for the device
 * @size:  the physical block size, in bytes
 *
 * Description:
 *   This should be set to the lowest possible sector size that the
 *   hardware can operate on without reverting to read-modify-write
 *   operations.
 */
void blk_queue_physical_block_size(struct request_queue *q, unsigned int size)
{
	q->limits.physical_block_size = size;

	if (q->limits.physical_block_size < q->limits.logical_block_size)
		q->limits.physical_block_size = q->limits.logical_block_size;

	if (q->limits.io_min < q->limits.physical_block_size)
		q->limits.io_min = q->limits.physical_block_size;
}
EXPORT_SYMBOL(blk_queue_physical_block_size);
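
/*
 * Example (editor's note): a 512-byte-emulation ("512e") disk with
 * 4 KiB physical sectors would report
 *
 *	blk_queue_logical_block_size(q, 512);
 *	blk_queue_physical_block_size(q, 4096);
 *
 * so the stack prefers 4 KiB-aligned I/O and avoids the device's
 * internal read-modify-write path.
 */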
/**
 * blk_queue_alignment_offset - set physical block alignment offset
 * @q:	the request queue for the device
 * @offset: alignment offset in bytes
 *
 * Description:
 *   Some devices are naturally misaligned to compensate for things like
 *   the legacy DOS partition table 63-sector offset.  Low-level drivers
 *   should call this function for devices whose first sector is not
 *   naturally aligned.
 */
void blk_queue_alignment_offset(struct request_queue *q, unsigned int offset)
{
	q->limits.alignment_offset =
		offset & (q->limits.physical_block_size - 1);
	q->limits.misaligned = 0;
}
EXPORT_SYMBOL(blk_queue_alignment_offset);
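
/*
 * Example (editor's note, figures assumed): a 512e drive shifted so
 * that legacy sector 63 starts a physical block is offset 3584 bytes
 * from natural 4 KiB alignment and would report
 *
 *	blk_queue_alignment_offset(q, 3584);
 */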
/**
 * blk_limits_io_min - set minimum request size for a device
 * @limits: the queue limits
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Some devices have an internal block size bigger than the reported
 *   hardware sector size.  This function can be used to signal the
 *   smallest I/O the device can perform without incurring a performance
 *   penalty.
 */
void blk_limits_io_min(struct queue_limits *limits, unsigned int min)
{
	limits->io_min = min;

	if (limits->io_min < limits->logical_block_size)
		limits->io_min = limits->logical_block_size;

	if (limits->io_min < limits->physical_block_size)
		limits->io_min = limits->physical_block_size;
}
EXPORT_SYMBOL(blk_limits_io_min);
/**
 * blk_queue_io_min - set minimum request size for the queue
 * @q:	the request queue for the device
 * @min:  smallest I/O size in bytes
 *
 * Description:
 *   Storage devices may report a granularity or preferred minimum I/O
 *   size which is the smallest request the device can perform without
 *   incurring a performance penalty.  For disk drives this is often the
 *   physical block size.  For RAID arrays it is often the stripe chunk
 *   size.  A properly aligned multiple of minimum_io_size is the
 *   preferred request size for workloads where a high number of I/O
 *   operations is desired.
 */
void blk_queue_io_min(struct request_queue *q, unsigned int min)
{
	blk_limits_io_min(&q->limits, min);
}
EXPORT_SYMBOL(blk_queue_io_min);
/**
 * blk_limits_io_opt - set optimal request size for a device
 * @limits: the queue limits
 * @opt:  optimal I/O size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_limits_io_opt(struct queue_limits *limits, unsigned int opt)
{
	limits->io_opt = opt;
}
EXPORT_SYMBOL(blk_limits_io_opt);
/**
 * blk_queue_io_opt - set optimal request size for the queue
 * @q:	the request queue for the device
 * @opt:  optimal request size in bytes
 *
 * Description:
 *   Storage devices may report an optimal I/O size, which is the
 *   device's preferred unit for sustained I/O.  This is rarely reported
 *   for disk drives.  For RAID arrays it is usually the stripe width or
 *   the internal track size.  A properly aligned multiple of
 *   optimal_io_size is the preferred request size for workloads where
 *   sustained throughput is desired.
 */
void blk_queue_io_opt(struct request_queue *q, unsigned int opt)
{
	blk_limits_io_opt(&q->limits, opt);
}
EXPORT_SYMBOL(blk_queue_io_opt);
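
/*
 * Editor's illustrative sketch (not part of the original file): a RAID5
 * array with a 64 KiB chunk and four data disks might advertise the
 * chunk as io_min and the full stripe as io_opt. example_set_raid_hints
 * is a hypothetical name.
 */
#if 0
static void example_set_raid_hints(struct request_queue *q)
{
	blk_queue_io_min(q, 64 * 1024);		/* stripe chunk */
	blk_queue_io_opt(q, 4 * 64 * 1024);	/* full stripe width */
}
#endif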
/**
 * blk_queue_stack_limits - inherit underlying queue limits for stacked drivers
 * @t:	the stacking driver (top)
 * @b:  the underlying device (bottom)
 **/
void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b)
{
	blk_stack_limits(&t->limits, &b->limits, 0);
}
EXPORT_SYMBOL(blk_queue_stack_limits);
/**
 * blk_stack_limits - adjust queue_limits for stacked devices
 * @t:	the stacking driver limits (top device)
 * @b:  the underlying queue limits (bottom, component device)
 * @start:  first data sector within component device
 *
 * Description:
 *    This function is used by stacking drivers like MD and DM to ensure
 *    that all component devices have compatible block sizes and
 *    alignments.  The stacking driver must provide a queue_limits
 *    struct (top) and then iteratively call the stacking function for
 *    all component (bottom) devices.  The stacking function will
 *    attempt to combine the values and ensure proper alignment.
 *
 *    Returns 0 if the top and bottom queue_limits are compatible.  The
 *    top device's block sizes and alignment offsets may be adjusted to
 *    ensure alignment with the bottom device.  If no compatible sizes
 *    and alignments exist, -1 is returned and the resulting top
 *    queue_limits will have the misaligned flag set to indicate that
 *    the alignment_offset is undefined.
 */
int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
		     sector_t start)
{
	unsigned int top, bottom, alignment, ret = 0;

	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
	t->max_dev_sectors = min_not_zero(t->max_dev_sectors, b->max_dev_sectors);
	t->max_write_same_sectors = min(t->max_write_same_sectors,
					b->max_write_same_sectors);
	t->max_write_zeroes_sectors = min(t->max_write_zeroes_sectors,
					  b->max_write_zeroes_sectors);
	t->bounce_pfn = min_not_zero(t->bounce_pfn, b->bounce_pfn);

	t->seg_boundary_mask = min_not_zero(t->seg_boundary_mask,
					    b->seg_boundary_mask);
	t->virt_boundary_mask = min_not_zero(t->virt_boundary_mask,
					     b->virt_boundary_mask);

	t->max_segments = min_not_zero(t->max_segments, b->max_segments);
	t->max_discard_segments = min_not_zero(t->max_discard_segments,
					       b->max_discard_segments);
	t->max_integrity_segments = min_not_zero(t->max_integrity_segments,
						 b->max_integrity_segments);

	t->max_segment_size = min_not_zero(t->max_segment_size,
					   b->max_segment_size);

	t->misaligned |= b->misaligned;

	alignment = queue_limit_alignment_offset(b, start);

	/* Bottom device has different alignment.  Check that it is
	 * compatible with the current top alignment.
	 */
	if (t->alignment_offset != alignment) {

		top = max(t->physical_block_size, t->io_min)
			+ t->alignment_offset;
		bottom = max(b->physical_block_size, b->io_min) + alignment;

		/* Verify that top and bottom intervals line up */
		if (max(top, bottom) % min(top, bottom)) {
			t->misaligned = 1;
			ret = -1;
		}
	}

	t->logical_block_size = max(t->logical_block_size,
				    b->logical_block_size);

	t->physical_block_size = max(t->physical_block_size,
				     b->physical_block_size);

	t->io_min = max(t->io_min, b->io_min);
	t->io_opt = lcm_not_zero(t->io_opt, b->io_opt);

	t->cluster &= b->cluster;

	/* Physical block size a multiple of the logical block size? */
	if (t->physical_block_size & (t->logical_block_size - 1)) {
		t->physical_block_size = t->logical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Minimum I/O a multiple of the physical block size? */
	if (t->io_min & (t->physical_block_size - 1)) {
		t->io_min = t->physical_block_size;
		t->misaligned = 1;
		ret = -1;
	}

	/* Optimal I/O a multiple of the physical block size? */
	if (t->io_opt & (t->physical_block_size - 1)) {
		t->io_opt = 0;
		t->misaligned = 1;
		ret = -1;
	}

	t->raid_partial_stripes_expensive =
		max(t->raid_partial_stripes_expensive,
		    b->raid_partial_stripes_expensive);

	/* Find lowest common alignment_offset */
	t->alignment_offset = lcm_not_zero(t->alignment_offset, alignment)
		% max(t->physical_block_size, t->io_min);

	/* Verify that new alignment_offset is on a logical block boundary */
	if (t->alignment_offset & (t->logical_block_size - 1)) {
		t->misaligned = 1;
		ret = -1;
	}

	/* Discard alignment and granularity */
	if (b->discard_granularity) {
		alignment = queue_limit_discard_alignment(b, start);

		if (t->discard_granularity != 0 &&
		    t->discard_alignment != alignment) {
			top = t->discard_granularity + t->discard_alignment;
			bottom = b->discard_granularity + alignment;

			/* Verify that top and bottom intervals line up */
			if ((max(top, bottom) % min(top, bottom)) != 0)
				t->discard_misaligned = 1;
		}

		t->max_discard_sectors = min_not_zero(t->max_discard_sectors,
						      b->max_discard_sectors);
		t->max_hw_discard_sectors = min_not_zero(t->max_hw_discard_sectors,
							 b->max_hw_discard_sectors);
		t->discard_granularity = max(t->discard_granularity,
					     b->discard_granularity);
		t->discard_alignment = lcm_not_zero(t->discard_alignment, alignment) %
			t->discard_granularity;
	}

	if (b->chunk_sectors)
		t->chunk_sectors = min_not_zero(t->chunk_sectors,
						b->chunk_sectors);

	return ret;
}
EXPORT_SYMBOL(blk_stack_limits);
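
/*
 * Worked example (editor's note): stacking a top with
 * physical_block_size 512, io_min 4096 and alignment_offset 0 onto a
 * bottom with io_min 65536 and alignment 0 gives top = 4096 and
 * bottom = 65536; 65536 % 4096 == 0, so the intervals line up.  With a
 * bottom alignment of 512, bottom = 66048 and 66048 % 4096 != 0, so
 * t->misaligned is set and -1 is returned.
 */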
/**
 * bdev_stack_limits - adjust queue limits for stacked drivers
 * @t:	the stacking driver limits (top device)
 * @bdev:  the component block_device (bottom)
 * @start:  first data sector within component device
 *
 * Description:
 *    Merges queue limits for a top device and a block_device.  Returns
 *    0 if alignment didn't change.  Returns -1 if adding the bottom
 *    device caused misalignment.
 */
int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
		      sector_t start)
{
	struct request_queue *bq = bdev_get_queue(bdev);

	start += get_start_sect(bdev);

	return blk_stack_limits(t, &bq->limits, start);
}
EXPORT_SYMBOL(bdev_stack_limits);
/**
 * disk_stack_limits - adjust queue limits for stacked drivers
 * @disk:  MD/DM gendisk (top)
 * @bdev:  the underlying block device (bottom)
 * @offset:  offset to beginning of data within component device
 *
 * Description:
 *    Merges the limits for a top level gendisk and a bottom level
 *    block_device.
 */
void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
		       sector_t offset)
{
	struct request_queue *t = disk->queue;

	if (bdev_stack_limits(&t->limits, bdev, offset >> 9) < 0) {
		char top[BDEVNAME_SIZE], bottom[BDEVNAME_SIZE];

		disk_name(disk, 0, top);
		bdevname(bdev, bottom);

		printk(KERN_NOTICE "%s: Warning: Device %s is misaligned\n",
		       top, bottom);
	}
}
EXPORT_SYMBOL(disk_stack_limits);
/**
 * blk_queue_dma_pad - set pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Set dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_dma_pad(struct request_queue *q, unsigned int mask)
{
	q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_dma_pad);
/**
 * blk_queue_update_dma_pad - update pad mask
 * @q:     the request queue for the device
 * @mask:  pad mask
 *
 * Update dma pad mask.
 *
 * Appending pad buffer to a request modifies the last entry of a
 * scatter list such that it includes the pad buffer.
 **/
void blk_queue_update_dma_pad(struct request_queue *q, unsigned int mask)
{
	if (mask > q->dma_pad_mask)
		q->dma_pad_mask = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_pad);
/**
 * blk_queue_dma_drain - Set up a drain buffer for excess dma.
 * @q:  the request queue for the device
 * @dma_drain_needed: fn which returns non-zero if drain is necessary
 * @buf:	physically contiguous buffer
 * @size:	size of the buffer in bytes
 *
 * Some devices have excess DMA problems and can't simply discard (or
 * zero fill) the unwanted piece of the transfer.  They have to have a
 * real area of memory to transfer it into.  The use case for this is
 * ATAPI devices in DMA mode.  If the packet command causes a transfer
 * bigger than the transfer size some HBAs will lock up if there
 * aren't DMA elements to contain the excess transfer.  What this API
 * does is adjust the queue so that the buf is always appended
 * silently to the scatterlist.
 *
 * Note: This routine adjusts max_hw_segments to make room for appending
 * the drain buffer.  If you call blk_queue_max_segments() after calling
 * this routine, you must set the limit to one fewer than your device
 * can support otherwise there won't be room for the drain buffer.
 */
int blk_queue_dma_drain(struct request_queue *q,
			dma_drain_needed_fn *dma_drain_needed,
			void *buf, unsigned int size)
{
	if (queue_max_segments(q) < 2)
		return -EINVAL;
	/* make room for appending the drain */
	blk_queue_max_segments(q, queue_max_segments(q) - 1);
	q->dma_drain_needed = dma_drain_needed;
	q->dma_drain_buffer = buf;
	q->dma_drain_size = size;

	return 0;
}
EXPORT_SYMBOL_GPL(blk_queue_dma_drain);
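
/*
 * Editor's illustrative sketch (not part of the original file):
 * roughly how an ATAPI driver might attach a drain buffer (the libata
 * SFF code does something similar).  The names and the 4 KiB size are
 * assumptions.
 */
#if 0
static int example_drain_needed(struct request *rq)
{
	return blk_rq_is_passthrough(rq);	/* e.g. packet commands only */
}

static int example_setup_drain(struct request_queue *q, void *drain_buf)
{
	/* Gives up one segment of the queue's limit for the drain. */
	return blk_queue_dma_drain(q, example_drain_needed, drain_buf, 4096);
}
#endif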
/**
 * blk_queue_segment_boundary - set boundary rules for segment merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_segment_boundary(struct request_queue *q, unsigned long mask)
{
	if (mask < PAGE_SIZE - 1) {
		mask = PAGE_SIZE - 1;
		printk(KERN_INFO "%s: set to minimum %lx\n",
		       __func__, mask);
	}

	q->limits.seg_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_segment_boundary);
/**
 * blk_queue_virt_boundary - set boundary rules for bio merging
 * @q:  the request queue for the device
 * @mask:  the memory boundary mask
 **/
void blk_queue_virt_boundary(struct request_queue *q, unsigned long mask)
{
	q->limits.virt_boundary_mask = mask;
}
EXPORT_SYMBOL(blk_queue_virt_boundary);
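
/*
 * Example (editor's note): devices whose scatter/gather elements must
 * not straddle a page-sized boundary (NVMe PRP lists are the
 * well-known case) would call
 *
 *	blk_queue_virt_boundary(q, PAGE_SIZE - 1);
 */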
/**
 * blk_queue_dma_alignment - set dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Set required memory and length alignment for direct dma transactions.
 *    This is used when building direct io requests for the queue.
 **/
void blk_queue_dma_alignment(struct request_queue *q, int mask)
{
	q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_dma_alignment);
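
/*
 * Example (editor's note): the mask is the required alignment minus
 * one, so a device needing 4-byte aligned buffers and lengths would
 * call
 *
 *	blk_queue_dma_alignment(q, 3);
 *
 * blk_queue_make_request() above defaults this to 511 (512-byte
 * alignment).
 */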
/**
 * blk_queue_update_dma_alignment - update dma length and memory alignment
 * @q:     the request queue for the device
 * @mask:  alignment mask
 *
 * Description:
 *    Update required memory and length alignment for direct dma transactions.
 *    If the requested alignment is larger than the current alignment, then
 *    the current queue alignment is updated to the new value, otherwise it
 *    is left alone.  The design of this is to allow multiple objects
 *    (driver, device, transport etc) to set their respective
 *    alignments without having them interfere.
 **/
void blk_queue_update_dma_alignment(struct request_queue *q, int mask)
{
	BUG_ON(mask > PAGE_SIZE);

	if (mask > q->dma_alignment)
		q->dma_alignment = mask;
}
EXPORT_SYMBOL(blk_queue_update_dma_alignment);
void blk_queue_flush_queueable(struct request_queue *q, bool queueable)
{
	if (queueable)
		blk_queue_flag_clear(QUEUE_FLAG_FLUSH_NQ, q);
	else
		blk_queue_flag_set(QUEUE_FLAG_FLUSH_NQ, q);
}
EXPORT_SYMBOL_GPL(blk_queue_flush_queueable);
/**
 * blk_set_queue_depth - tell the block layer about the device queue depth
 * @q:		the request queue for the device
 * @depth:	queue depth
 */
void blk_set_queue_depth(struct request_queue *q, unsigned int depth)
{
	q->queue_depth = depth;
	wbt_set_queue_depth(q, depth);
}
EXPORT_SYMBOL(blk_set_queue_depth);
/**
 * blk_queue_write_cache - configure queue's write cache
 * @q:		the request queue for the device
 * @wc:		write back cache on or off
 * @fua:	device supports FUA writes, if true
 *
 * Tell the block layer about the write cache of @q.
 */
void blk_queue_write_cache(struct request_queue *q, bool wc, bool fua)
{
	spin_lock_irq(q->queue_lock);
	if (wc)
		queue_flag_set(QUEUE_FLAG_WC, q);
	else
		queue_flag_clear(QUEUE_FLAG_WC, q);
	if (fua)
		queue_flag_set(QUEUE_FLAG_FUA, q);
	else
		queue_flag_clear(QUEUE_FLAG_FUA, q);
	spin_unlock_irq(q->queue_lock);

	wbt_set_write_cache(q, test_bit(QUEUE_FLAG_WC, &q->queue_flags));
}
EXPORT_SYMBOL_GPL(blk_queue_write_cache);
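
/*
 * Example (editor's note): a driver for hardware with a volatile write
 * cache but no FUA support would call
 *
 *	blk_queue_write_cache(q, true, false);
 *
 * so the block layer issues flushes and emulates FUA writes with a
 * post-write flush.
 */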
static int __init blk_settings_init(void)
{
	blk_max_low_pfn = max_low_pfn - 1;
	blk_max_pfn = max_pfn - 1;
	return 0;
}
subsys_initcall(blk_settings_init);