// SPDX-License-Identifier: GPL-2.0
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include "null_blk.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#undef pr_fmt
#define pr_fmt(fmt)	"null_blk: " fmt
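/*
 * Zoned mode is typically exercised by loading the module with zone
 * parameters (illustrative values; sizes are in MB), e.g.:
 *
 *   modprobe null_blk zoned=1 zone_size=256 zone_nr_conv=4 \
 *           zone_max_open=8 zone_max_active=16
 */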
static inline sector_t mb_to_sects(unsigned long mb)
{
	return ((sector_t)mb * SZ_1M) >> SECTOR_SHIFT;
}
static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
{
	return sect >> ilog2(dev->zone_size_sects);
}
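/*
 * Example (illustrative values): with a 256 MB zone size,
 * mb_to_sects(256) = (256 * SZ_1M) >> SECTOR_SHIFT = 524288 sectors, and
 * null_zone_no() maps e.g. sector 1000000 to zone 1, since
 * 1000000 >> ilog2(524288) = 1. The shift is a valid division because
 * zone_size is verified to be a power of two at init time.
 */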
static inline void null_lock_zone_res(struct nullb_device *dev)
{
	if (dev->need_zone_res_mgmt)
		spin_lock_irq(&dev->zone_res_lock);
}

static inline void null_unlock_zone_res(struct nullb_device *dev)
{
	if (dev->need_zone_res_mgmt)
		spin_unlock_irq(&dev->zone_res_lock);
}
static inline void null_init_zone_lock(struct nullb_device *dev,
				       struct nullb_zone *zone)
{
	if (!dev->memory_backed)
		spin_lock_init(&zone->spinlock);
	else
		mutex_init(&zone->mutex);
}
static inline void null_lock_zone(struct nullb_device *dev,
				  struct nullb_zone *zone)
{
	if (!dev->memory_backed)
		spin_lock_irq(&zone->spinlock);
	else
		mutex_lock(&zone->mutex);
}
static inline void null_unlock_zone(struct nullb_device *dev,
				    struct nullb_zone *zone)
{
	if (!dev->memory_backed)
		spin_unlock_irq(&zone->spinlock);
	else
		mutex_unlock(&zone->mutex);
}
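/*
 * Note on the locking scheme: memory-backed devices process commands in a
 * context that may sleep, so their zones are protected by per-zone mutexes;
 * devices without memory backing use per-zone spinlocks instead. The global
 * zone_res_lock is only taken when open/active zone limits actually need to
 * be enforced (need_zone_res_mgmt).
 */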
int null_init_zoned_dev(struct nullb_device *dev, struct request_queue *q)
{
	sector_t dev_capacity_sects, zone_capacity_sects;
	struct nullb_zone *zone;
	sector_t sector = 0;
	unsigned int i;

	if (!is_power_of_2(dev->zone_size)) {
		pr_err("zone_size must be power-of-two\n");
		return -EINVAL;
	}
	if (dev->zone_size > dev->size) {
		pr_err("Zone size larger than device capacity\n");
		return -EINVAL;
	}

	if (!dev->zone_capacity)
		dev->zone_capacity = dev->zone_size;

	if (dev->zone_capacity > dev->zone_size) {
		pr_err("zone capacity (%lu MB) larger than zone size (%lu MB)\n",
		       dev->zone_capacity, dev->zone_size);
		return -EINVAL;
	}

	zone_capacity_sects = mb_to_sects(dev->zone_capacity);
	dev_capacity_sects = mb_to_sects(dev->size);
	dev->zone_size_sects = mb_to_sects(dev->zone_size);
	dev->nr_zones = round_up(dev_capacity_sects, dev->zone_size_sects)
		>> ilog2(dev->zone_size_sects);

	dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct nullb_zone),
				    GFP_KERNEL | __GFP_ZERO);
	if (!dev->zones)
		return -ENOMEM;

	spin_lock_init(&dev->zone_res_lock);

	if (dev->zone_nr_conv >= dev->nr_zones) {
		dev->zone_nr_conv = dev->nr_zones - 1;
		pr_info("changed the number of conventional zones to %u\n",
			dev->zone_nr_conv);
	}

	/* Max active zones has to be < number of seq zones in order to be enforceable */
	if (dev->zone_max_active >= dev->nr_zones - dev->zone_nr_conv) {
		dev->zone_max_active = 0;
		pr_info("zone_max_active limit disabled, limit >= zone count\n");
	}

	/* Max open zones has to be <= max active zones */
	if (dev->zone_max_active && dev->zone_max_open > dev->zone_max_active) {
		dev->zone_max_open = dev->zone_max_active;
		pr_info("changed the maximum number of open zones to %u\n",
			dev->zone_max_open);
	} else if (dev->zone_max_open >= dev->nr_zones - dev->zone_nr_conv) {
		dev->zone_max_open = 0;
		pr_info("zone_max_open limit disabled, limit >= zone count\n");
	}
	dev->need_zone_res_mgmt = dev->zone_max_active || dev->zone_max_open;
	dev->imp_close_zone_no = dev->zone_nr_conv;

	for (i = 0; i < dev->zone_nr_conv; i++) {
		zone = &dev->zones[i];

		null_init_zone_lock(dev, zone);
		zone->start = sector;
		zone->len = dev->zone_size_sects;
		zone->capacity = zone->len;
		zone->wp = zone->start + zone->len;
		zone->type = BLK_ZONE_TYPE_CONVENTIONAL;
		zone->cond = BLK_ZONE_COND_NOT_WP;

		sector += dev->zone_size_sects;
	}

	for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
		zone = &dev->zones[i];

		null_init_zone_lock(dev, zone);
		zone->start = zone->wp = sector;
		if (zone->start + dev->zone_size_sects > dev_capacity_sects)
			zone->len = dev_capacity_sects - zone->start;
		else
			zone->len = dev->zone_size_sects;
		zone->capacity =
			min_t(sector_t, zone->len, zone_capacity_sects);
		zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
		zone->cond = BLK_ZONE_COND_EMPTY;

		sector += dev->zone_size_sects;
	}

	return 0;
}
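/*
 * Example geometry (illustrative values): size=2048 (MB) and zone_size=256
 * (MB) give dev_capacity_sects = 4194304, zone_size_sects = 524288 and
 * nr_zones = 8. With zone_nr_conv=2, zones 0 and 1 are conventional and
 * zones 2 to 7 are sequential write required.
 */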
int null_register_zoned_dev(struct nullb *nullb)
{
	struct nullb_device *dev = nullb->dev;
	struct request_queue *q = nullb->q;

	disk_set_zoned(nullb->disk);
	blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
	blk_queue_required_elevator_features(q, ELEVATOR_F_ZBD_SEQ_WRITE);
	blk_queue_chunk_sectors(q, dev->zone_size_sects);
	nullb->disk->nr_zones = bdev_nr_zones(nullb->disk->part0);
	blk_queue_max_zone_append_sectors(q, dev->zone_size_sects);
	disk_set_max_open_zones(nullb->disk, dev->zone_max_open);
	disk_set_max_active_zones(nullb->disk, dev->zone_max_active);

	if (queue_is_mq(q))
		return blk_revalidate_disk_zones(nullb->disk, NULL);

	return 0;
}
void null_free_zoned_dev(struct nullb_device *dev)
{
	kvfree(dev->zones);
	dev->zones = NULL;
}
int null_report_zones(struct gendisk *disk, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct nullb *nullb = disk->private_data;
	struct nullb_device *dev = nullb->dev;
	unsigned int first_zone, i;
	struct nullb_zone *zone;
	struct blk_zone blkz;
	int error;

	first_zone = null_zone_no(dev, sector);
	if (first_zone >= dev->nr_zones)
		return 0;

	nr_zones = min(nr_zones, dev->nr_zones - first_zone);
	trace_nullb_report_zones(nullb, nr_zones);

	memset(&blkz, 0, sizeof(struct blk_zone));
	zone = &dev->zones[first_zone];
	for (i = 0; i < nr_zones; i++, zone++) {
		/*
		 * Stacked DM target drivers will remap the zone information by
		 * modifying the zone information passed to the report callback.
		 * So use a local copy to avoid corruption of the device zone
		 * array.
		 */
		null_lock_zone(dev, zone);
		blkz.start = zone->start;
		blkz.len = zone->len;
		blkz.wp = zone->wp;
		blkz.type = zone->type;
		blkz.cond = zone->cond;
		blkz.capacity = zone->capacity;
		null_unlock_zone(dev, zone);

		error = cb(&blkz, i, data);
		if (error)
			return error;
	}

	return nr_zones;
}
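/*
 * The generated report can be inspected from user space, e.g. using the
 * util-linux blkzone tool (assuming the device is named nullb0):
 *
 *   blkzone report /dev/nullb0
 */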
/*
 * This is called in the case of memory backing from null_process_cmd()
 * with the target zone already locked.
 */
size_t null_zone_valid_read_len(struct nullb *nullb,
				sector_t sector, unsigned int len)
{
	struct nullb_device *dev = nullb->dev;
	struct nullb_zone *zone = &dev->zones[null_zone_no(dev, sector)];
	unsigned int nr_sectors = len >> SECTOR_SHIFT;

	/* Read must be below the write pointer position */
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL ||
	    sector + nr_sectors <= zone->wp)
		return len;

	if (sector > zone->wp)
		return 0;

	return (zone->wp - sector) << SECTOR_SHIFT;
}
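/*
 * Example: for a zone whose write pointer is 8 sectors past zone->start, a
 * read starting 4 sectors into the zone with len = 8 sectors only has valid
 * data below the write pointer, so 4 sectors (2048 bytes) are returned.
 */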
static blk_status_t __null_close_zone(struct nullb_device *dev,
				      struct nullb_zone *zone)
{
	switch (zone->cond) {
	case BLK_ZONE_COND_CLOSED:
		/* close operation on closed is not an error */
		return BLK_STS_OK;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_EXP_OPEN:
		dev->nr_zones_exp_open--;
		break;
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_FULL:
	default:
		return BLK_STS_IOERR;
	}

	if (zone->wp == zone->start) {
		zone->cond = BLK_ZONE_COND_EMPTY;
	} else {
		zone->cond = BLK_ZONE_COND_CLOSED;
		dev->nr_zones_closed++;
	}

	return BLK_STS_OK;
}
static void null_close_imp_open_zone(struct nullb_device *dev)
{
	struct nullb_zone *zone;
	unsigned int zno, i;

	zno = dev->imp_close_zone_no;
	if (zno >= dev->nr_zones)
		zno = dev->zone_nr_conv;

	for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
		zone = &dev->zones[zno];

		zno++;
		if (zno >= dev->nr_zones)
			zno = dev->zone_nr_conv;

		if (zone->cond == BLK_ZONE_COND_IMP_OPEN) {
			__null_close_zone(dev, zone);
			dev->imp_close_zone_no = zno;
			return;
		}
	}
}
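/*
 * Starting the scan at imp_close_zone_no rather than always at the first
 * sequential zone spreads implicit closes over the zones in a round-robin
 * fashion, so the same zone is not repeatedly closed and reopened.
 */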
static blk_status_t null_check_active(struct nullb_device *dev)
{
	if (!dev->zone_max_active)
		return BLK_STS_OK;

	if (dev->nr_zones_exp_open + dev->nr_zones_imp_open +
	    dev->nr_zones_closed < dev->zone_max_active)
		return BLK_STS_OK;

	return BLK_STS_ZONE_ACTIVE_RESOURCE;
}
static blk_status_t null_check_open(struct nullb_device *dev)
{
	if (!dev->zone_max_open)
		return BLK_STS_OK;

	if (dev->nr_zones_exp_open + dev->nr_zones_imp_open < dev->zone_max_open)
		return BLK_STS_OK;

	if (dev->nr_zones_imp_open) {
		if (null_check_active(dev) == BLK_STS_OK) {
			null_close_imp_open_zone(dev);
			return BLK_STS_OK;
		}
	}

	return BLK_STS_ZONE_OPEN_RESOURCE;
}
/*
 * This function matches the manage open zone resources function in the ZBC
 * standard, with the addition of max active zones support (added in the ZNS
 * standard).
 *
 * The function determines if a zone can transition to implicit open or
 * explicit open, while maintaining the max open zone (and max active zone)
 * limit(s). It may close an implicit open zone in order to make additional
 * zone resources available.
 *
 * ZBC states that an implicit open zone shall be closed only if there is not
 * room within the open limit. However, with the addition of an active limit,
 * it is not certain that closing an implicit open zone will allow a new zone
 * to be opened, since we might already be at the active limit capacity.
 */
static blk_status_t null_check_zone_resources(struct nullb_device *dev,
					      struct nullb_zone *zone)
{
	blk_status_t ret;

	switch (zone->cond) {
	case BLK_ZONE_COND_EMPTY:
		ret = null_check_active(dev);
		if (ret != BLK_STS_OK)
			return ret;
		fallthrough;
	case BLK_ZONE_COND_CLOSED:
		return null_check_open(dev);
	default:
		/* Should never be called for other states */
		WARN_ON(1);
		return BLK_STS_IOERR;
	}
}
static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
				    unsigned int nr_sectors, bool append)
{
	struct nullb_device *dev = cmd->nq->dev;
	unsigned int zno = null_zone_no(dev, sector);
	struct nullb_zone *zone = &dev->zones[zno];
	blk_status_t ret;

	trace_nullb_zone_op(cmd, zno, zone->cond);

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) {
		if (append)
			return BLK_STS_IOERR;
		return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
	}

	null_lock_zone(dev, zone);

	if (zone->cond == BLK_ZONE_COND_FULL ||
	    zone->cond == BLK_ZONE_COND_READONLY ||
	    zone->cond == BLK_ZONE_COND_OFFLINE) {
		/* Cannot write to the zone */
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	/*
	 * Regular writes must be at the write pointer position.
	 * Zone append writes are automatically issued at the write
	 * pointer and the position returned using the request or BIO
	 * sector.
	 */
	if (append) {
		sector = zone->wp;
		if (dev->queue_mode == NULL_Q_MQ)
			cmd->rq->__sector = sector;
		else
			cmd->bio->bi_iter.bi_sector = sector;
	} else if (sector != zone->wp) {
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	if (zone->wp + nr_sectors > zone->start + zone->capacity) {
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	if (zone->cond == BLK_ZONE_COND_CLOSED ||
	    zone->cond == BLK_ZONE_COND_EMPTY) {
		null_lock_zone_res(dev);

		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK) {
			null_unlock_zone_res(dev);
			goto unlock;
		}
		if (zone->cond == BLK_ZONE_COND_CLOSED) {
			dev->nr_zones_closed--;
			dev->nr_zones_imp_open++;
		} else if (zone->cond == BLK_ZONE_COND_EMPTY) {
			dev->nr_zones_imp_open++;
		}

		if (zone->cond != BLK_ZONE_COND_EXP_OPEN)
			zone->cond = BLK_ZONE_COND_IMP_OPEN;

		null_unlock_zone_res(dev);
	}

	ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
	if (ret != BLK_STS_OK)
		goto unlock;

	zone->wp += nr_sectors;
	if (zone->wp == zone->start + zone->capacity) {
		null_lock_zone_res(dev);
		if (zone->cond == BLK_ZONE_COND_EXP_OPEN)
			dev->nr_zones_exp_open--;
		else if (zone->cond == BLK_ZONE_COND_IMP_OPEN)
			dev->nr_zones_imp_open--;
		zone->cond = BLK_ZONE_COND_FULL;
		null_unlock_zone_res(dev);
	}

	ret = BLK_STS_OK;

unlock:
	null_unlock_zone(dev, zone);

	return ret;
}
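/*
 * Example: a zone append issued to a zone whose write pointer is at
 * zone->start + 8 is redirected to that position, and the actual write
 * location is passed back through the request (or BIO) sector so that the
 * submitter can tell where the data landed.
 */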
static blk_status_t null_open_zone(struct nullb_device *dev,
				   struct nullb_zone *zone)
{
	blk_status_t ret = BLK_STS_OK;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	null_lock_zone_res(dev);

	switch (zone->cond) {
	case BLK_ZONE_COND_EXP_OPEN:
		/* open operation on exp open is not an error */
		goto unlock;
	case BLK_ZONE_COND_EMPTY:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			goto unlock;
		break;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_CLOSED:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			goto unlock;
		dev->nr_zones_closed--;
		break;
	case BLK_ZONE_COND_FULL:
	default:
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	zone->cond = BLK_ZONE_COND_EXP_OPEN;
	dev->nr_zones_exp_open++;

unlock:
	null_unlock_zone_res(dev);

	return ret;
}
static blk_status_t null_close_zone(struct nullb_device *dev,
				    struct nullb_zone *zone)
{
	blk_status_t ret;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	null_lock_zone_res(dev);
	ret = __null_close_zone(dev, zone);
	null_unlock_zone_res(dev);

	return ret;
}
static blk_status_t null_finish_zone(struct nullb_device *dev,
				     struct nullb_zone *zone)
{
	blk_status_t ret = BLK_STS_OK;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	null_lock_zone_res(dev);

	switch (zone->cond) {
	case BLK_ZONE_COND_FULL:
		/* finish operation on full is not an error */
		goto unlock;
	case BLK_ZONE_COND_EMPTY:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			goto unlock;
		break;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_EXP_OPEN:
		dev->nr_zones_exp_open--;
		break;
	case BLK_ZONE_COND_CLOSED:
		ret = null_check_zone_resources(dev, zone);
		if (ret != BLK_STS_OK)
			goto unlock;
		dev->nr_zones_closed--;
		break;
	default:
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	zone->cond = BLK_ZONE_COND_FULL;
	zone->wp = zone->start + zone->len;

unlock:
	null_unlock_zone_res(dev);

	return ret;
}
static blk_status_t null_reset_zone(struct nullb_device *dev,
				    struct nullb_zone *zone)
{
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	null_lock_zone_res(dev);

	switch (zone->cond) {
	case BLK_ZONE_COND_EMPTY:
		/* reset operation on empty is not an error */
		null_unlock_zone_res(dev);
		return BLK_STS_OK;
	case BLK_ZONE_COND_IMP_OPEN:
		dev->nr_zones_imp_open--;
		break;
	case BLK_ZONE_COND_EXP_OPEN:
		dev->nr_zones_exp_open--;
		break;
	case BLK_ZONE_COND_CLOSED:
		dev->nr_zones_closed--;
		break;
	case BLK_ZONE_COND_FULL:
		break;
	default:
		null_unlock_zone_res(dev);
		return BLK_STS_IOERR;
	}

	zone->cond = BLK_ZONE_COND_EMPTY;
	zone->wp = zone->start;

	null_unlock_zone_res(dev);

	if (dev->memory_backed)
		return null_handle_discard(dev, zone->start, zone->len);

	return BLK_STS_OK;
}
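/*
 * A reset returns a zone to the empty condition and rewinds its write
 * pointer; from user space this corresponds to e.g.:
 *
 *   blkzone reset /dev/nullb0
 *
 * For memory-backed devices the backing pages are discarded as well.
 */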
static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_op op,
				   sector_t sector)
{
	struct nullb_device *dev = cmd->nq->dev;
	unsigned int zone_no;
	struct nullb_zone *zone;
	blk_status_t ret;
	size_t i;

	if (op == REQ_OP_ZONE_RESET_ALL) {
		for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
			zone = &dev->zones[i];
			null_lock_zone(dev, zone);
			if (zone->cond != BLK_ZONE_COND_EMPTY &&
			    zone->cond != BLK_ZONE_COND_READONLY &&
			    zone->cond != BLK_ZONE_COND_OFFLINE) {
				null_reset_zone(dev, zone);
				trace_nullb_zone_op(cmd, i, zone->cond);
			}
			null_unlock_zone(dev, zone);
		}
		return BLK_STS_OK;
	}

	zone_no = null_zone_no(dev, sector);
	zone = &dev->zones[zone_no];

	null_lock_zone(dev, zone);

	if (zone->cond == BLK_ZONE_COND_READONLY ||
	    zone->cond == BLK_ZONE_COND_OFFLINE) {
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	switch (op) {
	case REQ_OP_ZONE_RESET:
		ret = null_reset_zone(dev, zone);
		break;
	case REQ_OP_ZONE_OPEN:
		ret = null_open_zone(dev, zone);
		break;
	case REQ_OP_ZONE_CLOSE:
		ret = null_close_zone(dev, zone);
		break;
	case REQ_OP_ZONE_FINISH:
		ret = null_finish_zone(dev, zone);
		break;
	default:
		ret = BLK_STS_NOTSUPP;
		break;
	}

	if (ret == BLK_STS_OK)
		trace_nullb_zone_op(cmd, zone_no, zone->cond);

unlock:
	null_unlock_zone(dev, zone);

	return ret;
}
blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_op op,
				    sector_t sector, sector_t nr_sectors)
{
	struct nullb_device *dev;
	struct nullb_zone *zone;
	blk_status_t sts;

	switch (op) {
	case REQ_OP_WRITE:
		return null_zone_write(cmd, sector, nr_sectors, false);
	case REQ_OP_ZONE_APPEND:
		return null_zone_write(cmd, sector, nr_sectors, true);
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_RESET_ALL:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return null_zone_mgmt(cmd, op, sector);
	default:
		dev = cmd->nq->dev;
		zone = &dev->zones[null_zone_no(dev, sector)];
		if (zone->cond == BLK_ZONE_COND_OFFLINE)
			return BLK_STS_IOERR;

		null_lock_zone(dev, zone);
		sts = null_process_cmd(cmd, op, sector, nr_sectors);
		null_unlock_zone(dev, zone);
		return sts;
	}
}
/*
 * Set a zone in the read-only or offline condition.
 */
static void null_set_zone_cond(struct nullb_device *dev,
			       struct nullb_zone *zone, enum blk_zone_cond cond)
{
	if (WARN_ON_ONCE(cond != BLK_ZONE_COND_READONLY &&
			 cond != BLK_ZONE_COND_OFFLINE))
		return;

	null_lock_zone(dev, zone);

	/*
	 * If the read-only condition is requested for a zone already in the
	 * read-only condition, restore the normal empty condition. Do the
	 * same if the offline condition is requested for an offline zone.
	 * Otherwise, set the specified condition on the zone, finishing it
	 * beforehand to free up zone resources.
	 */
	if (zone->cond == cond) {
		zone->cond = BLK_ZONE_COND_EMPTY;
		zone->wp = zone->start;
		if (dev->memory_backed)
			null_handle_discard(dev, zone->start, zone->len);
	} else {
		if (zone->cond != BLK_ZONE_COND_READONLY &&
		    zone->cond != BLK_ZONE_COND_OFFLINE)
			null_finish_zone(dev, zone);
		zone->cond = cond;
		zone->wp = (sector_t)-1;
	}

	null_unlock_zone(dev, zone);
}
/*
 * Identify a zone from the sector written to the configfs file. Then set the
 * zone condition on that zone.
 */
ssize_t zone_cond_store(struct nullb_device *dev, const char *page,
			size_t count, enum blk_zone_cond cond)
{
	unsigned long long sector;
	unsigned int zone_no;
	int ret;

	if (!dev->zoned) {
		pr_err("null_blk device is not zoned\n");
		return -EINVAL;
	}

	if (!dev->zones) {
		pr_err("null_blk device is not yet powered\n");
		return -EINVAL;
	}

	ret = kstrtoull(page, 0, &sector);
	if (ret < 0)
		return ret;

	zone_no = null_zone_no(dev, sector);
	if (zone_no >= dev->nr_zones) {
		pr_err("Sector out of range\n");
		return -EINVAL;
	}

	if (dev->zones[zone_no].type == BLK_ZONE_TYPE_CONVENTIONAL) {
		pr_err("Cannot change condition of conventional zones\n");
		return -EINVAL;
	}

	null_set_zone_cond(dev, &dev->zones[zone_no], cond);

	return count;
}
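/*
 * Example (assuming the zone_readonly configfs attribute of a device named
 * nullb0): writing a sector number marks the containing zone read-only, and
 * writing the same sector again restores the zone to the empty condition:
 *
 *   echo 524288 > /sys/kernel/config/nullb/nullb0/zone_readonly
 */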