// SPDX-License-Identifier: GPL-2.0
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include "null_blk.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#undef pr_fmt
#define pr_fmt(fmt)	"null_blk: " fmt

#define NULL_ZONE_INVALID_WP	((sector_t)-1)

static inline sector_t mb_to_sects(unsigned long mb)
{
	return ((sector_t)mb * SZ_1M) >> SECTOR_SHIFT;
}

static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
{
	return sect >> ilog2(dev->zone_size_sects);
}

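/*
 * Example of the zone lookup above: with zone_size = 256 MB,
 * zone_size_sects = (256 * SZ_1M) >> SECTOR_SHIFT = 524288, so sector
 * 1048576 maps to zone 1048576 >> ilog2(524288) = zone 2.
 */
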
static inline void null_init_zone_lock(struct nullb_device *dev,
				       struct nullb_zone *zone)
{
	if (!dev->memory_backed)
		spin_lock_init(&zone->spinlock);
	else
		mutex_init(&zone->mutex);
}

static inline void null_lock_zone(struct nullb_device *dev,
				  struct nullb_zone *zone)
{
	if (!dev->memory_backed)
		spin_lock_irq(&zone->spinlock);
	else
		mutex_lock(&zone->mutex);
}

static inline void null_unlock_zone(struct nullb_device *dev,
				    struct nullb_zone *zone)
{
	if (!dev->memory_backed)
		spin_unlock_irq(&zone->spinlock);
	else
		mutex_unlock(&zone->mutex);
}

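/*
 * Note on the zone locking helpers above: memory-backed devices may sleep
 * while processing a command (e.g. when allocating backing pages), so the
 * zone lock must be a mutex in that case. Devices without memory backing
 * complete commands without sleeping and can use the cheaper spinlock.
 */
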
int null_init_zoned_dev(struct nullb_device *dev,
		struct queue_limits *lim)
{
	sector_t dev_capacity_sects, zone_capacity_sects;
	struct nullb_zone *zone;
	sector_t sector = 0;
	unsigned int i;

	if (!is_power_of_2(dev->zone_size)) {
		pr_err("zone_size must be power-of-two\n");
		return -EINVAL;
	}
	if (dev->zone_size > dev->size) {
		pr_err("Zone size larger than device capacity\n");
		return -EINVAL;
	}

	if (!dev->zone_capacity)
		dev->zone_capacity = dev->zone_size;

	if (dev->zone_capacity > dev->zone_size) {
		pr_err("zone capacity (%lu MB) larger than zone size (%lu MB)\n",
		       dev->zone_capacity, dev->zone_size);
		return -EINVAL;
	}

	/*
	 * If a smaller zone capacity was requested, do not allow a smaller
	 * last zone as well: such a configuration does not correspond to any
	 * real zoned device.
	 */
	if (dev->zone_capacity != dev->zone_size &&
	    dev->size & (dev->zone_size - 1)) {
		pr_err("A smaller last zone is not allowed with zone capacity smaller than zone size.\n");
		return -EINVAL;
	}

	zone_capacity_sects = mb_to_sects(dev->zone_capacity);
	dev_capacity_sects = mb_to_sects(dev->size);
	dev->zone_size_sects = mb_to_sects(dev->zone_size);
	dev->nr_zones = round_up(dev_capacity_sects, dev->zone_size_sects)
		>> ilog2(dev->zone_size_sects);

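	/*
	 * Worked example of the geometry above (illustrative values): a
	 * 1000 MB device with 256 MB zones gives dev_capacity_sects =
	 * 2048000 and zone_size_sects = 524288, so nr_zones =
	 * round_up(2048000, 524288) >> 19 = 4, the last zone covering only
	 * the remaining 475136 sectors.
	 */
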
	dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct nullb_zone),
				    GFP_KERNEL | __GFP_ZERO);
	if (!dev->zones)
		return -ENOMEM;

	spin_lock_init(&dev->zone_res_lock);

	if (dev->zone_nr_conv >= dev->nr_zones) {
		dev->zone_nr_conv = dev->nr_zones - 1;
		pr_info("changed the number of conventional zones to %u\n",
			dev->zone_nr_conv);
	}

	dev->zone_append_max_sectors =
		min(ALIGN_DOWN(dev->zone_append_max_sectors,
			       dev->blocksize >> SECTOR_SHIFT),
		    zone_capacity_sects);

	/* Max active zones must be < the number of seq zones to be enforceable */
	if (dev->zone_max_active >= dev->nr_zones - dev->zone_nr_conv) {
		dev->zone_max_active = 0;
		pr_info("zone_max_active limit disabled, limit >= zone count\n");
	}

	/* Max open zones has to be <= max active zones */
	if (dev->zone_max_active && dev->zone_max_open > dev->zone_max_active) {
		dev->zone_max_open = dev->zone_max_active;
		pr_info("changed the maximum number of open zones to %u\n",
			dev->zone_max_open);
	} else if (dev->zone_max_open >= dev->nr_zones - dev->zone_nr_conv) {
		dev->zone_max_open = 0;
		pr_info("zone_max_open limit disabled, limit >= zone count\n");
	}

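	/*
	 * Open/active zone accounting (under zone_res_lock) is needed only
	 * when at least one of the two limits is set. imp_close_zone_no
	 * seeds the round-robin search used when an implicitly open zone
	 * must be closed to free up a zone resource.
	 */
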
	dev->need_zone_res_mgmt = dev->zone_max_active || dev->zone_max_open;
	dev->imp_close_zone_no = dev->zone_nr_conv;

	for (i = 0; i < dev->zone_nr_conv; i++) {
		zone = &dev->zones[i];

		null_init_zone_lock(dev, zone);
		zone->start = sector;
		zone->len = dev->zone_size_sects;
		zone->capacity = zone->len;
		zone->wp = zone->start + zone->len;
		zone->type = BLK_ZONE_TYPE_CONVENTIONAL;
		zone->cond = BLK_ZONE_COND_NOT_WP;

		sector += dev->zone_size_sects;
	}

	for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
		zone = &dev->zones[i];

		null_init_zone_lock(dev, zone);
		zone->start = zone->wp = sector;
		if (zone->start + dev->zone_size_sects > dev_capacity_sects)
			zone->len = dev_capacity_sects - zone->start;
		else
			zone->len = dev->zone_size_sects;
		zone->capacity =
			min_t(sector_t, zone->len, zone_capacity_sects);
		zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
		zone->cond = BLK_ZONE_COND_EMPTY;

		sector += dev->zone_size_sects;
	}

	lim->zoned = true;
	lim->chunk_sectors = dev->zone_size_sects;
	lim->max_zone_append_sectors = dev->zone_append_max_sectors;
	lim->max_open_zones = dev->zone_max_open;
	lim->max_active_zones = dev->zone_max_active;
	return 0;
}

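/*
 * Typical zoned setup from user space (a sketch only; values are examples
 * and the full parameter list is in the null_blk documentation):
 *
 *   modprobe null_blk zoned=1 zone_size=256 zone_capacity=192 \
 *           zone_nr_conv=2 zone_max_open=4 zone_max_active=8
 */
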
int null_register_zoned_dev(struct nullb *nullb)
{
	struct request_queue *q = nullb->q;
	struct gendisk *disk = nullb->disk;

	blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
	disk->nr_zones = bdev_nr_zones(disk->part0);

	pr_info("%s: using %s zone append\n",
		disk->disk_name,
		queue_emulates_zone_append(q) ? "emulated" : "native");

	return blk_revalidate_disk_zones(disk);
}

void null_free_zoned_dev(struct nullb_device *dev)
{
	kvfree(dev->zones);
	dev->zones = NULL;
}

int null_report_zones(struct gendisk *disk, sector_t sector,
		unsigned int nr_zones, report_zones_cb cb, void *data)
{
	struct nullb *nullb = disk->private_data;
	struct nullb_device *dev = nullb->dev;
	unsigned int first_zone, i;
	struct nullb_zone *zone;
	struct blk_zone blkz;
	int error;

	first_zone = null_zone_no(dev, sector);
	if (first_zone >= dev->nr_zones)
		return 0;

	nr_zones = min(nr_zones, dev->nr_zones - first_zone);
	trace_nullb_report_zones(nullb, nr_zones);

	memset(&blkz, 0, sizeof(struct blk_zone));
	zone = &dev->zones[first_zone];
	for (i = 0; i < nr_zones; i++, zone++) {
		/*
		 * Stacked DM target drivers will remap the zone information by
		 * modifying the zone information passed to the report callback.
		 * So use a local copy to avoid corruption of the device zone
		 * array.
		 */
		null_lock_zone(dev, zone);
		blkz.start = zone->start;
		blkz.len = zone->len;
		blkz.wp = zone->wp;
		blkz.type = zone->type;
		blkz.cond = zone->cond;
		blkz.capacity = zone->capacity;
		null_unlock_zone(dev, zone);

		error = cb(&blkz, i, data);
		if (error)
			return error;
	}

	return nr_zones;
}

/*
 * This is called in the case of memory backing from null_process_cmd()
 * with the target zone already locked.
 */
size_t null_zone_valid_read_len(struct nullb *nullb,
				sector_t sector, unsigned int len)
{
	struct nullb_device *dev = nullb->dev;
	struct nullb_zone *zone = &dev->zones[null_zone_no(dev, sector)];
	unsigned int nr_sectors = len >> SECTOR_SHIFT;

	/* Read must be below the write pointer position */
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL ||
	    sector + nr_sectors <= zone->wp)
		return len;

	if (sector > zone->wp)
		return 0;

	return (zone->wp - sector) << SECTOR_SHIFT;
}

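/*
 * Example of the clamping above: with zone->wp = 100 and a 16-sector read
 * starting at sector 96, only (100 - 96) << SECTOR_SHIFT = 2048 bytes of
 * valid data precede the write pointer; the caller zero-fills the rest.
 */
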
static void null_close_imp_open_zone(struct nullb_device *dev)
{
	struct nullb_zone *zone;
	unsigned int zno, i;

	zno = dev->imp_close_zone_no;
	if (zno >= dev->nr_zones)
		zno = dev->zone_nr_conv;

	for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
		zone = &dev->zones[zno];
		zno++;
		if (zno >= dev->nr_zones)
			zno = dev->zone_nr_conv;

		if (zone->cond == BLK_ZONE_COND_IMP_OPEN) {
			dev->nr_zones_imp_open--;
			if (zone->wp == zone->start) {
				zone->cond = BLK_ZONE_COND_EMPTY;
			} else {
				zone->cond = BLK_ZONE_COND_CLOSED;
				dev->nr_zones_closed++;
			}
			dev->imp_close_zone_no = zno;
			return;
		}
	}
}

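/*
 * The search above makes at most one full pass over the sequential zones,
 * starting at imp_close_zone_no and wrapping around, so successive calls
 * close implicitly open zones in round-robin order instead of repeatedly
 * penalizing the lowest-numbered zone.
 */
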
static blk_status_t null_check_active(struct nullb_device *dev)
{
	if (!dev->zone_max_active)
		return BLK_STS_OK;

	if (dev->nr_zones_exp_open + dev->nr_zones_imp_open +
	    dev->nr_zones_closed < dev->zone_max_active)
		return BLK_STS_OK;

	return BLK_STS_ZONE_ACTIVE_RESOURCE;
}

static blk_status_t null_check_open(struct nullb_device *dev)
{
	if (!dev->zone_max_open)
		return BLK_STS_OK;

	if (dev->nr_zones_exp_open + dev->nr_zones_imp_open < dev->zone_max_open)
		return BLK_STS_OK;

	if (dev->nr_zones_imp_open) {
		if (null_check_active(dev) == BLK_STS_OK) {
			null_close_imp_open_zone(dev);
			return BLK_STS_OK;
		}
	}

	return BLK_STS_ZONE_OPEN_RESOURCE;
}

/*
 * This function matches the manage open zone resources function in the ZBC
 * standard, with the addition of max active zones support (added in the ZNS
 * standard).
 *
 * The function determines if a zone can transition to implicit open or
 * explicit open, while maintaining the max open zone (and max active zone)
 * limit(s). It may close an implicit open zone in order to make additional
 * zone resources available.
 *
 * ZBC states that an implicit open zone shall be closed only if there is not
 * room within the open limit. However, with the addition of an active limit,
 * it is not certain that closing an implicit open zone will allow a new zone
 * to be opened, since we might already be at the active limit capacity.
 */
static blk_status_t null_check_zone_resources(struct nullb_device *dev,
					      struct nullb_zone *zone)
{
	blk_status_t ret;

	switch (zone->cond) {
	case BLK_ZONE_COND_EMPTY:
		ret = null_check_active(dev);
		if (ret != BLK_STS_OK)
			return ret;
		fallthrough;
	case BLK_ZONE_COND_CLOSED:
		return null_check_open(dev);
	default:
		/* Should never be called for other states */
		WARN_ON(1);
		return BLK_STS_IOERR;
	}
}

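/*
 * Note on the switch above: opening an EMPTY zone consumes both an active
 * and an open zone resource, hence the fallthrough from the active check
 * to the open check. A CLOSED zone is already accounted as active, so only
 * the open limit needs to be checked.
 */
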
static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
				    unsigned int nr_sectors, bool append)
{
	struct nullb_device *dev = cmd->nq->dev;
	unsigned int zno = null_zone_no(dev, sector);
	struct nullb_zone *zone = &dev->zones[zno];
	blk_status_t ret;

	trace_nullb_zone_op(cmd, zno, zone->cond);

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) {
		if (append)
			return BLK_STS_IOERR;
		return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
	}

	null_lock_zone(dev, zone);

	/*
	 * Regular writes must be at the write pointer position. Zone append
	 * writes are automatically issued at the write pointer and the
	 * position returned using the request sector. Note that we do not
	 * check the zone condition because for FULL, READONLY and OFFLINE
	 * zones, the sector check against the zone write pointer will always
	 * result in failing the command.
	 */
	if (append) {
		if (WARN_ON_ONCE(!dev->zone_append_max_sectors) ||
		    zone->wp == NULL_ZONE_INVALID_WP) {
			ret = BLK_STS_IOERR;
			goto unlock_zone;
		}
		sector = zone->wp;
		blk_mq_rq_from_pdu(cmd)->__sector = sector;
	}

	if (sector != zone->wp ||
	    zone->wp + nr_sectors > zone->start + zone->capacity) {
		ret = BLK_STS_IOERR;
		goto unlock_zone;
	}

	if (zone->cond == BLK_ZONE_COND_CLOSED ||
	    zone->cond == BLK_ZONE_COND_EMPTY) {
		if (dev->need_zone_res_mgmt) {
			spin_lock(&dev->zone_res_lock);

			ret = null_check_zone_resources(dev, zone);
			if (ret != BLK_STS_OK) {
				spin_unlock(&dev->zone_res_lock);
				goto unlock_zone;
			}
			if (zone->cond == BLK_ZONE_COND_CLOSED) {
				dev->nr_zones_closed--;
				dev->nr_zones_imp_open++;
			} else if (zone->cond == BLK_ZONE_COND_EMPTY) {
				dev->nr_zones_imp_open++;
			}

			spin_unlock(&dev->zone_res_lock);
		}

		zone->cond = BLK_ZONE_COND_IMP_OPEN;
	}

	ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
	if (ret != BLK_STS_OK)
		goto unlock_zone;

	zone->wp += nr_sectors;
	if (zone->wp == zone->start + zone->capacity) {
		if (dev->need_zone_res_mgmt) {
			spin_lock(&dev->zone_res_lock);
			if (zone->cond == BLK_ZONE_COND_EXP_OPEN)
				dev->nr_zones_exp_open--;
			else if (zone->cond == BLK_ZONE_COND_IMP_OPEN)
				dev->nr_zones_imp_open--;
			spin_unlock(&dev->zone_res_lock);
		}
		zone->cond = BLK_ZONE_COND_FULL;
	}

	ret = BLK_STS_OK;

unlock_zone:
	null_unlock_zone(dev, zone);

	return ret;
}

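/*
 * Note on lock ordering in the write path above: the per-zone lock is
 * taken first and dev->zone_res_lock is nested inside it, held only for
 * the short critical sections updating the open/active zone counters.
 */
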
static blk_status_t null_open_zone(struct nullb_device *dev,
				   struct nullb_zone *zone)
{
	blk_status_t ret = BLK_STS_OK;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	switch (zone->cond) {
	case BLK_ZONE_COND_EXP_OPEN:
		/* Open operation on exp open is not an error */
		return BLK_STS_OK;
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_IMP_OPEN:
	case BLK_ZONE_COND_CLOSED:
		break;
	case BLK_ZONE_COND_FULL:
	default:
		return BLK_STS_IOERR;
	}

	if (dev->need_zone_res_mgmt) {
		spin_lock(&dev->zone_res_lock);

		switch (zone->cond) {
		case BLK_ZONE_COND_EMPTY:
			ret = null_check_zone_resources(dev, zone);
			if (ret != BLK_STS_OK) {
				spin_unlock(&dev->zone_res_lock);
				return ret;
			}
			break;
		case BLK_ZONE_COND_IMP_OPEN:
			dev->nr_zones_imp_open--;
			break;
		case BLK_ZONE_COND_CLOSED:
			ret = null_check_zone_resources(dev, zone);
			if (ret != BLK_STS_OK) {
				spin_unlock(&dev->zone_res_lock);
				return ret;
			}
			dev->nr_zones_closed--;
			break;
		default:
			break;
		}

		dev->nr_zones_exp_open++;

		spin_unlock(&dev->zone_res_lock);
	}

	zone->cond = BLK_ZONE_COND_EXP_OPEN;

	return BLK_STS_OK;
}

static blk_status_t null_close_zone(struct nullb_device *dev,
				    struct nullb_zone *zone)
{
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	switch (zone->cond) {
	case BLK_ZONE_COND_CLOSED:
		/* close operation on closed is not an error */
		return BLK_STS_OK;
	case BLK_ZONE_COND_IMP_OPEN:
	case BLK_ZONE_COND_EXP_OPEN:
		break;
	case BLK_ZONE_COND_EMPTY:
	case BLK_ZONE_COND_FULL:
	default:
		return BLK_STS_IOERR;
	}

	if (dev->need_zone_res_mgmt) {
		spin_lock(&dev->zone_res_lock);

		switch (zone->cond) {
		case BLK_ZONE_COND_IMP_OPEN:
			dev->nr_zones_imp_open--;
			break;
		case BLK_ZONE_COND_EXP_OPEN:
			dev->nr_zones_exp_open--;
			break;
		default:
			break;
		}

		if (zone->wp > zone->start)
			dev->nr_zones_closed++;

		spin_unlock(&dev->zone_res_lock);
	}

	if (zone->wp == zone->start)
		zone->cond = BLK_ZONE_COND_EMPTY;
	else
		zone->cond = BLK_ZONE_COND_CLOSED;

	return BLK_STS_OK;
}

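/*
 * Closing a zone that was never written (wp == start) returns it to the
 * EMPTY condition rather than CLOSED, which is why nr_zones_closed is only
 * incremented when wp > start: an empty zone does not consume an active
 * zone resource.
 */
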
static blk_status_t null_finish_zone(struct nullb_device *dev,
				     struct nullb_zone *zone)
{
	blk_status_t ret = BLK_STS_OK;

	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	if (dev->need_zone_res_mgmt) {
		spin_lock(&dev->zone_res_lock);

		switch (zone->cond) {
		case BLK_ZONE_COND_FULL:
			/* Finish operation on full is not an error */
			spin_unlock(&dev->zone_res_lock);
			return BLK_STS_OK;
		case BLK_ZONE_COND_EMPTY:
			ret = null_check_zone_resources(dev, zone);
			if (ret != BLK_STS_OK) {
				spin_unlock(&dev->zone_res_lock);
				return ret;
			}
			break;
		case BLK_ZONE_COND_IMP_OPEN:
			dev->nr_zones_imp_open--;
			break;
		case BLK_ZONE_COND_EXP_OPEN:
			dev->nr_zones_exp_open--;
			break;
		case BLK_ZONE_COND_CLOSED:
			ret = null_check_zone_resources(dev, zone);
			if (ret != BLK_STS_OK) {
				spin_unlock(&dev->zone_res_lock);
				return ret;
			}
			dev->nr_zones_closed--;
			break;
		default:
			spin_unlock(&dev->zone_res_lock);
			return BLK_STS_IOERR;
		}

		spin_unlock(&dev->zone_res_lock);
	}

	zone->cond = BLK_ZONE_COND_FULL;
	zone->wp = zone->start + zone->len;

	return BLK_STS_OK;
}

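/*
 * After a finish, the write pointer points to the end of the zone
 * (start + len), not to start + capacity, so a FULL zone reports its write
 * pointer at the zone end even when the usable capacity is smaller than
 * the zone size.
 */
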
static blk_status_t null_reset_zone(struct nullb_device *dev,
				    struct nullb_zone *zone)
{
	if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
		return BLK_STS_IOERR;

	if (dev->need_zone_res_mgmt) {
		spin_lock(&dev->zone_res_lock);

		switch (zone->cond) {
		case BLK_ZONE_COND_IMP_OPEN:
			dev->nr_zones_imp_open--;
			break;
		case BLK_ZONE_COND_EXP_OPEN:
			dev->nr_zones_exp_open--;
			break;
		case BLK_ZONE_COND_CLOSED:
			dev->nr_zones_closed--;
			break;
		case BLK_ZONE_COND_EMPTY:
		case BLK_ZONE_COND_FULL:
			break;
		default:
			spin_unlock(&dev->zone_res_lock);
			return BLK_STS_IOERR;
		}

		spin_unlock(&dev->zone_res_lock);
	}

	zone->cond = BLK_ZONE_COND_EMPTY;
	zone->wp = zone->start;

	if (dev->memory_backed)
		return null_handle_discard(dev, zone->start, zone->len);

	return BLK_STS_OK;
}

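/*
 * For memory-backed devices, resetting a zone also discards its backing
 * pages via null_handle_discard(), so subsequent reads of the reset zone
 * return zeroes rather than stale data.
 */
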
static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_op op,
				   sector_t sector)
{
	struct nullb_device *dev = cmd->nq->dev;
	unsigned int zone_no;
	struct nullb_zone *zone;
	blk_status_t ret;
	size_t i;

	if (op == REQ_OP_ZONE_RESET_ALL) {
		for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
			zone = &dev->zones[i];
			null_lock_zone(dev, zone);
			if (zone->cond != BLK_ZONE_COND_EMPTY &&
			    zone->cond != BLK_ZONE_COND_READONLY &&
			    zone->cond != BLK_ZONE_COND_OFFLINE) {
				null_reset_zone(dev, zone);
				trace_nullb_zone_op(cmd, i, zone->cond);
			}
			null_unlock_zone(dev, zone);
		}
		return BLK_STS_OK;
	}

	zone_no = null_zone_no(dev, sector);
	zone = &dev->zones[zone_no];

	null_lock_zone(dev, zone);

	if (zone->cond == BLK_ZONE_COND_READONLY ||
	    zone->cond == BLK_ZONE_COND_OFFLINE) {
		ret = BLK_STS_IOERR;
		goto unlock;
	}

	switch (op) {
	case REQ_OP_ZONE_RESET:
		ret = null_reset_zone(dev, zone);
		break;
	case REQ_OP_ZONE_OPEN:
		ret = null_open_zone(dev, zone);
		break;
	case REQ_OP_ZONE_CLOSE:
		ret = null_close_zone(dev, zone);
		break;
	case REQ_OP_ZONE_FINISH:
		ret = null_finish_zone(dev, zone);
		break;
	default:
		ret = BLK_STS_NOTSUPP;
		break;
	}

	if (ret == BLK_STS_OK)
		trace_nullb_zone_op(cmd, zone_no, zone->cond);

unlock:
	null_unlock_zone(dev, zone);

	return ret;
}

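/*
 * REQ_OP_ZONE_RESET_ALL is handled above by iterating over the sequential
 * zones only: conventional zones have no write pointer to reset, and
 * read-only or offline zones are silently skipped rather than failed.
 */
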
blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_op op,
				    sector_t sector, sector_t nr_sectors)
{
	struct nullb_device *dev;
	struct nullb_zone *zone;
	blk_status_t sts;

	switch (op) {
	case REQ_OP_WRITE:
		return null_zone_write(cmd, sector, nr_sectors, false);
	case REQ_OP_ZONE_APPEND:
		return null_zone_write(cmd, sector, nr_sectors, true);
	case REQ_OP_ZONE_RESET:
	case REQ_OP_ZONE_RESET_ALL:
	case REQ_OP_ZONE_OPEN:
	case REQ_OP_ZONE_CLOSE:
	case REQ_OP_ZONE_FINISH:
		return null_zone_mgmt(cmd, op, sector);
	default:
		dev = cmd->nq->dev;
		zone = &dev->zones[null_zone_no(dev, sector)];
		if (zone->cond == BLK_ZONE_COND_OFFLINE)
			return BLK_STS_IOERR;

		null_lock_zone(dev, zone);
		sts = null_process_cmd(cmd, op, sector, nr_sectors);
		null_unlock_zone(dev, zone);
		return sts;
	}
}

/*
 * Set a zone in the read-only or offline condition.
 */
static void null_set_zone_cond(struct nullb_device *dev,
			       struct nullb_zone *zone, enum blk_zone_cond cond)
{
	if (WARN_ON_ONCE(cond != BLK_ZONE_COND_READONLY &&
			 cond != BLK_ZONE_COND_OFFLINE))
		return;

	null_lock_zone(dev, zone);

	/*
	 * If the read-only condition is requested for a zone that is already
	 * read-only, restore the normal empty condition. Do the same if the
	 * offline condition is requested for an offline zone. Otherwise, set
	 * the specified zone condition, finishing the zone first to release
	 * its zone resources.
	 */
	if (zone->cond == cond) {
		zone->cond = BLK_ZONE_COND_EMPTY;
		zone->wp = zone->start;
		if (dev->memory_backed)
			null_handle_discard(dev, zone->start, zone->len);
	} else {
		if (zone->cond != BLK_ZONE_COND_READONLY &&
		    zone->cond != BLK_ZONE_COND_OFFLINE)
			null_finish_zone(dev, zone);
		zone->cond = cond;
		zone->wp = NULL_ZONE_INVALID_WP;
	}

	null_unlock_zone(dev, zone);
}

/*
 * Identify a zone from the sector written to the configfs file, then set
 * the zone condition.
 */
ssize_t zone_cond_store(struct nullb_device *dev, const char *page,
			size_t count, enum blk_zone_cond cond)
{
	unsigned long long sector;
	unsigned int zone_no;
	int ret;

	if (!dev->zoned) {
		pr_err("null_blk device is not zoned\n");
		return -EINVAL;
	}
	if (!dev->zones) {
		pr_err("null_blk device is not yet powered\n");
		return -EINVAL;
	}

	ret = kstrtoull(page, 0, &sector);
	if (ret < 0)
		return ret;

	zone_no = null_zone_no(dev, sector);
	if (zone_no >= dev->nr_zones) {
		pr_err("Sector out of range\n");
		return -EINVAL;
	}
	if (dev->zones[zone_no].type == BLK_ZONE_TYPE_CONVENTIONAL) {
		pr_err("Cannot change condition of conventional zones\n");
		return -EINVAL;
	}

	null_set_zone_cond(dev, &dev->zones[zone_no], cond);

	return count;
}

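/*
 * Example usage (a sketch; assumes the zone_readonly and zone_offline
 * configfs attributes that call this helper, and a device named nullb0):
 *
 *   # Set the zone containing sector 524288 to the read-only condition:
 *   echo 524288 > /sys/kernel/config/nullb/nullb0/zone_readonly
 *
 * Writing the same sector again restores the zone to the empty condition,
 * as implemented by null_set_zone_cond() above.
 */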