// SPDX-License-Identifier: GPL-2.0
#include <linux/vmalloc.h>
#include <linux/bitmap.h>
#include "null_blk.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#undef pr_fmt
#define pr_fmt(fmt)     "null_blk: " fmt

#define NULL_ZONE_INVALID_WP    ((sector_t)-1)

static inline sector_t mb_to_sects(unsigned long mb)
{
        return ((sector_t)mb * SZ_1M) >> SECTOR_SHIFT;
}

static inline unsigned int null_zone_no(struct nullb_device *dev, sector_t sect)
{
        return sect >> ilog2(dev->zone_size_sects);
}

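/*
 * Zone locking: with memory backing, processing a command may sleep while
 * copying data, so per-zone mutexes are used. Without memory backing, command
 * processing never sleeps and an IRQ-disabling spinlock is sufficient.
 */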
static inline void null_init_zone_lock(struct nullb_device *dev,
                                       struct nullb_zone *zone)
{
        if (!dev->memory_backed)
                spin_lock_init(&zone->spinlock);
        else
                mutex_init(&zone->mutex);
}

static inline void null_lock_zone(struct nullb_device *dev,
                                  struct nullb_zone *zone)
{
        if (!dev->memory_backed)
                spin_lock_irq(&zone->spinlock);
        else
                mutex_lock(&zone->mutex);
}

static inline void null_unlock_zone(struct nullb_device *dev,
                                    struct nullb_zone *zone)
{
        if (!dev->memory_backed)
                spin_unlock_irq(&zone->spinlock);
        else
                mutex_unlock(&zone->mutex);
}

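/*
 * Validate the device zone configuration (zone size, zone capacity and zone
 * resource limits), allocate and initialize the zone array, and set up the
 * zoned block device queue limits.
 */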
int null_init_zoned_dev(struct nullb_device *dev,
                        struct queue_limits *lim)
{
        sector_t dev_capacity_sects, zone_capacity_sects;
        struct nullb_zone *zone;
        sector_t sector = 0;
        unsigned int i;

        if (!is_power_of_2(dev->zone_size)) {
                pr_err("zone_size must be power-of-two\n");
                return -EINVAL;
        }
        if (dev->zone_size > dev->size) {
                pr_err("Zone size larger than device capacity\n");
                return -EINVAL;
        }

        if (!dev->zone_capacity)
                dev->zone_capacity = dev->zone_size;

        if (dev->zone_capacity > dev->zone_size) {
                pr_err("zone capacity (%lu MB) larger than zone size (%lu MB)\n",
                       dev->zone_capacity, dev->zone_size);
                return -EINVAL;
        }

        /*
         * If a smaller zone capacity was requested, do not allow a smaller last
         * zone at the same time as such zone configuration does not correspond
         * to any real zoned device.
         */
        if (dev->zone_capacity != dev->zone_size &&
            dev->size & (dev->zone_size - 1)) {
                pr_err("A smaller last zone is not allowed with zone capacity smaller than zone size.\n");
                return -EINVAL;
        }

        zone_capacity_sects = mb_to_sects(dev->zone_capacity);
        dev_capacity_sects = mb_to_sects(dev->size);
        dev->zone_size_sects = mb_to_sects(dev->zone_size);
        dev->nr_zones = round_up(dev_capacity_sects, dev->zone_size_sects)
                >> ilog2(dev->zone_size_sects);

        dev->zones = kvmalloc_array(dev->nr_zones, sizeof(struct nullb_zone),
                                    GFP_KERNEL | __GFP_ZERO);
        if (!dev->zones)
                return -ENOMEM;

        spin_lock_init(&dev->zone_res_lock);

        if (dev->zone_nr_conv >= dev->nr_zones) {
                dev->zone_nr_conv = dev->nr_zones - 1;
                pr_info("changed the number of conventional zones to %u\n",
                        dev->zone_nr_conv);
        }

        dev->zone_append_max_sectors =
                min(ALIGN_DOWN(dev->zone_append_max_sectors,
                               dev->blocksize >> SECTOR_SHIFT),
                    zone_capacity_sects);

        /* Max active zones has to be < number of seq zones in order to be enforceable */
        if (dev->zone_max_active >= dev->nr_zones - dev->zone_nr_conv) {
                dev->zone_max_active = 0;
                pr_info("zone_max_active limit disabled, limit >= zone count\n");
        }

        /* Max open zones has to be <= max active zones */
        if (dev->zone_max_active && dev->zone_max_open > dev->zone_max_active) {
                dev->zone_max_open = dev->zone_max_active;
                pr_info("changed the maximum number of open zones to %u\n",
                        dev->zone_max_open);
        } else if (dev->zone_max_open >= dev->nr_zones - dev->zone_nr_conv) {
                dev->zone_max_open = 0;
                pr_info("zone_max_open limit disabled, limit >= zone count\n");
        }
        dev->need_zone_res_mgmt = dev->zone_max_active || dev->zone_max_open;
        dev->imp_close_zone_no = dev->zone_nr_conv;

        for (i = 0; i < dev->zone_nr_conv; i++) {
                zone = &dev->zones[i];

                null_init_zone_lock(dev, zone);
                zone->start = sector;
                zone->len = dev->zone_size_sects;
                zone->capacity = zone->len;
                zone->wp = zone->start + zone->len;
                zone->type = BLK_ZONE_TYPE_CONVENTIONAL;
                zone->cond = BLK_ZONE_COND_NOT_WP;

                sector += dev->zone_size_sects;
        }

        for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
                zone = &dev->zones[i];

                null_init_zone_lock(dev, zone);
                zone->start = zone->wp = sector;
                if (zone->start + dev->zone_size_sects > dev_capacity_sects)
                        zone->len = dev_capacity_sects - zone->start;
                else
                        zone->len = dev->zone_size_sects;
                zone->capacity =
                        min_t(sector_t, zone->len, zone_capacity_sects);
                zone->type = BLK_ZONE_TYPE_SEQWRITE_REQ;
                zone->cond = BLK_ZONE_COND_EMPTY;

                sector += dev->zone_size_sects;
        }

        lim->zoned = true;
        lim->chunk_sectors = dev->zone_size_sects;
        lim->max_zone_append_sectors = dev->zone_append_max_sectors;
        lim->max_open_zones = dev->zone_max_open;
        lim->max_active_zones = dev->zone_max_active;
        return 0;
}

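/*
 * Finish setting up the disk as a zoned block device: enable zone reset all
 * support, set the number of zones and have the block layer revalidate the
 * disk zones.
 */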
int null_register_zoned_dev(struct nullb *nullb)
{
        struct request_queue *q = nullb->q;
        struct gendisk *disk = nullb->disk;

        blk_queue_flag_set(QUEUE_FLAG_ZONE_RESETALL, q);
        disk->nr_zones = bdev_nr_zones(disk->part0);

        pr_info("%s: using %s zone append\n",
                disk->disk_name,
                queue_emulates_zone_append(q) ? "emulated" : "native");

        return blk_revalidate_disk_zones(disk);
}

void null_free_zoned_dev(struct nullb_device *dev)
{
        kvfree(dev->zones);
        dev->zones = NULL;
}

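/*
 * Report zones to the block layer, copying each zone's information to a
 * local struct blk_zone under the zone lock.
 */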
int null_report_zones(struct gendisk *disk, sector_t sector,
                unsigned int nr_zones, report_zones_cb cb, void *data)
{
        struct nullb *nullb = disk->private_data;
        struct nullb_device *dev = nullb->dev;
        unsigned int first_zone, i;
        struct nullb_zone *zone;
        struct blk_zone blkz;
        int error;

        first_zone = null_zone_no(dev, sector);
        if (first_zone >= dev->nr_zones)
                return 0;

        nr_zones = min(nr_zones, dev->nr_zones - first_zone);
        trace_nullb_report_zones(nullb, nr_zones);

        memset(&blkz, 0, sizeof(struct blk_zone));
        zone = &dev->zones[first_zone];
        for (i = 0; i < nr_zones; i++, zone++) {
                /*
                 * Stacked DM target drivers will remap the zone information by
                 * modifying the zone information passed to the report callback.
                 * So use a local copy to avoid corruption of the device zone
                 * array.
                 */
                null_lock_zone(dev, zone);
                blkz.start = zone->start;
                blkz.len = zone->len;
                blkz.wp = zone->wp;
                blkz.type = zone->type;
                blkz.cond = zone->cond;
                blkz.capacity = zone->capacity;
                null_unlock_zone(dev, zone);

                error = cb(&blkz, i, data);
                if (error)
                        return error;
        }

        return nr_zones;
}

/*
 * This is called in the case of memory backing from null_process_cmd()
 * with the target zone already locked.
 */
size_t null_zone_valid_read_len(struct nullb *nullb,
                                sector_t sector, unsigned int len)
{
        struct nullb_device *dev = nullb->dev;
        struct nullb_zone *zone = &dev->zones[null_zone_no(dev, sector)];
        unsigned int nr_sectors = len >> SECTOR_SHIFT;

        /* Read must be below the write pointer position */
        if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL ||
            sector + nr_sectors <= zone->wp)
                return len;

        if (sector > zone->wp)
                return 0;

        return (zone->wp - sector) << SECTOR_SHIFT;
}

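/*
 * Close the first implicitly open zone found, scanning the sequential zones
 * round-robin starting from the zone following the last one closed by this
 * function. Called with dev->zone_res_lock held.
 */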
static void null_close_imp_open_zone(struct nullb_device *dev)
{
        struct nullb_zone *zone;
        unsigned int zno, i;

        zno = dev->imp_close_zone_no;
        if (zno >= dev->nr_zones)
                zno = dev->zone_nr_conv;

        for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
                zone = &dev->zones[zno];
                zno++;
                if (zno >= dev->nr_zones)
                        zno = dev->zone_nr_conv;

                if (zone->cond == BLK_ZONE_COND_IMP_OPEN) {
                        dev->nr_zones_imp_open--;
                        if (zone->wp == zone->start) {
                                zone->cond = BLK_ZONE_COND_EMPTY;
                        } else {
                                zone->cond = BLK_ZONE_COND_CLOSED;
                                dev->nr_zones_closed++;
                        }
                        dev->imp_close_zone_no = zno;
                        return;
                }
        }
}

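/*
 * Check that one more zone can transition to the active state without
 * exceeding the zone_max_active limit. Called with dev->zone_res_lock held.
 */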
static blk_status_t null_check_active(struct nullb_device *dev)
{
        if (!dev->zone_max_active)
                return BLK_STS_OK;

        if (dev->nr_zones_exp_open + dev->nr_zones_imp_open +
                        dev->nr_zones_closed < dev->zone_max_active)
                return BLK_STS_OK;

        return BLK_STS_ZONE_ACTIVE_RESOURCE;
}

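/*
 * Check that one more zone can be opened without exceeding the zone_max_open
 * limit, implicitly closing an open zone to free up a zone resource if
 * needed and possible. Called with dev->zone_res_lock held.
 */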
static blk_status_t null_check_open(struct nullb_device *dev)
{
        if (!dev->zone_max_open)
                return BLK_STS_OK;

        if (dev->nr_zones_exp_open + dev->nr_zones_imp_open < dev->zone_max_open)
                return BLK_STS_OK;

        if (dev->nr_zones_imp_open) {
                if (null_check_active(dev) == BLK_STS_OK) {
                        null_close_imp_open_zone(dev);
                        return BLK_STS_OK;
                }
        }

        return BLK_STS_ZONE_OPEN_RESOURCE;
}

/*
 * This function matches the manage open zone resources function in the ZBC
 * standard, with the addition of max active zones support (added in the ZNS
 * standard).
 *
 * The function determines if a zone can transition to implicit open or
 * explicit open, while maintaining the max open zone (and max active zone)
 * limit(s). It may close an implicit open zone in order to make additional
 * zone resources available.
 *
 * ZBC states that an implicit open zone shall be closed only if there is not
 * room within the open limit. However, with the addition of an active limit,
 * it is not certain that closing an implicit open zone will allow a new zone
 * to be opened, since we might already be at the active limit capacity.
 */
static blk_status_t null_check_zone_resources(struct nullb_device *dev,
                                              struct nullb_zone *zone)
{
        blk_status_t ret;

        switch (zone->cond) {
        case BLK_ZONE_COND_EMPTY:
                ret = null_check_active(dev);
                if (ret != BLK_STS_OK)
                        return ret;
                fallthrough;
        case BLK_ZONE_COND_CLOSED:
                return null_check_open(dev);
        default:
                /* Should never be called for other states */
                WARN_ON(1);
                return BLK_STS_IOERR;
        }
}

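/*
 * Process a regular write (append == false) or a zone append write
 * (append == true). For a zone append, the write is issued at the target
 * zone write pointer and the written position is returned through the
 * request sector.
 */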
static blk_status_t null_zone_write(struct nullb_cmd *cmd, sector_t sector,
                                    unsigned int nr_sectors, bool append)
{
        struct nullb_device *dev = cmd->nq->dev;
        unsigned int zno = null_zone_no(dev, sector);
        struct nullb_zone *zone = &dev->zones[zno];
        blk_status_t ret;

        trace_nullb_zone_op(cmd, zno, zone->cond);

        if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL) {
                if (append)
                        return BLK_STS_IOERR;
                return null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
        }

        null_lock_zone(dev, zone);

        /*
         * Regular writes must be at the write pointer position. Zone append
         * writes are automatically issued at the write pointer and the position
         * returned using the request sector. Note that we do not check the zone
         * condition because for FULL, READONLY and OFFLINE zones, the sector
         * check against the zone write pointer will always result in failing
         * the command.
         */
        if (append) {
                if (WARN_ON_ONCE(!dev->zone_append_max_sectors) ||
                    zone->wp == NULL_ZONE_INVALID_WP) {
                        ret = BLK_STS_IOERR;
                        goto unlock_zone;
                }
                sector = zone->wp;
                blk_mq_rq_from_pdu(cmd)->__sector = sector;
        }

        if (sector != zone->wp ||
            zone->wp + nr_sectors > zone->start + zone->capacity) {
                ret = BLK_STS_IOERR;
                goto unlock_zone;
        }

        if (zone->cond == BLK_ZONE_COND_CLOSED ||
            zone->cond == BLK_ZONE_COND_EMPTY) {
                if (dev->need_zone_res_mgmt) {
                        spin_lock(&dev->zone_res_lock);

                        ret = null_check_zone_resources(dev, zone);
                        if (ret != BLK_STS_OK) {
                                spin_unlock(&dev->zone_res_lock);
                                goto unlock_zone;
                        }
                        if (zone->cond == BLK_ZONE_COND_CLOSED) {
                                dev->nr_zones_closed--;
                                dev->nr_zones_imp_open++;
                        } else if (zone->cond == BLK_ZONE_COND_EMPTY) {
                                dev->nr_zones_imp_open++;
                        }

                        spin_unlock(&dev->zone_res_lock);
                }

                zone->cond = BLK_ZONE_COND_IMP_OPEN;
        }

        ret = null_process_cmd(cmd, REQ_OP_WRITE, sector, nr_sectors);
        if (ret != BLK_STS_OK)
                goto unlock_zone;

        zone->wp += nr_sectors;
        if (zone->wp == zone->start + zone->capacity) {
                if (dev->need_zone_res_mgmt) {
                        spin_lock(&dev->zone_res_lock);
                        if (zone->cond == BLK_ZONE_COND_EXP_OPEN)
                                dev->nr_zones_exp_open--;
                        else if (zone->cond == BLK_ZONE_COND_IMP_OPEN)
                                dev->nr_zones_imp_open--;
                        spin_unlock(&dev->zone_res_lock);
                }
                zone->cond = BLK_ZONE_COND_FULL;
        }

        ret = BLK_STS_OK;

unlock_zone:
        null_unlock_zone(dev, zone);

        return ret;
}

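/*
 * Explicitly open a zone: transition the zone to the explicit open
 * condition, checking first that the zone resource limits are not exceeded.
 */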
static blk_status_t null_open_zone(struct nullb_device *dev,
                                   struct nullb_zone *zone)
{
        blk_status_t ret = BLK_STS_OK;

        if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
                return BLK_STS_IOERR;

        switch (zone->cond) {
        case BLK_ZONE_COND_EXP_OPEN:
                /* Open operation on exp open is not an error */
                return BLK_STS_OK;
        case BLK_ZONE_COND_EMPTY:
        case BLK_ZONE_COND_IMP_OPEN:
        case BLK_ZONE_COND_CLOSED:
                break;
        case BLK_ZONE_COND_FULL:
        default:
                return BLK_STS_IOERR;
        }

        if (dev->need_zone_res_mgmt) {
                spin_lock(&dev->zone_res_lock);

                switch (zone->cond) {
                case BLK_ZONE_COND_EMPTY:
                        ret = null_check_zone_resources(dev, zone);
                        if (ret != BLK_STS_OK) {
                                spin_unlock(&dev->zone_res_lock);
                                return ret;
                        }
                        break;
                case BLK_ZONE_COND_IMP_OPEN:
                        dev->nr_zones_imp_open--;
                        break;
                case BLK_ZONE_COND_CLOSED:
                        ret = null_check_zone_resources(dev, zone);
                        if (ret != BLK_STS_OK) {
                                spin_unlock(&dev->zone_res_lock);
                                return ret;
                        }
                        dev->nr_zones_closed--;
                        break;
                default:
                        break;
                }

                dev->nr_zones_exp_open++;

                spin_unlock(&dev->zone_res_lock);
        }

        zone->cond = BLK_ZONE_COND_EXP_OPEN;

        return BLK_STS_OK;
}

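/*
 * Close an open zone: the zone goes back to the empty condition if its write
 * pointer is at the zone start, and to the closed condition otherwise.
 */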
static blk_status_t null_close_zone(struct nullb_device *dev,
                                    struct nullb_zone *zone)
{
        if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
                return BLK_STS_IOERR;

        switch (zone->cond) {
        case BLK_ZONE_COND_CLOSED:
                /* close operation on closed is not an error */
                return BLK_STS_OK;
        case BLK_ZONE_COND_IMP_OPEN:
        case BLK_ZONE_COND_EXP_OPEN:
                break;
        case BLK_ZONE_COND_EMPTY:
        case BLK_ZONE_COND_FULL:
        default:
                return BLK_STS_IOERR;
        }

        if (dev->need_zone_res_mgmt) {
                spin_lock(&dev->zone_res_lock);

                switch (zone->cond) {
                case BLK_ZONE_COND_IMP_OPEN:
                        dev->nr_zones_imp_open--;
                        break;
                case BLK_ZONE_COND_EXP_OPEN:
                        dev->nr_zones_exp_open--;
                        break;
                default:
                        break;
                }

                if (zone->wp > zone->start)
                        dev->nr_zones_closed++;

                spin_unlock(&dev->zone_res_lock);
        }

        if (zone->wp == zone->start)
                zone->cond = BLK_ZONE_COND_EMPTY;
        else
                zone->cond = BLK_ZONE_COND_CLOSED;

        return BLK_STS_OK;
}

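/*
 * Finish a zone: transition the zone to the full condition, with its write
 * pointer set to the end of the zone.
 */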
static blk_status_t null_finish_zone(struct nullb_device *dev,
                                     struct nullb_zone *zone)
{
        blk_status_t ret = BLK_STS_OK;

        if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
                return BLK_STS_IOERR;

        if (dev->need_zone_res_mgmt) {
                spin_lock(&dev->zone_res_lock);

                switch (zone->cond) {
                case BLK_ZONE_COND_FULL:
                        /* Finish operation on full is not an error */
                        spin_unlock(&dev->zone_res_lock);
                        return BLK_STS_OK;
                case BLK_ZONE_COND_EMPTY:
                        ret = null_check_zone_resources(dev, zone);
                        if (ret != BLK_STS_OK) {
                                spin_unlock(&dev->zone_res_lock);
                                return ret;
                        }
                        break;
                case BLK_ZONE_COND_IMP_OPEN:
                        dev->nr_zones_imp_open--;
                        break;
                case BLK_ZONE_COND_EXP_OPEN:
                        dev->nr_zones_exp_open--;
                        break;
                case BLK_ZONE_COND_CLOSED:
                        ret = null_check_zone_resources(dev, zone);
                        if (ret != BLK_STS_OK) {
                                spin_unlock(&dev->zone_res_lock);
                                return ret;
                        }
                        dev->nr_zones_closed--;
                        break;
                default:
                        spin_unlock(&dev->zone_res_lock);
                        return BLK_STS_IOERR;
                }

                spin_unlock(&dev->zone_res_lock);
        }

        zone->cond = BLK_ZONE_COND_FULL;
        zone->wp = zone->start + zone->len;

        return BLK_STS_OK;
}

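/*
 * Reset a zone: transition the zone back to the empty condition, with its
 * write pointer reset to the zone start. For memory backed devices, the zone
 * data is also discarded.
 */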
static blk_status_t null_reset_zone(struct nullb_device *dev,
                                    struct nullb_zone *zone)
{
        if (zone->type == BLK_ZONE_TYPE_CONVENTIONAL)
                return BLK_STS_IOERR;

        if (dev->need_zone_res_mgmt) {
                spin_lock(&dev->zone_res_lock);

                switch (zone->cond) {
                case BLK_ZONE_COND_IMP_OPEN:
                        dev->nr_zones_imp_open--;
                        break;
                case BLK_ZONE_COND_EXP_OPEN:
                        dev->nr_zones_exp_open--;
                        break;
                case BLK_ZONE_COND_CLOSED:
                        dev->nr_zones_closed--;
                        break;
                case BLK_ZONE_COND_EMPTY:
                case BLK_ZONE_COND_FULL:
                        break;
                default:
                        spin_unlock(&dev->zone_res_lock);
                        return BLK_STS_IOERR;
                }

                spin_unlock(&dev->zone_res_lock);
        }

        zone->cond = BLK_ZONE_COND_EMPTY;
        zone->wp = zone->start;

        if (dev->memory_backed)
                return null_handle_discard(dev, zone->start, zone->len);

        return BLK_STS_OK;
}

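/*
 * Process a zone management operation (reset, reset all, open, close or
 * finish), updating the target zone condition and the device zone resource
 * accounting.
 */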
static blk_status_t null_zone_mgmt(struct nullb_cmd *cmd, enum req_op op,
                                   sector_t sector)
{
        struct nullb_device *dev = cmd->nq->dev;
        unsigned int zone_no;
        struct nullb_zone *zone;
        blk_status_t ret;
        size_t i;

        if (op == REQ_OP_ZONE_RESET_ALL) {
                for (i = dev->zone_nr_conv; i < dev->nr_zones; i++) {
                        zone = &dev->zones[i];
                        null_lock_zone(dev, zone);
                        if (zone->cond != BLK_ZONE_COND_EMPTY &&
                            zone->cond != BLK_ZONE_COND_READONLY &&
                            zone->cond != BLK_ZONE_COND_OFFLINE) {
                                null_reset_zone(dev, zone);
                                trace_nullb_zone_op(cmd, i, zone->cond);
                        }
                        null_unlock_zone(dev, zone);
                }
                return BLK_STS_OK;
        }

        zone_no = null_zone_no(dev, sector);
        zone = &dev->zones[zone_no];

        null_lock_zone(dev, zone);

        if (zone->cond == BLK_ZONE_COND_READONLY ||
            zone->cond == BLK_ZONE_COND_OFFLINE) {
                ret = BLK_STS_IOERR;
                goto unlock;
        }

        switch (op) {
        case REQ_OP_ZONE_RESET:
                ret = null_reset_zone(dev, zone);
                break;
        case REQ_OP_ZONE_OPEN:
                ret = null_open_zone(dev, zone);
                break;
        case REQ_OP_ZONE_CLOSE:
                ret = null_close_zone(dev, zone);
                break;
        case REQ_OP_ZONE_FINISH:
                ret = null_finish_zone(dev, zone);
                break;
        default:
                ret = BLK_STS_NOTSUPP;
                break;
        }

        if (ret == BLK_STS_OK)
                trace_nullb_zone_op(cmd, zone_no, zone->cond);

unlock:
        null_unlock_zone(dev, zone);

        return ret;
}

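/*
 * Entry point for commands issued to a zoned device: writes and zone appends
 * are handled by null_zone_write(), zone management operations by
 * null_zone_mgmt(), and all other operations (e.g. reads) are processed with
 * the target zone locked.
 */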
blk_status_t null_process_zoned_cmd(struct nullb_cmd *cmd, enum req_op op,
                                    sector_t sector, sector_t nr_sectors)
{
        struct nullb_device *dev;
        struct nullb_zone *zone;
        blk_status_t sts;

        switch (op) {
        case REQ_OP_WRITE:
                return null_zone_write(cmd, sector, nr_sectors, false);
        case REQ_OP_ZONE_APPEND:
                return null_zone_write(cmd, sector, nr_sectors, true);
        case REQ_OP_ZONE_RESET:
        case REQ_OP_ZONE_RESET_ALL:
        case REQ_OP_ZONE_OPEN:
        case REQ_OP_ZONE_CLOSE:
        case REQ_OP_ZONE_FINISH:
                return null_zone_mgmt(cmd, op, sector);
        default:
                dev = cmd->nq->dev;
                zone = &dev->zones[null_zone_no(dev, sector)];
                if (zone->cond == BLK_ZONE_COND_OFFLINE)
                        return BLK_STS_IOERR;

                null_lock_zone(dev, zone);
                sts = null_process_cmd(cmd, op, sector, nr_sectors);
                null_unlock_zone(dev, zone);
                return sts;
        }
}

/*
 * Set a zone in the read-only or offline condition.
 */
static void null_set_zone_cond(struct nullb_device *dev,
                               struct nullb_zone *zone, enum blk_zone_cond cond)
{
        if (WARN_ON_ONCE(cond != BLK_ZONE_COND_READONLY &&
                         cond != BLK_ZONE_COND_OFFLINE))
                return;

        null_lock_zone(dev, zone);

        /*
         * If the read-only condition is requested again for a zone that is
         * already read-only, restore the normal empty condition. Do the same
         * if the offline condition is requested for an offline zone.
         * Otherwise, set the specified zone condition, finishing the zone
         * beforehand to free up its zone resources.
         */
        if (zone->cond == cond) {
                zone->cond = BLK_ZONE_COND_EMPTY;
                zone->wp = zone->start;
                if (dev->memory_backed)
                        null_handle_discard(dev, zone->start, zone->len);
        } else {
                if (zone->cond != BLK_ZONE_COND_READONLY &&
                    zone->cond != BLK_ZONE_COND_OFFLINE)
                        null_finish_zone(dev, zone);
                zone->cond = cond;
                zone->wp = NULL_ZONE_INVALID_WP;
        }

        null_unlock_zone(dev, zone);
}

/*
 * Identify the zone containing the sector written to the configfs file and
 * set that zone's condition.
 */
ssize_t zone_cond_store(struct nullb_device *dev, const char *page,
                        size_t count, enum blk_zone_cond cond)
{
        unsigned long long sector;
        unsigned int zone_no;
        int ret;

        if (!dev->zoned) {
                pr_err("null_blk device is not zoned\n");
                return -EINVAL;
        }

        if (!dev->zones) {
                pr_err("null_blk device is not yet powered\n");
                return -EINVAL;
        }

        ret = kstrtoull(page, 0, &sector);
        if (ret < 0)
                return ret;

        zone_no = null_zone_no(dev, sector);
        if (zone_no >= dev->nr_zones) {
                pr_err("Sector out of range\n");
                return -EINVAL;
        }

        if (dev->zones[zone_no].type == BLK_ZONE_TYPE_CONVENTIONAL) {
                pr_err("Cannot change the condition of conventional zones\n");
                return -EINVAL;
        }

        null_set_zone_cond(dev, &dev->zones[zone_no], cond);

        return count;
}
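
/*
 * Usage sketch (assuming a configfs device named "nullb0" and a configfs
 * attribute file that calls this helper, e.g. zone_readonly):
 *
 *   # Set the zone containing sector 0 to the read-only condition:
 *   echo 0 > /sys/kernel/config/nullb/nullb0/zone_readonly
 *
 * Writing the same sector again restores the zone to the empty condition,
 * as done by null_set_zone_cond() above.
 */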