/*
 * SCSI Zoned Block commands
 *
 * Copyright (C) 2014-2015 SUSE Linux GmbH
 * Written by: Hannes Reinecke <hare@suse.de>
 * Modified by: Damien Le Moal <damien.lemoal@hgst.com>
 * Modified by: Shaun Tancheff <shaun.tancheff@seagate.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; see the file COPYING. If not, write to
 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139,
 * USA.
 */
25 #include <linux/blkdev.h>
27 #include <asm/unaligned.h>
29 #include <scsi/scsi.h>
30 #include <scsi/scsi_cmnd.h>
35 * sd_zbc_parse_report - Convert a zone descriptor to a struct blk_zone,
36 * @sdkp: The disk the report originated from
37 * @buf: Address of the report zone descriptor
38 * @zone: the destination zone structure
40 * All LBA sized values are converted to 512B sectors unit.
42 static void sd_zbc_parse_report(struct scsi_disk *sdkp, u8 *buf,
43 struct blk_zone *zone)
45 struct scsi_device *sdp = sdkp->device;
47 memset(zone, 0, sizeof(struct blk_zone));
49 zone->type = buf[0] & 0x0f;
50 zone->cond = (buf[1] >> 4) & 0xf;
56 zone->len = logical_to_sectors(sdp, get_unaligned_be64(&buf[8]));
57 zone->start = logical_to_sectors(sdp, get_unaligned_be64(&buf[16]));
58 zone->wp = logical_to_sectors(sdp, get_unaligned_be64(&buf[24]));
59 if (zone->type != ZBC_ZONE_TYPE_CONV &&
60 zone->cond == ZBC_ZONE_COND_FULL)
61 zone->wp = zone->start + zone->len;
65 * sd_zbc_report_zones - Issue a REPORT ZONES scsi command.
66 * @sdkp: The target disk
67 * @buf: Buffer to use for the reply
68 * @buflen: the buffer size
69 * @lba: Start LBA of the report
71 * For internal use during device validation.
73 static int sd_zbc_report_zones(struct scsi_disk *sdkp, unsigned char *buf,
74 unsigned int buflen, sector_t lba)
76 struct scsi_device *sdp = sdkp->device;
77 const int timeout = sdp->request_queue->rq_timeout;
78 struct scsi_sense_hdr sshdr;
79 unsigned char cmd[16];
85 cmd[1] = ZI_REPORT_ZONES;
86 put_unaligned_be64(lba, &cmd[2]);
87 put_unaligned_be32(buflen, &cmd[10]);
88 memset(buf, 0, buflen);
90 result = scsi_execute_req(sdp, cmd, DMA_FROM_DEVICE,
92 timeout, SD_MAX_RETRIES, NULL);
94 sd_printk(KERN_ERR, sdkp,
95 "REPORT ZONES lba %llu failed with %d/%d\n",
96 (unsigned long long)lba,
97 host_byte(result), driver_byte(result));
101 rep_len = get_unaligned_be32(&buf[0]);
103 sd_printk(KERN_ERR, sdkp,
104 "REPORT ZONES report invalid length %u\n",
113 * sd_zbc_setup_report_cmnd - Prepare a REPORT ZONES scsi command
114 * @cmd: The command to setup
116 * Call in sd_init_command() for a REQ_OP_ZONE_REPORT request.
118 int sd_zbc_setup_report_cmnd(struct scsi_cmnd *cmd)
120 struct request *rq = cmd->request;
121 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
122 sector_t lba, sector = blk_rq_pos(rq);
123 unsigned int nr_bytes = blk_rq_bytes(rq);
126 WARN_ON(nr_bytes == 0);
128 if (!sd_is_zoned(sdkp))
129 /* Not a zoned device */
132 ret = scsi_init_io(cmd);
133 if (ret != BLKPREP_OK)
137 memset(cmd->cmnd, 0, cmd->cmd_len);
138 cmd->cmnd[0] = ZBC_IN;
139 cmd->cmnd[1] = ZI_REPORT_ZONES;
140 lba = sectors_to_logical(sdkp->device, sector);
141 put_unaligned_be64(lba, &cmd->cmnd[2]);
142 put_unaligned_be32(nr_bytes, &cmd->cmnd[10]);
143 /* Do partial report for speeding things up */
144 cmd->cmnd[14] = ZBC_REPORT_ZONE_PARTIAL;
146 cmd->sc_data_direction = DMA_FROM_DEVICE;
147 cmd->sdb.length = nr_bytes;
148 cmd->transfersize = sdkp->device->sector_size;
152 * Report may return less bytes than requested. Make sure
153 * to report completion on the entire initial request.
155 rq->__data_len = nr_bytes;
161 * sd_zbc_report_zones_complete - Process a REPORT ZONES scsi command reply.
162 * @scmd: The completed report zones command
163 * @good_bytes: reply size in bytes
165 * Convert all reported zone descriptors to struct blk_zone. The conversion
166 * is done in-place, directly in the request specified sg buffer.
168 static void sd_zbc_report_zones_complete(struct scsi_cmnd *scmd,
169 unsigned int good_bytes)
171 struct request *rq = scmd->request;
172 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
173 struct sg_mapping_iter miter;
174 struct blk_zone_report_hdr hdr;
175 struct blk_zone zone;
176 unsigned int offset, bytes = 0;
183 memset(&hdr, 0, sizeof(struct blk_zone_report_hdr));
185 sg_miter_start(&miter, scsi_sglist(scmd), scsi_sg_count(scmd),
186 SG_MITER_TO_SG | SG_MITER_ATOMIC);
188 local_irq_save(flags);
189 while (sg_miter_next(&miter) && bytes < good_bytes) {
195 /* Set the report header */
196 hdr.nr_zones = min_t(unsigned int,
197 (good_bytes - 64) / 64,
198 get_unaligned_be32(&buf[0]) / 64);
199 memcpy(buf, &hdr, sizeof(struct blk_zone_report_hdr));
204 /* Parse zone descriptors */
205 while (offset < miter.length && hdr.nr_zones) {
206 WARN_ON(offset > miter.length);
207 buf = miter.addr + offset;
208 sd_zbc_parse_report(sdkp, buf, &zone);
209 memcpy(buf, &zone, sizeof(struct blk_zone));
219 sg_miter_stop(&miter);
220 local_irq_restore(flags);
224 * sd_zbc_zone_sectors - Get the device zone size in number of 512B sectors.
225 * @sdkp: The target disk
227 static inline sector_t sd_zbc_zone_sectors(struct scsi_disk *sdkp)
229 return logical_to_sectors(sdkp->device, sdkp->zone_blocks);
233 * sd_zbc_setup_reset_cmnd - Prepare a RESET WRITE POINTER scsi command.
234 * @cmd: the command to setup
236 * Called from sd_init_command() for a REQ_OP_ZONE_RESET request.
238 int sd_zbc_setup_reset_cmnd(struct scsi_cmnd *cmd)
240 struct request *rq = cmd->request;
241 struct scsi_disk *sdkp = scsi_disk(rq->rq_disk);
242 sector_t sector = blk_rq_pos(rq);
243 sector_t block = sectors_to_logical(sdkp->device, sector);
245 if (!sd_is_zoned(sdkp))
246 /* Not a zoned device */
249 if (sdkp->device->changed)
252 if (sector & (sd_zbc_zone_sectors(sdkp) - 1))
253 /* Unaligned request */
257 memset(cmd->cmnd, 0, cmd->cmd_len);
258 cmd->cmnd[0] = ZBC_OUT;
259 cmd->cmnd[1] = ZO_RESET_WRITE_POINTER;
260 put_unaligned_be64(block, &cmd->cmnd[2]);
262 rq->timeout = SD_TIMEOUT;
263 cmd->sc_data_direction = DMA_NONE;
264 cmd->transfersize = 0;
271 * sd_zbc_complete - ZBC command post processing.
272 * @cmd: Completed command
273 * @good_bytes: Command reply bytes
274 * @sshdr: command sense header
276 * Called from sd_done(). Process report zones reply and handle reset zone
277 * and write commands errors.
279 void sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes,
280 struct scsi_sense_hdr *sshdr)
282 int result = cmd->result;
283 struct request *rq = cmd->request;
285 switch (req_op(rq)) {
286 case REQ_OP_ZONE_RESET:
289 sshdr->sense_key == ILLEGAL_REQUEST &&
292 * INVALID FIELD IN CDB error: reset of a conventional
293 * zone was attempted. Nothing to worry about, so be
294 * quiet about the error.
296 rq->rq_flags |= RQF_QUIET;
300 case REQ_OP_WRITE_ZEROES:
301 case REQ_OP_WRITE_SAME:
304 sshdr->sense_key == ILLEGAL_REQUEST &&
307 * INVALID ADDRESS FOR WRITE error: It is unlikely that
308 * retrying write requests failed with any kind of
309 * alignement error will result in success. So don't.
314 case REQ_OP_ZONE_REPORT:
317 sd_zbc_report_zones_complete(cmd, good_bytes);
324 * sd_zbc_read_zoned_characteristics - Read zoned block device characteristics
326 * @buf: Buffer where to store the VPD page data
330 static int sd_zbc_read_zoned_characteristics(struct scsi_disk *sdkp,
334 if (scsi_get_vpd_page(sdkp->device, 0xb6, buf, 64)) {
335 sd_printk(KERN_NOTICE, sdkp,
336 "Unconstrained-read check failed\n");
340 if (sdkp->device->type != TYPE_ZBC) {
343 sdkp->zones_optimal_open = get_unaligned_be32(&buf[8]);
344 sdkp->zones_optimal_nonseq = get_unaligned_be32(&buf[12]);
345 sdkp->zones_max_open = 0;
348 sdkp->urswrz = buf[4] & 1;
349 sdkp->zones_optimal_open = 0;
350 sdkp->zones_optimal_nonseq = 0;
351 sdkp->zones_max_open = get_unaligned_be32(&buf[16]);
358 * sd_zbc_check_capacity - Check reported capacity.
360 * @buf: Buffer to use for commands
362 * ZBC drive may report only the capacity of the first conventional zones at
363 * LBA 0. This is indicated by the RC_BASIS field of the read capacity reply.
364 * Check this here. If the disk reported only its conventional zones capacity,
365 * get the total capacity by doing a report zones.
367 static int sd_zbc_check_capacity(struct scsi_disk *sdkp, unsigned char *buf)
372 if (sdkp->rc_basis != 0)
375 /* Do a report zone to get the maximum LBA to check capacity */
376 ret = sd_zbc_report_zones(sdkp, buf, SD_BUF_SIZE, 0);
380 /* The max_lba field is the capacity of this device */
381 lba = get_unaligned_be64(&buf[8]);
382 if (lba + 1 == sdkp->capacity)
385 if (sdkp->first_scan)
386 sd_printk(KERN_WARNING, sdkp,
387 "Changing capacity from %llu to max LBA+1 %llu\n",
388 (unsigned long long)sdkp->capacity,
389 (unsigned long long)lba + 1);
390 sdkp->capacity = lba + 1;
395 #define SD_ZBC_BUF_SIZE 131072U
398 * sd_zbc_check_zone_size - Check the device zone sizes
401 * Check that all zones of the device are equal. The last zone can however
402 * be smaller. The zone size must also be a power of two number of LBAs.
404 static int sd_zbc_check_zone_size(struct scsi_disk *sdkp)
410 unsigned int buf_len;
411 unsigned int list_length;
415 sdkp->zone_blocks = 0;
418 buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL);
422 /* Do a report zone to get the same field */
423 ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, 0);
427 same = buf[4] & 0x0f;
430 zone_blocks = get_unaligned_be64(&rec[8]);
435 * Check the size of all zones: all zones must be of
436 * equal size, except the last zone which can be smaller
441 /* Parse REPORT ZONES header */
442 list_length = get_unaligned_be32(&buf[0]) + 64;
444 buf_len = min(list_length, SD_ZBC_BUF_SIZE);
446 /* Parse zone descriptors */
447 while (rec < buf + buf_len) {
448 zone_blocks = get_unaligned_be64(&rec[8]);
449 if (sdkp->zone_blocks == 0) {
450 sdkp->zone_blocks = zone_blocks;
451 } else if (zone_blocks != sdkp->zone_blocks &&
452 (block + zone_blocks < sdkp->capacity
453 || zone_blocks > sdkp->zone_blocks)) {
457 block += zone_blocks;
461 if (block < sdkp->capacity) {
462 ret = sd_zbc_report_zones(sdkp, buf,
463 SD_ZBC_BUF_SIZE, block);
468 } while (block < sdkp->capacity);
470 zone_blocks = sdkp->zone_blocks;
474 if (sdkp->first_scan)
475 sd_printk(KERN_NOTICE, sdkp,
476 "Devices with non constant zone "
477 "size are not supported\n");
479 } else if (!is_power_of_2(zone_blocks)) {
480 if (sdkp->first_scan)
481 sd_printk(KERN_NOTICE, sdkp,
482 "Devices with non power of 2 zone "
483 "size are not supported\n");
485 } else if (logical_to_sectors(sdkp->device, zone_blocks) > UINT_MAX) {
486 if (sdkp->first_scan)
487 sd_printk(KERN_NOTICE, sdkp,
488 "Zone size too large\n");
491 sdkp->zone_blocks = zone_blocks;
492 sdkp->zone_shift = ilog2(zone_blocks);
502 * sd_zbc_alloc_zone_bitmap - Allocate a zone bitmap (one bit per zone).
503 * @sdkp: The disk of the bitmap
505 static inline unsigned long *sd_zbc_alloc_zone_bitmap(struct scsi_disk *sdkp)
507 struct request_queue *q = sdkp->disk->queue;
509 return kzalloc_node(BITS_TO_LONGS(sdkp->nr_zones)
510 * sizeof(unsigned long),
511 GFP_KERNEL, q->node);
515 * sd_zbc_get_seq_zones - Parse report zones reply to identify sequential zones
517 * @buf: report reply buffer
518 * @buflen: length of @buf
519 * @seq_zones_bitmap: bitmap of sequential zones to set
521 * Parse reported zone descriptors in @buf to identify sequential zones and
522 * set the reported zone bit in @seq_zones_bitmap accordingly.
523 * Since read-only and offline zones cannot be written, do not
524 * mark them as sequential in the bitmap.
525 * Return the LBA after the last zone reported.
527 static sector_t sd_zbc_get_seq_zones(struct scsi_disk *sdkp, unsigned char *buf,
529 unsigned long *seq_zones_bitmap)
531 sector_t lba, next_lba = sdkp->capacity;
532 unsigned int buf_len, list_length;
536 list_length = get_unaligned_be32(&buf[0]) + 64;
537 buf_len = min(list_length, buflen);
540 while (rec < buf + buf_len) {
541 type = rec[0] & 0x0f;
542 cond = (rec[1] >> 4) & 0xf;
543 lba = get_unaligned_be64(&rec[16]);
544 if (type != ZBC_ZONE_TYPE_CONV &&
545 cond != ZBC_ZONE_COND_READONLY &&
546 cond != ZBC_ZONE_COND_OFFLINE)
547 set_bit(lba >> sdkp->zone_shift, seq_zones_bitmap);
548 next_lba = lba + get_unaligned_be64(&rec[8]);
556 * sd_zbc_setup_seq_zones_bitmap - Initialize the disk seq zone bitmap.
559 * Allocate a zone bitmap and initialize it by identifying sequential zones.
561 static int sd_zbc_setup_seq_zones_bitmap(struct scsi_disk *sdkp)
563 struct request_queue *q = sdkp->disk->queue;
564 unsigned long *seq_zones_bitmap;
569 seq_zones_bitmap = sd_zbc_alloc_zone_bitmap(sdkp);
570 if (!seq_zones_bitmap)
573 buf = kmalloc(SD_ZBC_BUF_SIZE, GFP_KERNEL);
577 while (lba < sdkp->capacity) {
578 ret = sd_zbc_report_zones(sdkp, buf, SD_ZBC_BUF_SIZE, lba);
581 lba = sd_zbc_get_seq_zones(sdkp, buf, SD_ZBC_BUF_SIZE,
585 if (lba != sdkp->capacity) {
586 /* Something went wrong */
593 kfree(seq_zones_bitmap);
597 q->seq_zones_bitmap = seq_zones_bitmap;
602 static void sd_zbc_cleanup(struct scsi_disk *sdkp)
604 struct request_queue *q = sdkp->disk->queue;
606 kfree(q->seq_zones_bitmap);
607 q->seq_zones_bitmap = NULL;
609 kfree(q->seq_zones_wlock);
610 q->seq_zones_wlock = NULL;
615 static int sd_zbc_setup(struct scsi_disk *sdkp)
617 struct request_queue *q = sdkp->disk->queue;
620 /* READ16/WRITE16 is mandatory for ZBC disks */
621 sdkp->device->use_16_for_rw = 1;
622 sdkp->device->use_10_for_rw = 0;
624 /* chunk_sectors indicates the zone size */
625 blk_queue_chunk_sectors(sdkp->disk->queue,
626 logical_to_sectors(sdkp->device, sdkp->zone_blocks));
628 round_up(sdkp->capacity, sdkp->zone_blocks) >> sdkp->zone_shift;
631 * Initialize the device request queue information if the number
634 if (sdkp->nr_zones != q->nr_zones) {
636 sd_zbc_cleanup(sdkp);
638 q->nr_zones = sdkp->nr_zones;
639 if (sdkp->nr_zones) {
640 q->seq_zones_wlock = sd_zbc_alloc_zone_bitmap(sdkp);
641 if (!q->seq_zones_wlock) {
646 ret = sd_zbc_setup_seq_zones_bitmap(sdkp);
648 sd_zbc_cleanup(sdkp);
658 sd_zbc_cleanup(sdkp);
662 int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buf)
666 if (!sd_is_zoned(sdkp))
668 * Device managed or normal SCSI disk,
669 * no special handling required
673 /* Get zoned block device characteristics */
674 ret = sd_zbc_read_zoned_characteristics(sdkp, buf);
679 * Check for unconstrained reads: host-managed devices with
680 * constrained reads (drives failing read after write pointer)
684 if (sdkp->first_scan)
685 sd_printk(KERN_NOTICE, sdkp,
686 "constrained reads devices are not supported\n");
692 ret = sd_zbc_check_capacity(sdkp, buf);
697 * Check zone size: only devices with a constant zone size (except
698 * an eventual last runt zone) that is a power of 2 are supported.
700 ret = sd_zbc_check_zone_size(sdkp);
704 /* The drive satisfies the kernel restrictions: set it up */
705 ret = sd_zbc_setup(sdkp);
713 sd_zbc_cleanup(sdkp);
/*
 * sd_zbc_remove - Release zone information when the disk is removed.
 */
void sd_zbc_remove(struct scsi_disk *sdkp)
{
	sd_zbc_cleanup(sdkp);
}
723 void sd_zbc_print_zones(struct scsi_disk *sdkp)
725 if (!sd_is_zoned(sdkp) || !sdkp->capacity)
728 if (sdkp->capacity & (sdkp->zone_blocks - 1))
729 sd_printk(KERN_NOTICE, sdkp,
730 "%u zones of %u logical blocks + 1 runt zone\n",
734 sd_printk(KERN_NOTICE, sdkp,
735 "%u zones of %u logical blocks\n",