// SPDX-License-Identifier: GPL-2.0-only
//#define DEBUG
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/blkdev.h>
#include <linux/hdreg.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/virtio.h>
#include <linux/virtio_blk.h>
#include <linux/scatterlist.h>
#include <linux/string_helpers.h>
#include <scsi/scsi_cmnd.h>
#include <linux/idr.h>
#include <linux/blk-mq.h>
#include <linux/blk-mq-virtio.h>
#include <linux/numa.h>

#define PART_BITS 4
#define VQ_NAME_LEN 16
#define MAX_DISCARD_SEGMENTS 256u

static int major;
static DEFINE_IDA(vd_index_ida);

static struct workqueue_struct *virtblk_wq;

struct virtio_blk_vq {
        struct virtqueue *vq;
        spinlock_t lock;
        char name[VQ_NAME_LEN];
} ____cacheline_aligned_in_smp;

struct virtio_blk {
        struct virtio_device *vdev;

        /* The disk structure for the kernel. */
        struct gendisk *disk;

        /* Block layer tags. */
        struct blk_mq_tag_set tag_set;

        /* Process context for config space updates */
        struct work_struct config_work;

        /* What the host tells us, plus 2 for header & trailer. */
        unsigned int sg_elems;

        /* Ida index - used to track minor number allocations. */
        int index;

        /* num of vqs */
        int num_vqs;
        struct virtio_blk_vq *vqs;
};

struct virtblk_req {
#ifdef CONFIG_VIRTIO_BLK_SCSI
        struct scsi_request sreq;       /* for SCSI passthrough, must be first */
        u8 sense[SCSI_SENSE_BUFFERSIZE];
        struct virtio_scsi_inhdr in_hdr;
#endif
        struct virtio_blk_outhdr out_hdr;
        u8 status;
        struct scatterlist sg[];
};

static inline blk_status_t virtblk_result(struct virtblk_req *vbr)
{
        switch (vbr->status) {
        case VIRTIO_BLK_S_OK:
                return BLK_STS_OK;
        case VIRTIO_BLK_S_UNSUPP:
                return BLK_STS_NOTSUPP;
        default:
                return BLK_STS_IOERR;
        }
}

/*
 * If this is a packet command we need a couple of additional headers.  Behind
 * the normal outhdr we put a segment with the scsi command block, and before
 * the normal inhdr we put the sense data and the inhdr with additional status
 * information.
 */
#ifdef CONFIG_VIRTIO_BLK_SCSI
static int virtblk_add_req_scsi(struct virtqueue *vq, struct virtblk_req *vbr,
                struct scatterlist *data_sg, bool have_data)
{
        struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6];
        unsigned int num_out = 0, num_in = 0;

        sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
        sgs[num_out++] = &hdr;
        sg_init_one(&cmd, vbr->sreq.cmd, vbr->sreq.cmd_len);
        sgs[num_out++] = &cmd;

        if (have_data) {
                if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
                        sgs[num_out++] = data_sg;
                else
                        sgs[num_out + num_in++] = data_sg;
        }

        sg_init_one(&sense, vbr->sense, SCSI_SENSE_BUFFERSIZE);
        sgs[num_out + num_in++] = &sense;
        sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
        sgs[num_out + num_in++] = &inhdr;
        sg_init_one(&status, &vbr->status, sizeof(vbr->status));
        sgs[num_out + num_in++] = &status;

        return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}

static inline void virtblk_scsi_request_done(struct request *req)
{
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
        struct virtio_blk *vblk = req->q->queuedata;
        struct scsi_request *sreq = &vbr->sreq;

        sreq->resid_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.residual);
        sreq->sense_len = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.sense_len);
        sreq->result = virtio32_to_cpu(vblk->vdev, vbr->in_hdr.errors);
}

static int virtblk_ioctl(struct block_device *bdev, fmode_t mode,
                             unsigned int cmd, unsigned long data)
{
        struct gendisk *disk = bdev->bd_disk;
        struct virtio_blk *vblk = disk->private_data;

        /*
         * Only allow the generic SCSI ioctls if the host can support it.
         */
        if (!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_SCSI))
                return -ENOTTY;

        return scsi_cmd_blk_ioctl(bdev, mode, cmd,
                                  (void __user *)data);
}
#else
static inline int virtblk_add_req_scsi(struct virtqueue *vq,
                struct virtblk_req *vbr, struct scatterlist *data_sg,
                bool have_data)
{
        return -EIO;
}
static inline void virtblk_scsi_request_done(struct request *req)
{
}
#define virtblk_ioctl   NULL
#endif /* CONFIG_VIRTIO_BLK_SCSI */

static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr,
                struct scatterlist *data_sg, bool have_data)
{
        struct scatterlist hdr, status, *sgs[3];
        unsigned int num_out = 0, num_in = 0;

        sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
        sgs[num_out++] = &hdr;

        if (have_data) {
                if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
                        sgs[num_out++] = data_sg;
                else
                        sgs[num_out + num_in++] = data_sg;
        }

        sg_init_one(&status, &vbr->status, sizeof(vbr->status));
        sgs[num_out + num_in++] = &status;

        return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}
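
/*
 * Illustrative sketch (editor's addition), for the simple non-SCSI path
 * above: the buffers handed to virtqueue_add_sgs() end up laid out as
 *
 *   device-readable:  [ out_hdr ][ data, if this is a write ]
 *   device-writable:  [ data, if this is a read ][ status byte ]
 *
 * The device fills in vbr->status, which virtblk_result() later maps to a
 * blk_status_t for the block layer.
 */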

static int virtblk_setup_discard_write_zeroes(struct request *req, bool unmap)
{
        unsigned short segments = blk_rq_nr_discard_segments(req);
        unsigned short n = 0;
        struct virtio_blk_discard_write_zeroes *range;
        struct bio *bio;
        u32 flags = 0;

        if (unmap)
                flags |= VIRTIO_BLK_WRITE_ZEROES_FLAG_UNMAP;

        range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
        if (!range)
                return -ENOMEM;

        __rq_for_each_bio(bio, req) {
                u64 sector = bio->bi_iter.bi_sector;
                u32 num_sectors = bio->bi_iter.bi_size >> SECTOR_SHIFT;

                range[n].flags = cpu_to_le32(flags);
                range[n].num_sectors = cpu_to_le32(num_sectors);
                range[n].sector = cpu_to_le64(sector);
                n++;
        }

        req->special_vec.bv_page = virt_to_page(range);
        req->special_vec.bv_offset = offset_in_page(range);
        req->special_vec.bv_len = sizeof(*range) * segments;
        req->rq_flags |= RQF_SPECIAL_PAYLOAD;

        return 0;
}
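
/*
 * Note (editor's addition): the range[] array allocated above travels to
 * the device as the request's data payload via RQF_SPECIAL_PAYLOAD and is
 * freed in virtblk_request_done() once the request completes.
 */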

static inline void virtblk_request_done(struct request *req)
{
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);

        if (req->rq_flags & RQF_SPECIAL_PAYLOAD) {
                kfree(page_address(req->special_vec.bv_page) +
                      req->special_vec.bv_offset);
        }

        switch (req_op(req)) {
        case REQ_OP_SCSI_IN:
        case REQ_OP_SCSI_OUT:
                virtblk_scsi_request_done(req);
                break;
        }

        blk_mq_end_request(req, virtblk_result(vbr));
}

static void virtblk_done(struct virtqueue *vq)
{
        struct virtio_blk *vblk = vq->vdev->priv;
        bool req_done = false;
        int qid = vq->index;
        struct virtblk_req *vbr;
        unsigned long flags;
        unsigned int len;

        spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
        do {
                virtqueue_disable_cb(vq);
                while ((vbr = virtqueue_get_buf(vblk->vqs[qid].vq, &len)) != NULL) {
                        struct request *req = blk_mq_rq_from_pdu(vbr);

                        blk_mq_complete_request(req);
                        req_done = true;
                }
                if (unlikely(virtqueue_is_broken(vq)))
                        break;
        } while (!virtqueue_enable_cb(vq));

        /* In case queue is stopped waiting for more buffers. */
        if (req_done)
                blk_mq_start_stopped_hw_queues(vblk->disk->queue, true);
        spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
}
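
/*
 * Note (editor's addition): the virtqueue_disable_cb()/virtqueue_enable_cb()
 * loop above is the standard virtio completion idiom; if more buffers were
 * used while callbacks were being re-enabled, virtqueue_enable_cb() returns
 * false and the queue is drained again rather than risking a missed
 * interrupt.
 */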

static void virtio_commit_rqs(struct blk_mq_hw_ctx *hctx)
{
        struct virtio_blk *vblk = hctx->queue->queuedata;
        struct virtio_blk_vq *vq = &vblk->vqs[hctx->queue_num];
        bool kick;

        spin_lock_irq(&vq->lock);
        kick = virtqueue_kick_prepare(vq->vq);
        spin_unlock_irq(&vq->lock);

        if (kick)
                virtqueue_notify(vq->vq);
}

static blk_status_t virtio_queue_rq(struct blk_mq_hw_ctx *hctx,
                           const struct blk_mq_queue_data *bd)
{
        struct virtio_blk *vblk = hctx->queue->queuedata;
        struct request *req = bd->rq;
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);
        unsigned long flags;
        unsigned int num;
        int qid = hctx->queue_num;
        int err;
        bool notify = false;
        bool unmap = false;
        u32 type;

        BUG_ON(req->nr_phys_segments + 2 > vblk->sg_elems);

        switch (req_op(req)) {
        case REQ_OP_READ:
        case REQ_OP_WRITE:
                type = 0;
                break;
        case REQ_OP_FLUSH:
                type = VIRTIO_BLK_T_FLUSH;
                break;
        case REQ_OP_DISCARD:
                type = VIRTIO_BLK_T_DISCARD;
                break;
        case REQ_OP_WRITE_ZEROES:
                type = VIRTIO_BLK_T_WRITE_ZEROES;
                unmap = !(req->cmd_flags & REQ_NOUNMAP);
                break;
        case REQ_OP_SCSI_IN:
        case REQ_OP_SCSI_OUT:
                type = VIRTIO_BLK_T_SCSI_CMD;
                break;
        case REQ_OP_DRV_IN:
                type = VIRTIO_BLK_T_GET_ID;
                break;
        default:
                WARN_ON_ONCE(1);
                return BLK_STS_IOERR;
        }

        vbr->out_hdr.type = cpu_to_virtio32(vblk->vdev, type);
        vbr->out_hdr.sector = type ?
                0 : cpu_to_virtio64(vblk->vdev, blk_rq_pos(req));
        vbr->out_hdr.ioprio = cpu_to_virtio32(vblk->vdev, req_get_ioprio(req));

        blk_mq_start_request(req);

        if (type == VIRTIO_BLK_T_DISCARD || type == VIRTIO_BLK_T_WRITE_ZEROES) {
                err = virtblk_setup_discard_write_zeroes(req, unmap);
                if (err)
                        return BLK_STS_RESOURCE;
        }

        num = blk_rq_map_sg(hctx->queue, req, vbr->sg);
        if (num) {
                if (rq_data_dir(req) == WRITE)
                        vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_OUT);
                else
                        vbr->out_hdr.type |= cpu_to_virtio32(vblk->vdev, VIRTIO_BLK_T_IN);
        }

        spin_lock_irqsave(&vblk->vqs[qid].lock, flags);
        if (blk_rq_is_scsi(req))
                err = virtblk_add_req_scsi(vblk->vqs[qid].vq, vbr, vbr->sg, num);
        else
                err = virtblk_add_req(vblk->vqs[qid].vq, vbr, vbr->sg, num);
        if (err) {
                virtqueue_kick(vblk->vqs[qid].vq);
                blk_mq_stop_hw_queue(hctx);
                spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);
                /* Out of mem doesn't actually happen, since we fall back
                 * to direct descriptors */
                if (err == -ENOMEM || err == -ENOSPC)
                        return BLK_STS_DEV_RESOURCE;
                return BLK_STS_IOERR;
        }

        if (bd->last && virtqueue_kick_prepare(vblk->vqs[qid].vq))
                notify = true;
        spin_unlock_irqrestore(&vblk->vqs[qid].lock, flags);

        if (notify)
                virtqueue_notify(vblk->vqs[qid].vq);
        return BLK_STS_OK;
}
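
/*
 * Note (editor's addition): -ENOSPC above means the virtqueue ring is full,
 * so the hw queue is stopped and BLK_STS_DEV_RESOURCE asks blk-mq to retry
 * after virtblk_done() restarts it; the kick before stopping prods the
 * device to drain what is already queued. Device notification is batched:
 * the queue is kicked only for bd->last, with virtio_commit_rqs() covering
 * the case where the final request of a batch could not be dispatched.
 */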

/* Return the ID (serial number) string of *disk into *id_str. */
static int virtblk_get_id(struct gendisk *disk, char *id_str)
{
        struct virtio_blk *vblk = disk->private_data;
        struct request_queue *q = vblk->disk->queue;
        struct request *req;
        int err;

        req = blk_get_request(q, REQ_OP_DRV_IN, 0);
        if (IS_ERR(req))
                return PTR_ERR(req);

        err = blk_rq_map_kern(q, req, id_str, VIRTIO_BLK_ID_BYTES, GFP_KERNEL);
        if (err)
                goto out;

        blk_execute_rq(vblk->disk->queue, vblk->disk, req, false);
        err = blk_status_to_errno(virtblk_result(blk_mq_rq_to_pdu(req)));
out:
        blk_put_request(req);
        return err;
}

/* We provide getgeo only to please some old bootloader/partitioning tools */
static int virtblk_getgeo(struct block_device *bd, struct hd_geometry *geo)
{
        struct virtio_blk *vblk = bd->bd_disk->private_data;

        /* see if the host passed in geometry config */
        if (virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_GEOMETRY)) {
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.cylinders, &geo->cylinders);
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.heads, &geo->heads);
                virtio_cread(vblk->vdev, struct virtio_blk_config,
                             geometry.sectors, &geo->sectors);
        } else {
                /* some standard values, similar to sd */
                geo->heads = 1 << 6;
                geo->sectors = 1 << 5;
                geo->cylinders = get_capacity(bd->bd_disk) >> 11;
        }
        return 0;
}

static const struct block_device_operations virtblk_fops = {
        .ioctl  = virtblk_ioctl,
        .owner  = THIS_MODULE,
        .getgeo = virtblk_getgeo,
};

static int index_to_minor(int index)
{
        return index << PART_BITS;
}

static int minor_to_index(int minor)
{
        return minor >> PART_BITS;
}
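
/*
 * Worked example (editor's addition): with PART_BITS = 4, each disk owns
 * 1 << 4 = 16 minor numbers, so index 0 maps to minor 0, index 1 to minor
 * 16, index 2 to minor 32; minors 1-15 of each block are left for the
 * disk's partitions.
 */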

static ssize_t serial_show(struct device *dev,
                           struct device_attribute *attr, char *buf)
{
        struct gendisk *disk = dev_to_disk(dev);
        int err;

        /* sysfs gives us a PAGE_SIZE buffer */
        BUILD_BUG_ON(PAGE_SIZE < VIRTIO_BLK_ID_BYTES);

        buf[VIRTIO_BLK_ID_BYTES] = '\0';
        err = virtblk_get_id(disk, buf);
        if (!err)
                return strlen(buf);

        if (err == -EIO) /* Unsupported? Make it empty. */
                return 0;

        return err;
}

static DEVICE_ATTR_RO(serial);

/* The queue's logical block size must be set before calling this */
static void virtblk_update_capacity(struct virtio_blk *vblk, bool resize)
{
        struct virtio_device *vdev = vblk->vdev;
        struct request_queue *q = vblk->disk->queue;
        char cap_str_2[10], cap_str_10[10];
        unsigned long long nblocks;
        u64 capacity;

        /* Host must always specify the capacity. */
        virtio_cread(vdev, struct virtio_blk_config, capacity, &capacity);

        /* If capacity is too big, truncate with warning. */
        if ((sector_t)capacity != capacity) {
                dev_warn(&vdev->dev, "Capacity %llu too large: truncating\n",
                         (unsigned long long)capacity);
                capacity = (sector_t)-1;
        }

        nblocks = DIV_ROUND_UP_ULL(capacity, queue_logical_block_size(q) >> 9);

        string_get_size(nblocks, queue_logical_block_size(q),
                        STRING_UNITS_2, cap_str_2, sizeof(cap_str_2));
        string_get_size(nblocks, queue_logical_block_size(q),
                        STRING_UNITS_10, cap_str_10, sizeof(cap_str_10));

        dev_notice(&vdev->dev,
                   "[%s] %s%llu %d-byte logical blocks (%s/%s)\n",
                   vblk->disk->disk_name,
                   resize ? "new size: " : "",
                   nblocks,
                   queue_logical_block_size(q),
                   cap_str_10,
                   cap_str_2);

        set_capacity(vblk->disk, capacity);
}

static void virtblk_config_changed_work(struct work_struct *work)
{
        struct virtio_blk *vblk =
                container_of(work, struct virtio_blk, config_work);
        char *envp[] = { "RESIZE=1", NULL };

        virtblk_update_capacity(vblk, true);
        revalidate_disk(vblk->disk);
        kobject_uevent_env(&disk_to_dev(vblk->disk)->kobj, KOBJ_CHANGE, envp);
}

static void virtblk_config_changed(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;

        queue_work(virtblk_wq, &vblk->config_work);
}

static int init_vq(struct virtio_blk *vblk)
{
        int err;
        int i;
        vq_callback_t **callbacks;
        const char **names;
        struct virtqueue **vqs;
        unsigned short num_vqs;
        struct virtio_device *vdev = vblk->vdev;
        struct irq_affinity desc = { 0, };

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_MQ,
                                   struct virtio_blk_config, num_queues,
                                   &num_vqs);
        if (err)
                num_vqs = 1;

        num_vqs = min_t(unsigned int, nr_cpu_ids, num_vqs);

        vblk->vqs = kmalloc_array(num_vqs, sizeof(*vblk->vqs), GFP_KERNEL);
        if (!vblk->vqs)
                return -ENOMEM;

        names = kmalloc_array(num_vqs, sizeof(*names), GFP_KERNEL);
        callbacks = kmalloc_array(num_vqs, sizeof(*callbacks), GFP_KERNEL);
        vqs = kmalloc_array(num_vqs, sizeof(*vqs), GFP_KERNEL);
        if (!names || !callbacks || !vqs) {
                err = -ENOMEM;
                goto out;
        }

        for (i = 0; i < num_vqs; i++) {
                callbacks[i] = virtblk_done;
                snprintf(vblk->vqs[i].name, VQ_NAME_LEN, "req.%d", i);
                names[i] = vblk->vqs[i].name;
        }

        /* Discover virtqueues and write information to configuration.  */
        err = virtio_find_vqs(vdev, num_vqs, vqs, callbacks, names, &desc);
        if (err)
                goto out;

        for (i = 0; i < num_vqs; i++) {
                spin_lock_init(&vblk->vqs[i].lock);
                vblk->vqs[i].vq = vqs[i];
        }
        vblk->num_vqs = num_vqs;

out:
        kfree(vqs);
        kfree(callbacks);
        kfree(names);
        if (err)
                kfree(vblk->vqs);
        return err;
}
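
/*
 * Note (editor's addition): when VIRTIO_BLK_F_MQ is not negotiated the read
 * of num_queues fails and the driver falls back to a single virtqueue; the
 * count is also capped at nr_cpu_ids, since queues beyond the CPU count buy
 * nothing. Passing the irq_affinity descriptor lets the transport spread
 * the queue interrupts across CPUs.
 */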

/*
 * Legacy naming scheme used for virtio devices.  We are stuck with it for
 * virtio blk but don't ever use it for any new driver.
 */
static int virtblk_name_format(char *prefix, int index, char *buf, int buflen)
{
        const int base = 'z' - 'a' + 1;
        char *begin = buf + strlen(prefix);
        char *end = buf + buflen;
        char *p;
        int unit;

        p = end - 1;
        *p = '\0';
        unit = base;
        do {
                if (p == begin)
                        return -EINVAL;
                *--p = 'a' + (index % unit);
                index = (index / unit) - 1;
        } while (index >= 0);

        memmove(begin, p, end - p);
        memcpy(buf, prefix, strlen(prefix));

        return 0;
}
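
/*
 * Worked examples (editor's addition): index 0 yields "vda", 25 "vdz",
 * 26 "vdaa" and 701 "vdzz"; this is the same bijective base-26 scheme the
 * sd driver uses for SCSI disk names.
 */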

static int virtblk_get_cache_mode(struct virtio_device *vdev)
{
        u8 writeback;
        int err;

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE,
                                   struct virtio_blk_config, wce,
                                   &writeback);

        /*
         * If WCE is not configurable and flush is not available,
         * assume no writeback cache is in use.
         */
        if (err)
                writeback = virtio_has_feature(vdev, VIRTIO_BLK_F_FLUSH);

        return writeback;
}

static void virtblk_update_cache_mode(struct virtio_device *vdev)
{
        u8 writeback = virtblk_get_cache_mode(vdev);
        struct virtio_blk *vblk = vdev->priv;

        blk_queue_write_cache(vblk->disk->queue, writeback, false);
        revalidate_disk(vblk->disk);
}

static const char *const virtblk_cache_types[] = {
        "write through", "write back"
};

static ssize_t
cache_type_store(struct device *dev, struct device_attribute *attr,
                 const char *buf, size_t count)
{
        struct gendisk *disk = dev_to_disk(dev);
        struct virtio_blk *vblk = disk->private_data;
        struct virtio_device *vdev = vblk->vdev;
        int i;

        BUG_ON(!virtio_has_feature(vblk->vdev, VIRTIO_BLK_F_CONFIG_WCE));
        i = sysfs_match_string(virtblk_cache_types, buf);
        if (i < 0)
                return i;

        virtio_cwrite8(vdev, offsetof(struct virtio_blk_config, wce), i);
        virtblk_update_cache_mode(vdev);
        return count;
}

static ssize_t
cache_type_show(struct device *dev, struct device_attribute *attr, char *buf)
{
        struct gendisk *disk = dev_to_disk(dev);
        struct virtio_blk *vblk = disk->private_data;
        u8 writeback = virtblk_get_cache_mode(vblk->vdev);

        BUG_ON(writeback >= ARRAY_SIZE(virtblk_cache_types));
        return snprintf(buf, 40, "%s\n", virtblk_cache_types[writeback]);
}

static DEVICE_ATTR_RW(cache_type);

static struct attribute *virtblk_attrs[] = {
        &dev_attr_serial.attr,
        &dev_attr_cache_type.attr,
        NULL,
};

static umode_t virtblk_attrs_are_visible(struct kobject *kobj,
                struct attribute *a, int n)
{
        struct device *dev = container_of(kobj, struct device, kobj);
        struct gendisk *disk = dev_to_disk(dev);
        struct virtio_blk *vblk = disk->private_data;
        struct virtio_device *vdev = vblk->vdev;

        if (a == &dev_attr_cache_type.attr &&
            !virtio_has_feature(vdev, VIRTIO_BLK_F_CONFIG_WCE))
                return S_IRUGO;

        return a->mode;
}

static const struct attribute_group virtblk_attr_group = {
        .attrs = virtblk_attrs,
        .is_visible = virtblk_attrs_are_visible,
};

static const struct attribute_group *virtblk_attr_groups[] = {
        &virtblk_attr_group,
        NULL,
};

static int virtblk_init_request(struct blk_mq_tag_set *set, struct request *rq,
                unsigned int hctx_idx, unsigned int numa_node)
{
        struct virtio_blk *vblk = set->driver_data;
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(rq);

#ifdef CONFIG_VIRTIO_BLK_SCSI
        vbr->sreq.sense = vbr->sense;
#endif
        sg_init_table(vbr->sg, vblk->sg_elems);
        return 0;
}

static int virtblk_map_queues(struct blk_mq_tag_set *set)
{
        struct virtio_blk *vblk = set->driver_data;

        return blk_mq_virtio_map_queues(&set->map[HCTX_TYPE_DEFAULT],
                                        vblk->vdev, 0);
}

#ifdef CONFIG_VIRTIO_BLK_SCSI
static void virtblk_initialize_rq(struct request *req)
{
        struct virtblk_req *vbr = blk_mq_rq_to_pdu(req);

        scsi_req_init(&vbr->sreq);
}
#endif

static const struct blk_mq_ops virtio_mq_ops = {
        .queue_rq       = virtio_queue_rq,
        .commit_rqs     = virtio_commit_rqs,
        .complete       = virtblk_request_done,
        .init_request   = virtblk_init_request,
#ifdef CONFIG_VIRTIO_BLK_SCSI
        .initialize_rq_fn = virtblk_initialize_rq,
#endif
        .map_queues     = virtblk_map_queues,
};

static unsigned int virtblk_queue_depth;
module_param_named(queue_depth, virtblk_queue_depth, uint, 0444);

static int virtblk_probe(struct virtio_device *vdev)
{
        struct virtio_blk *vblk;
        struct request_queue *q;
        int err, index;

        u32 v, blk_size, max_size, sg_elems, opt_io_size;
        u16 min_io_size;
        u8 physical_block_exp, alignment_offset;

        if (!vdev->config->get) {
                dev_err(&vdev->dev, "%s failure: config access disabled\n",
                        __func__);
                return -EINVAL;
        }

        err = ida_simple_get(&vd_index_ida, 0, minor_to_index(1 << MINORBITS),
                             GFP_KERNEL);
        if (err < 0)
                goto out;
        index = err;

        /* We need to know how many segments before we allocate. */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SEG_MAX,
                                   struct virtio_blk_config, seg_max,
                                   &sg_elems);

        /* We need at least one SG element, whatever they say. */
        if (err || !sg_elems)
                sg_elems = 1;

        /* We need extra sg elements at head and tail. */
        sg_elems += 2;
        vdev->priv = vblk = kmalloc(sizeof(*vblk), GFP_KERNEL);
        if (!vblk) {
                err = -ENOMEM;
                goto out_free_index;
        }

        vblk->vdev = vdev;
        vblk->sg_elems = sg_elems;

        INIT_WORK(&vblk->config_work, virtblk_config_changed_work);

        err = init_vq(vblk);
        if (err)
                goto out_free_vblk;

        /* FIXME: How many partitions?  How long is a piece of string? */
        vblk->disk = alloc_disk(1 << PART_BITS);
        if (!vblk->disk) {
                err = -ENOMEM;
                goto out_free_vq;
        }

        /* Default queue sizing is to fill the ring. */
        if (!virtblk_queue_depth) {
                virtblk_queue_depth = vblk->vqs[0].vq->num_free;
                /* ... but without indirect descs, we use 2 descs per req */
                if (!virtio_has_feature(vdev, VIRTIO_RING_F_INDIRECT_DESC))
                        virtblk_queue_depth /= 2;
        }
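
        /*
         * Note (editor's addition): with indirect descriptors a request
         * consumes a single ring entry, so the depth can equal the free
         * ring size; without them each request chains at least two
         * descriptors (header and status, plus any data), hence the
         * conservative halving above.
         */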

        memset(&vblk->tag_set, 0, sizeof(vblk->tag_set));
        vblk->tag_set.ops = &virtio_mq_ops;
        vblk->tag_set.queue_depth = virtblk_queue_depth;
        vblk->tag_set.numa_node = NUMA_NO_NODE;
        vblk->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
        vblk->tag_set.cmd_size =
                sizeof(struct virtblk_req) +
                sizeof(struct scatterlist) * sg_elems;
        vblk->tag_set.driver_data = vblk;
        vblk->tag_set.nr_hw_queues = vblk->num_vqs;

        err = blk_mq_alloc_tag_set(&vblk->tag_set);
        if (err)
                goto out_put_disk;

        q = blk_mq_init_queue(&vblk->tag_set);
        if (IS_ERR(q)) {
                err = -ENOMEM;
                goto out_free_tags;
        }
        vblk->disk->queue = q;

        q->queuedata = vblk;

        virtblk_name_format("vd", index, vblk->disk->disk_name, DISK_NAME_LEN);

        vblk->disk->major = major;
        vblk->disk->first_minor = index_to_minor(index);
        vblk->disk->private_data = vblk;
        vblk->disk->fops = &virtblk_fops;
        vblk->disk->flags |= GENHD_FL_EXT_DEVT;
        vblk->index = index;

        /* configure queue flush support */
        virtblk_update_cache_mode(vdev);

        /* If disk is read-only in the host, the guest should obey */
        if (virtio_has_feature(vdev, VIRTIO_BLK_F_RO))
                set_disk_ro(vblk->disk, 1);

        /* We can handle whatever the host told us to handle. */
        blk_queue_max_segments(q, vblk->sg_elems-2);

        /* No real sector limit. */
        blk_queue_max_hw_sectors(q, -1U);

        max_size = virtio_max_dma_size(vdev);

        /* Host can optionally specify maximum segment size and number of
         * segments. */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_SIZE_MAX,
                                   struct virtio_blk_config, size_max, &v);
        if (!err)
                max_size = min(max_size, v);

        blk_queue_max_segment_size(q, max_size);

        /* Host can optionally specify the block size of the device */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_BLK_SIZE,
                                   struct virtio_blk_config, blk_size,
                                   &blk_size);
        if (!err)
                blk_queue_logical_block_size(q, blk_size);
        else
                blk_size = queue_logical_block_size(q);

        /* Use topology information if available */
        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, physical_block_exp,
                                   &physical_block_exp);
        if (!err && physical_block_exp)
                blk_queue_physical_block_size(q,
                                blk_size * (1 << physical_block_exp));

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, alignment_offset,
                                   &alignment_offset);
        if (!err && alignment_offset)
                blk_queue_alignment_offset(q, blk_size * alignment_offset);

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, min_io_size,
                                   &min_io_size);
        if (!err && min_io_size)
                blk_queue_io_min(q, blk_size * min_io_size);

        err = virtio_cread_feature(vdev, VIRTIO_BLK_F_TOPOLOGY,
                                   struct virtio_blk_config, opt_io_size,
                                   &opt_io_size);
        if (!err && opt_io_size)
                blk_queue_io_opt(q, blk_size * opt_io_size);

        if (virtio_has_feature(vdev, VIRTIO_BLK_F_DISCARD)) {
                q->limits.discard_granularity = blk_size;

                virtio_cread(vdev, struct virtio_blk_config,
                             discard_sector_alignment, &v);
                q->limits.discard_alignment = v ? v << SECTOR_SHIFT : 0;

                virtio_cread(vdev, struct virtio_blk_config,
                             max_discard_sectors, &v);
                blk_queue_max_discard_sectors(q, v ? v : UINT_MAX);

                virtio_cread(vdev, struct virtio_blk_config, max_discard_seg,
                             &v);
                blk_queue_max_discard_segments(q,
                                               min_not_zero(v,
                                                            MAX_DISCARD_SEGMENTS));

                blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
        }

        if (virtio_has_feature(vdev, VIRTIO_BLK_F_WRITE_ZEROES)) {
                virtio_cread(vdev, struct virtio_blk_config,
                             max_write_zeroes_sectors, &v);
                blk_queue_max_write_zeroes_sectors(q, v ? v : UINT_MAX);
        }

        virtblk_update_capacity(vblk, false);
        virtio_device_ready(vdev);

        device_add_disk(&vdev->dev, vblk->disk, virtblk_attr_groups);
        return 0;

out_free_tags:
        blk_mq_free_tag_set(&vblk->tag_set);
out_put_disk:
        put_disk(vblk->disk);
out_free_vq:
        vdev->config->del_vqs(vdev);
out_free_vblk:
        kfree(vblk);
out_free_index:
        ida_simple_remove(&vd_index_ida, index);
out:
        return err;
}

static void virtblk_remove(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;
        int index = vblk->index;
        int refc;

        /* Make sure no work handler is accessing the device. */
        flush_work(&vblk->config_work);

        del_gendisk(vblk->disk);
        blk_cleanup_queue(vblk->disk->queue);

        blk_mq_free_tag_set(&vblk->tag_set);

        /* Stop all the virtqueues. */
        vdev->config->reset(vdev);

        refc = kref_read(&disk_to_dev(vblk->disk)->kobj.kref);
        put_disk(vblk->disk);
        vdev->config->del_vqs(vdev);
        kfree(vblk->vqs);
        kfree(vblk);

        /* Only free device id if we don't have any users */
        if (refc == 1)
                ida_simple_remove(&vd_index_ida, index);
}
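
/*
 * Note (editor's addition): the kref is sampled before put_disk() so the
 * IDA index is released only if ours was the last reference; if someone
 * still holds the disk open, the index (and hence the vdX name) simply
 * stays reserved.
 */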

#ifdef CONFIG_PM_SLEEP
static int virtblk_freeze(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;

        /* Ensure we don't receive any more interrupts */
        vdev->config->reset(vdev);

        /* Make sure no work handler is accessing the device. */
        flush_work(&vblk->config_work);

        blk_mq_quiesce_queue(vblk->disk->queue);

        vdev->config->del_vqs(vdev);
        return 0;
}

static int virtblk_restore(struct virtio_device *vdev)
{
        struct virtio_blk *vblk = vdev->priv;
        int ret;

        ret = init_vq(vdev->priv);
        if (ret)
                return ret;

        virtio_device_ready(vdev);

        blk_mq_unquiesce_queue(vblk->disk->queue);
        return 0;
}
#endif

static const struct virtio_device_id id_table[] = {
        { VIRTIO_ID_BLOCK, VIRTIO_DEV_ANY_ID },
        { 0 },
};

static unsigned int features_legacy[] = {
        VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
        VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
#ifdef CONFIG_VIRTIO_BLK_SCSI
        VIRTIO_BLK_F_SCSI,
#endif
        VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
        VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
};

static unsigned int features[] = {
        VIRTIO_BLK_F_SEG_MAX, VIRTIO_BLK_F_SIZE_MAX, VIRTIO_BLK_F_GEOMETRY,
        VIRTIO_BLK_F_RO, VIRTIO_BLK_F_BLK_SIZE,
        VIRTIO_BLK_F_FLUSH, VIRTIO_BLK_F_TOPOLOGY, VIRTIO_BLK_F_CONFIG_WCE,
        VIRTIO_BLK_F_MQ, VIRTIO_BLK_F_DISCARD, VIRTIO_BLK_F_WRITE_ZEROES,
};
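
/*
 * Note (editor's addition): VIRTIO_BLK_F_SCSI appears only in the legacy
 * feature table; the virtio 1.0 spec removed SCSI passthrough, so modern
 * (non-transitional) devices never offer it and the driver never
 * negotiates it there.
 */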

static struct virtio_driver virtio_blk = {
        .feature_table                  = features,
        .feature_table_size             = ARRAY_SIZE(features),
        .feature_table_legacy           = features_legacy,
        .feature_table_size_legacy      = ARRAY_SIZE(features_legacy),
        .driver.name                    = KBUILD_MODNAME,
        .driver.owner                   = THIS_MODULE,
        .id_table                       = id_table,
        .probe                          = virtblk_probe,
        .remove                         = virtblk_remove,
        .config_changed                 = virtblk_config_changed,
#ifdef CONFIG_PM_SLEEP
        .freeze                         = virtblk_freeze,
        .restore                        = virtblk_restore,
#endif
};

static int __init init(void)
{
        int error;

        virtblk_wq = alloc_workqueue("virtio-blk", 0, 0);
        if (!virtblk_wq)
                return -ENOMEM;

        major = register_blkdev(0, "virtblk");
        if (major < 0) {
                error = major;
                goto out_destroy_workqueue;
        }

        error = register_virtio_driver(&virtio_blk);
        if (error)
                goto out_unregister_blkdev;
        return 0;

out_unregister_blkdev:
        unregister_blkdev(major, "virtblk");
out_destroy_workqueue:
        destroy_workqueue(virtblk_wq);
        return error;
}

static void __exit fini(void)
{
        unregister_virtio_driver(&virtio_blk);
        unregister_blkdev(major, "virtblk");
        destroy_workqueue(virtblk_wq);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio block driver");
MODULE_LICENSE("GPL");