/*
 * Copyright (C) 2015 Red Hat, Inc.
 * All Rights Reserved.
 *
 * Authors:
 *    Dave Airlie <airlied@redhat.com>
 *    Gerd Hoffmann <kraxel@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <drm/drmP.h>
#include "virtgpu_drv.h"
#include <linux/virtio.h>
#include <linux/virtio_config.h>
#include <linux/virtio_ring.h>

#define MAX_INLINE_CMD_SIZE   96
#define MAX_INLINE_RESP_SIZE  24
#define VBUFFER_SIZE          (sizeof(struct virtio_gpu_vbuffer) \
			       + MAX_INLINE_CMD_SIZE		 \
			       + MAX_INLINE_RESP_SIZE)

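/*
 * Virtqueue callbacks: these run from the vring interrupt handler, so all
 * they do is kick the dequeue work onto a workqueue.
 */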
void virtio_gpu_ctrl_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->ctrlq.dequeue_work);
}

void virtio_gpu_cursor_ack(struct virtqueue *vq)
{
	struct drm_device *dev = vq->vdev->priv;
	struct virtio_gpu_device *vgdev = dev->dev_private;

	schedule_work(&vgdev->cursorq.dequeue_work);
}

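/*
 * All commands come from a single slab cache; each object is large enough
 * to hold the vbuffer header plus the inline command and response buffers
 * (see VBUFFER_SIZE above).
 */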
int virtio_gpu_alloc_vbufs(struct virtio_gpu_device *vgdev)
{
	vgdev->vbufs = kmem_cache_create("virtio-gpu-vbufs",
					 VBUFFER_SIZE,
					 __alignof__(struct virtio_gpu_vbuffer),
					 0, NULL);
	if (!vgdev->vbufs)
		return -ENOMEM;
	return 0;
}

void virtio_gpu_free_vbufs(struct virtio_gpu_device *vgdev)
{
	kmem_cache_destroy(vgdev->vbufs);
	vgdev->vbufs = NULL;
}

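/*
 * The command always lives in the inline buffer right behind the vbuffer
 * header. The response uses the inline space too when it is small enough;
 * larger responses need a caller-supplied resp_buf.
 */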
static struct virtio_gpu_vbuffer*
virtio_gpu_get_vbuf(struct virtio_gpu_device *vgdev,
		    int size, int resp_size, void *resp_buf,
		    virtio_gpu_resp_cb resp_cb)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = kmem_cache_zalloc(vgdev->vbufs, GFP_KERNEL);
	if (!vbuf)
		return ERR_PTR(-ENOMEM);

	BUG_ON(size > MAX_INLINE_CMD_SIZE);
	vbuf->buf = (void *)vbuf + sizeof(*vbuf);
	vbuf->size = size;

	vbuf->resp_cb = resp_cb;
	vbuf->resp_size = resp_size;
	if (resp_size <= MAX_INLINE_RESP_SIZE)
		vbuf->resp_buf = (void *)vbuf->buf + size;
	else
		vbuf->resp_buf = resp_buf;
	BUG_ON(!vbuf->resp_buf);
	return vbuf;
}

static void *virtio_gpu_alloc_cmd(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_vbuffer **vbuffer_p,
				  int size)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, size,
				   sizeof(struct virtio_gpu_ctrl_hdr),
				   NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return vbuf->buf;
}

static struct virtio_gpu_update_cursor*
virtio_gpu_alloc_cursor(struct virtio_gpu_device *vgdev,
			struct virtio_gpu_vbuffer **vbuffer_p)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf
		(vgdev, sizeof(struct virtio_gpu_update_cursor),
		 0, NULL, NULL);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_update_cursor *)vbuf->buf;
}

static void *virtio_gpu_alloc_cmd_resp(struct virtio_gpu_device *vgdev,
				       virtio_gpu_resp_cb cb,
				       struct virtio_gpu_vbuffer **vbuffer_p,
				       int cmd_size, int resp_size,
				       void *resp_buf)
{
	struct virtio_gpu_vbuffer *vbuf;

	vbuf = virtio_gpu_get_vbuf(vgdev, cmd_size,
				   resp_size, resp_buf, cb);
	if (IS_ERR(vbuf)) {
		*vbuffer_p = NULL;
		return ERR_CAST(vbuf);
	}
	*vbuffer_p = vbuf;
	return (struct virtio_gpu_command *)vbuf->buf;
}

static void free_vbuf(struct virtio_gpu_device *vgdev,
		      struct virtio_gpu_vbuffer *vbuf)
{
	if (vbuf->resp_size > MAX_INLINE_RESP_SIZE)
		kfree(vbuf->resp_buf);
	kfree(vbuf->data_buf);
	kmem_cache_free(vgdev->vbufs, vbuf);
}

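/* Move all completed vbuffers off the virtqueue onto reclaim_list. */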
static void reclaim_vbufs(struct virtqueue *vq, struct list_head *reclaim_list)
{
	struct virtio_gpu_vbuffer *vbuf;
	unsigned int len;
	int freed = 0;

	while ((vbuf = virtqueue_get_buf(vq, &len))) {
		list_add_tail(&vbuf->list, reclaim_list);
		freed++;
	}
	if (freed == 0)
		DRM_DEBUG("Huh? zero vbufs reclaimed");
}

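/*
 * Control-queue worker: collect completed buffers, log error responses,
 * track the highest fence id seen, run per-command response callbacks,
 * then wake fence waiters and anyone waiting for ring space.
 */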
void virtio_gpu_dequeue_ctrl_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     ctrlq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;
	struct virtio_gpu_ctrl_hdr *resp;
	u64 fence_id = 0;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->ctrlq.qlock);
	do {
		virtqueue_disable_cb(vgdev->ctrlq.vq);
		reclaim_vbufs(vgdev->ctrlq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->ctrlq.vq));
	spin_unlock(&vgdev->ctrlq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		resp = (struct virtio_gpu_ctrl_hdr *)entry->resp_buf;
		if (resp->type != cpu_to_le32(VIRTIO_GPU_RESP_OK_NODATA)) {
			if (resp->type >= cpu_to_le32(VIRTIO_GPU_RESP_ERR_UNSPEC)) {
				struct virtio_gpu_ctrl_hdr *cmd;
				cmd = (struct virtio_gpu_ctrl_hdr *)entry->buf;
				DRM_ERROR("response 0x%x (command 0x%x)\n",
					  le32_to_cpu(resp->type),
					  le32_to_cpu(cmd->type));
			} else
				DRM_DEBUG("response 0x%x\n", le32_to_cpu(resp->type));
		}
		if (resp->flags & cpu_to_le32(VIRTIO_GPU_FLAG_FENCE)) {
			u64 f = le64_to_cpu(resp->fence_id);

			if (fence_id > f) {
				DRM_ERROR("%s: Oops: fence %llx -> %llx\n",
					  __func__, fence_id, f);
			} else {
				fence_id = f;
			}
		}
		if (entry->resp_cb)
			entry->resp_cb(vgdev, entry);

		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->ctrlq.ack_queue);

	if (fence_id)
		virtio_gpu_fence_event_process(vgdev, fence_id);
}

void virtio_gpu_dequeue_cursor_func(struct work_struct *work)
{
	struct virtio_gpu_device *vgdev =
		container_of(work, struct virtio_gpu_device,
			     cursorq.dequeue_work);
	struct list_head reclaim_list;
	struct virtio_gpu_vbuffer *entry, *tmp;

	INIT_LIST_HEAD(&reclaim_list);
	spin_lock(&vgdev->cursorq.qlock);
	do {
		virtqueue_disable_cb(vgdev->cursorq.vq);
		reclaim_vbufs(vgdev->cursorq.vq, &reclaim_list);
	} while (!virtqueue_enable_cb(vgdev->cursorq.vq));
	spin_unlock(&vgdev->cursorq.qlock);

	list_for_each_entry_safe(entry, tmp, &reclaim_list, list) {
		list_del(&entry->list);
		free_vbuf(vgdev, entry);
	}
	wake_up(&vgdev->cursorq.ack_queue);
}

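/*
 * A control command needs at most three descriptors: the command itself
 * (device-readable), an optional data buffer (device-readable) and an
 * optional response buffer (device-writable). On -ENOSPC the qlock is
 * dropped while waiting for the host to free ring space, then we retry.
 */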
static int virtio_gpu_queue_ctrl_buffer_locked(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
		__releases(&vgdev->ctrlq.qlock)
		__acquires(&vgdev->ctrlq.qlock)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	struct scatterlist *sgs[3], vcmd, vout, vresp;
	int outcnt = 0, incnt = 0;
	int ret;

	if (!vgdev->vqs_ready)
		return -ENODEV;

	sg_init_one(&vcmd, vbuf->buf, vbuf->size);
	sgs[outcnt + incnt] = &vcmd;
	outcnt++;

	if (vbuf->data_size) {
		sg_init_one(&vout, vbuf->data_buf, vbuf->data_size);
		sgs[outcnt + incnt] = &vout;
		outcnt++;
	}

	if (vbuf->resp_size) {
		sg_init_one(&vresp, vbuf->resp_buf, vbuf->resp_size);
		sgs[outcnt + incnt] = &vresp;
		incnt++;
	}

retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, incnt, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= outcnt + incnt);
		spin_lock(&vgdev->ctrlq.qlock);
		goto retry;
	} else {
		virtqueue_kick(vq);
	}

	if (!ret)
		ret = vq->num_free;
	return ret;
}

static int virtio_gpu_queue_ctrl_buffer(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_vbuffer *vbuf)
{
	int rc;

	spin_lock(&vgdev->ctrlq.qlock);
	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
	spin_unlock(&vgdev->ctrlq.qlock);
	return rc;
}

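/*
 * Fenced commands must reach the ring in fence id order, so the fence is
 * emitted and the buffer queued within a single qlock critical section.
 */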
static int virtio_gpu_queue_fenced_ctrl_buffer(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf,
					       struct virtio_gpu_ctrl_hdr *hdr,
					       struct virtio_gpu_fence *fence)
{
	struct virtqueue *vq = vgdev->ctrlq.vq;
	int rc;

again:
	spin_lock(&vgdev->ctrlq.qlock);

	/*
	 * Make sure we have enough space in the virtqueue. If not
	 * wait here until we have.
	 *
	 * Without that virtio_gpu_queue_ctrl_buffer_locked might have
	 * to wait for free space, which can result in fence ids being
	 * submitted out-of-order.
	 */
	if (vq->num_free < 3) {
		spin_unlock(&vgdev->ctrlq.qlock);
		wait_event(vgdev->ctrlq.ack_queue, vq->num_free >= 3);
		goto again;
	}

	if (fence)
		virtio_gpu_fence_emit(vgdev, hdr, fence);
	rc = virtio_gpu_queue_ctrl_buffer_locked(vgdev, vbuf);
	spin_unlock(&vgdev->ctrlq.qlock);
	return rc;
}

static int virtio_gpu_queue_cursor(struct virtio_gpu_device *vgdev,
				   struct virtio_gpu_vbuffer *vbuf)
{
	struct virtqueue *vq = vgdev->cursorq.vq;
	struct scatterlist *sgs[1], ccmd;
	int ret;
	int outcnt;

	if (!vgdev->vqs_ready)
		return -ENODEV;

	sg_init_one(&ccmd, vbuf->buf, vbuf->size);
	sgs[0] = &ccmd;
	outcnt = 1;

	spin_lock(&vgdev->cursorq.qlock);
retry:
	ret = virtqueue_add_sgs(vq, sgs, outcnt, 0, vbuf, GFP_ATOMIC);
	if (ret == -ENOSPC) {
		spin_unlock(&vgdev->cursorq.qlock);
		wait_event(vgdev->cursorq.ack_queue, vq->num_free >= outcnt);
		spin_lock(&vgdev->cursorq.qlock);
		goto retry;
	} else {
		virtqueue_kick(vq);
	}

	spin_unlock(&vgdev->cursorq.qlock);

	if (!ret)
		ret = vq->num_free;
	return ret;
}

/* just create gem objects for userspace and long lived objects,
 * just use dma_alloced pages for the queue objects?
 */

/* create a basic resource */
void virtio_gpu_cmd_create_resource(struct virtio_gpu_device *vgdev,
				    struct virtio_gpu_object *bo,
				    struct virtio_gpu_object_params *params,
				    struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
	bo->created = true;
}

void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id)
{
	struct virtio_gpu_resource_unref *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNREF);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
						  uint32_t resource_id,
						  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_detach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
				uint32_t scanout_id, uint32_t resource_id,
				uint32_t width, uint32_t height,
				uint32_t x, uint32_t y)
{
	struct virtio_gpu_set_scanout *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->scanout_id = cpu_to_le32(scanout_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_resource_flush(struct virtio_gpu_device *vgdev,
				   uint32_t resource_id,
				   uint32_t x, uint32_t y,
				   uint32_t width, uint32_t height)
{
	struct virtio_gpu_resource_flush *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_FLUSH);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->r.width = cpu_to_le32(width);
	cmd_p->r.height = cpu_to_le32(height);
	cmd_p->r.x = cpu_to_le32(x);
	cmd_p->r.y = cpu_to_le32(y);

	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

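/*
 * When the transport uses the DMA API, sync the backing pages for the
 * device before asking the host to read from them.
 */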
void virtio_gpu_cmd_transfer_to_host_2d(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_object *bo,
					uint64_t offset,
					__le32 width, __le32 height,
					__le32 x, __le32 y,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_transfer_to_host_2d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (use_dma_api)
		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
				       bo->pages->sgl, bo->pages->nents,
				       DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_2D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->r.width = width;
	cmd_p->r.height = height;
	cmd_p->r.x = x;
	cmd_p->r.y = y;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

static void
virtio_gpu_cmd_resource_attach_backing(struct virtio_gpu_device *vgdev,
				       uint32_t resource_id,
				       struct virtio_gpu_mem_entry *ents,
				       uint32_t nents,
				       struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_attach_backing *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_ATTACH_BACKING);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->nr_entries = cpu_to_le32(nents);

	vbuf->data_buf = ents;
	vbuf->data_size = sizeof(*ents) * nents;

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

static void virtio_gpu_cmd_get_display_info_cb(struct virtio_gpu_device *vgdev,
					       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_resp_display_info *resp =
		(struct virtio_gpu_resp_display_info *)vbuf->resp_buf;
	int i;

	spin_lock(&vgdev->display_info_lock);
	for (i = 0; i < vgdev->num_scanouts; i++) {
		vgdev->outputs[i].info = resp->pmodes[i];
		if (resp->pmodes[i].enabled) {
			DRM_DEBUG("output %d: %dx%d+%d+%d", i,
				  le32_to_cpu(resp->pmodes[i].r.width),
				  le32_to_cpu(resp->pmodes[i].r.height),
				  le32_to_cpu(resp->pmodes[i].r.x),
				  le32_to_cpu(resp->pmodes[i].r.y));
		} else {
			DRM_DEBUG("output %d: disabled", i);
		}
	}

	vgdev->display_info_pending = false;
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);

	if (!drm_helper_hpd_irq_event(vgdev->ddev))
		drm_kms_helper_hotplug_event(vgdev->ddev);
}

static void virtio_gpu_cmd_get_capset_info_cb(struct virtio_gpu_device *vgdev,
					      struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset_info *cmd =
		(struct virtio_gpu_get_capset_info *)vbuf->buf;
	struct virtio_gpu_resp_capset_info *resp =
		(struct virtio_gpu_resp_capset_info *)vbuf->resp_buf;
	int i = le32_to_cpu(cmd->capset_index);

	spin_lock(&vgdev->display_info_lock);
	vgdev->capsets[i].id = le32_to_cpu(resp->capset_id);
	vgdev->capsets[i].max_version = le32_to_cpu(resp->capset_max_version);
	vgdev->capsets[i].max_size = le32_to_cpu(resp->capset_max_size);
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}

static void virtio_gpu_cmd_capset_cb(struct virtio_gpu_device *vgdev,
				     struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_get_capset *cmd =
		(struct virtio_gpu_get_capset *)vbuf->buf;
	struct virtio_gpu_resp_capset *resp =
		(struct virtio_gpu_resp_capset *)vbuf->resp_buf;
	struct virtio_gpu_drv_cap_cache *cache_ent;

	spin_lock(&vgdev->display_info_lock);
	list_for_each_entry(cache_ent, &vgdev->cap_cache, head) {
		if (cache_ent->version == le32_to_cpu(cmd->capset_version) &&
		    cache_ent->id == le32_to_cpu(cmd->capset_id)) {
			memcpy(cache_ent->caps_cache, resp->capset_data,
			       cache_ent->size);
			atomic_set(&cache_ent->is_valid, 1);
			break;
		}
	}
	spin_unlock(&vgdev->display_info_lock);
	wake_up(&vgdev->resp_wq);
}

static int virtio_get_edid_block(void *data, u8 *buf,
				 unsigned int block, size_t len)
{
	struct virtio_gpu_resp_edid *resp = data;
	size_t start = block * EDID_LENGTH;

	if (start + len > le32_to_cpu(resp->size))
		return -1;
	memcpy(buf, resp->edid + start, len);
	return 0;
}

static void virtio_gpu_cmd_get_edid_cb(struct virtio_gpu_device *vgdev,
				       struct virtio_gpu_vbuffer *vbuf)
{
	struct virtio_gpu_cmd_get_edid *cmd =
		(struct virtio_gpu_cmd_get_edid *)vbuf->buf;
	struct virtio_gpu_resp_edid *resp =
		(struct virtio_gpu_resp_edid *)vbuf->resp_buf;
	uint32_t scanout = le32_to_cpu(cmd->scanout);
	struct virtio_gpu_output *output;
	struct edid *new_edid, *old_edid;

	if (scanout >= vgdev->num_scanouts)
		return;
	output = vgdev->outputs + scanout;

	new_edid = drm_do_get_edid(&output->conn, virtio_get_edid_block, resp);
	drm_connector_update_edid_property(&output->conn, new_edid);

	spin_lock(&vgdev->display_info_lock);
	old_edid = output->edid;
	output->edid = new_edid;
	spin_unlock(&vgdev->display_info_lock);

	kfree(old_edid);
	wake_up(&vgdev->resp_wq);
}

int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_ctrl_hdr *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_display_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_display_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_display_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	vgdev->display_info_pending = true;
	cmd_p->type = cpu_to_le32(VIRTIO_GPU_CMD_GET_DISPLAY_INFO);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx)
{
	struct virtio_gpu_get_capset_info *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset_info),
			   GFP_KERNEL);
	if (!resp_buf)
		return -ENOMEM;

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_get_capset_info_cb, &vbuf,
		 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_capset_info),
		 resp_buf);
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET_INFO);
	cmd_p->capset_index = cpu_to_le32(idx);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	return 0;
}

int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
			      int idx, int version,
			      struct virtio_gpu_drv_cap_cache **cache_p)
{
	struct virtio_gpu_get_capset *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	int max_size;
	struct virtio_gpu_drv_cap_cache *cache_ent;
	void *resp_buf;

	if (idx >= vgdev->num_capsets)
		return -EINVAL;

	if (version > vgdev->capsets[idx].max_version)
		return -EINVAL;

	cache_ent = kzalloc(sizeof(*cache_ent), GFP_KERNEL);
	if (!cache_ent)
		return -ENOMEM;

	max_size = vgdev->capsets[idx].max_size;
	cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
	if (!cache_ent->caps_cache) {
		kfree(cache_ent);
		return -ENOMEM;
	}

	resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_capset) + max_size,
			   GFP_KERNEL);
	if (!resp_buf) {
		kfree(cache_ent->caps_cache);
		kfree(cache_ent);
		return -ENOMEM;
	}

	cache_ent->version = version;
	cache_ent->id = vgdev->capsets[idx].id;
	atomic_set(&cache_ent->is_valid, 0);
	cache_ent->size = max_size;
	spin_lock(&vgdev->display_info_lock);
	list_add_tail(&cache_ent->head, &vgdev->cap_cache);
	spin_unlock(&vgdev->display_info_lock);

	cmd_p = virtio_gpu_alloc_cmd_resp
		(vgdev, &virtio_gpu_cmd_capset_cb, &vbuf, sizeof(*cmd_p),
		 sizeof(struct virtio_gpu_resp_capset) + max_size,
		 resp_buf);
	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_CAPSET);
	cmd_p->capset_id = cpu_to_le32(vgdev->capsets[idx].id);
	cmd_p->capset_version = cpu_to_le32(version);
	*cache_p = cache_ent;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);

	return 0;
}

int virtio_gpu_cmd_get_edids(struct virtio_gpu_device *vgdev)
{
	struct virtio_gpu_cmd_get_edid *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	void *resp_buf;
	int scanout;

	if (WARN_ON(!vgdev->has_edid))
		return -EINVAL;

	for (scanout = 0; scanout < vgdev->num_scanouts; scanout++) {
		resp_buf = kzalloc(sizeof(struct virtio_gpu_resp_edid),
				   GFP_KERNEL);
		if (!resp_buf)
			return -ENOMEM;

		cmd_p = virtio_gpu_alloc_cmd_resp
			(vgdev, &virtio_gpu_cmd_get_edid_cb, &vbuf,
			 sizeof(*cmd_p), sizeof(struct virtio_gpu_resp_edid),
			 resp_buf);
		cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_GET_EDID);
		cmd_p->scanout = cpu_to_le32(scanout);
		virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
	}

	return 0;
}

void virtio_gpu_cmd_context_create(struct virtio_gpu_device *vgdev, uint32_t id,
				   uint32_t nlen, const char *name)
{
	struct virtio_gpu_ctx_create *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_CREATE);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	cmd_p->nlen = cpu_to_le32(nlen);
	strncpy(cmd_p->debug_name, name, sizeof(cmd_p->debug_name) - 1);
	cmd_p->debug_name[sizeof(cmd_p->debug_name) - 1] = 0;
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_destroy(struct virtio_gpu_device *vgdev,
				    uint32_t id)
{
	struct virtio_gpu_ctx_destroy *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DESTROY);
	cmd_p->hdr.ctx_id = cpu_to_le32(id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_attach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    uint32_t resource_id)
{
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_ATTACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void virtio_gpu_cmd_context_detach_resource(struct virtio_gpu_device *vgdev,
					    uint32_t ctx_id,
					    uint32_t resource_id)
{
	struct virtio_gpu_ctx_resource *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_CTX_DETACH_RESOURCE);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}

void
virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
				  struct virtio_gpu_object *bo,
				  struct virtio_gpu_object_params *params,
				  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_resource_create_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_3D);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->format = cpu_to_le32(params->format);
	cmd_p->width = cpu_to_le32(params->width);
	cmd_p->height = cpu_to_le32(params->height);

	cmd_p->target = cpu_to_le32(params->target);
	cmd_p->bind = cpu_to_le32(params->bind);
	cmd_p->depth = cpu_to_le32(params->depth);
	cmd_p->array_size = cpu_to_le32(params->array_size);
	cmd_p->last_level = cpu_to_le32(params->last_level);
	cmd_p->nr_samples = cpu_to_le32(params->nr_samples);
	cmd_p->flags = cpu_to_le32(params->flags);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
	bo->created = true;
}

void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
					struct virtio_gpu_object *bo,
					uint32_t ctx_id,
					uint64_t offset, uint32_t level,
					struct virtio_gpu_box *box,
					struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (use_dma_api)
		dma_sync_sg_for_device(vgdev->vdev->dev.parent,
				       bo->pages->sgl, bo->pages->nents,
				       DMA_TO_DEVICE);

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_TO_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
	cmd_p->box = *box;
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
					  uint32_t resource_id, uint32_t ctx_id,
					  uint64_t offset, uint32_t level,
					  struct virtio_gpu_box *box,
					  struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_transfer_host_3d *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_TRANSFER_FROM_HOST_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->resource_id = cpu_to_le32(resource_id);
	cmd_p->box = *box;
	cmd_p->offset = cpu_to_le64(offset);
	cmd_p->level = cpu_to_le32(level);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

void virtio_gpu_cmd_submit(struct virtio_gpu_device *vgdev,
			   void *data, uint32_t data_size,
			   uint32_t ctx_id, struct virtio_gpu_fence *fence)
{
	struct virtio_gpu_cmd_submit *cmd_p;
	struct virtio_gpu_vbuffer *vbuf;

	cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
	memset(cmd_p, 0, sizeof(*cmd_p));

	vbuf->data_buf = data;
	vbuf->data_size = data_size;

	cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SUBMIT_3D);
	cmd_p->hdr.ctx_id = cpu_to_le32(ctx_id);
	cmd_p->size = cpu_to_le32(data_size);

	virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}

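/*
 * Build the mem_entry array describing the object's backing pages and hand
 * it to the host; the array becomes vbuf->data_buf and is freed by
 * free_vbuf() once the ring has consumed the command.
 */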
int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
			     struct virtio_gpu_object *obj,
			     struct virtio_gpu_fence *fence)
{
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
	struct virtio_gpu_mem_entry *ents;
	struct scatterlist *sg;
	int si, nents;

	if (WARN_ON_ONCE(!obj->created))
		return -EINVAL;

	if (!obj->pages) {
		int ret;

		ret = virtio_gpu_object_get_sg_table(vgdev, obj);
		if (ret)
			return ret;
	}

	if (use_dma_api) {
		obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
					 obj->pages->sgl, obj->pages->nents,
					 DMA_TO_DEVICE);
		nents = obj->mapped;
	} else {
		nents = obj->pages->nents;
	}

	/* gets freed when the ring has consumed it */
	ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
			     GFP_KERNEL);
	if (!ents) {
		DRM_ERROR("failed to allocate ent list\n");
		return -ENOMEM;
	}

	for_each_sg(obj->pages->sgl, sg, nents, si) {
		ents[si].addr = cpu_to_le64(use_dma_api
					    ? sg_dma_address(sg)
					    : sg_phys(sg));
		ents[si].length = cpu_to_le32(sg->length);
		ents[si].padding = 0;
	}

	virtio_gpu_cmd_resource_attach_backing(vgdev, obj->hw_res_handle,
					       ents, nents,
					       fence);
	return 0;
}

void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
			      struct virtio_gpu_object *obj)
{
	bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);

	if (use_dma_api && obj->mapped) {
		struct virtio_gpu_fence *fence = virtio_gpu_fence_alloc(vgdev);

		/* detach backing and wait for the host to process it ... */
		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, fence);
		dma_fence_wait(&fence->f, true);
		dma_fence_put(&fence->f);

		/* ... then tear down iommu mappings */
		dma_unmap_sg(vgdev->vdev->dev.parent,
			     obj->pages->sgl, obj->mapped,
			     DMA_TO_DEVICE);
		obj->mapped = 0;
	} else {
		virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
	}
}

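/* Push the current cursor state for one output to the cursor queue. */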
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
			    struct virtio_gpu_output *output)
{
	struct virtio_gpu_vbuffer *vbuf;
	struct virtio_gpu_update_cursor *cur_p;

	output->cursor.pos.scanout_id = cpu_to_le32(output->index);
	cur_p = virtio_gpu_alloc_cursor(vgdev, &vbuf);
	memcpy(cur_p, &output->cursor, sizeof(output->cursor));
	virtio_gpu_queue_cursor(vgdev, vbuf);
}