Merge tag 'drm-misc-next-2020-10-27' of git://anongit.freedesktop.org/drm/drm-misc...

diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 07945ca238e2d93741a7df8db1c21c56f56e7f74..857f730747b61c099484909b0fcf88bbf82b07ad 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -1016,6 +1016,8 @@ virtio_gpu_cmd_resource_create_3d(struct virtio_gpu_device *vgdev,
 void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
                                        uint32_t ctx_id,
                                        uint64_t offset, uint32_t level,
+                                       uint32_t stride,
+                                       uint32_t layer_stride,
                                        struct drm_virtgpu_3d_box *box,
                                        struct virtio_gpu_object_array *objs,
                                        struct virtio_gpu_fence *fence)
@@ -1024,11 +1026,12 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
        struct virtio_gpu_transfer_host_3d *cmd_p;
        struct virtio_gpu_vbuffer *vbuf;
        bool use_dma_api = !virtio_has_dma_quirk(vgdev->vdev);
-       struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
 
-       if (use_dma_api)
+       if (virtio_gpu_is_shmem(bo) && use_dma_api) {
+               struct virtio_gpu_object_shmem *shmem = to_virtio_gpu_shmem(bo);
+
                dma_sync_sgtable_for_device(vgdev->vdev->dev.parent,
                                            shmem->pages, DMA_TO_DEVICE);
+       }
 
        cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
        memset(cmd_p, 0, sizeof(*cmd_p));
@@ -1041,6 +1044,8 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
        convert_to_hw_box(&cmd_p->box, box);
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->level = cpu_to_le32(level);
+       cmd_p->stride = cpu_to_le32(stride);
+       cmd_p->layer_stride = cpu_to_le32(layer_stride);
 
        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
 }
@@ -1048,6 +1053,8 @@ void virtio_gpu_cmd_transfer_to_host_3d(struct virtio_gpu_device *vgdev,
 void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
                                          uint32_t ctx_id,
                                          uint64_t offset, uint32_t level,
+                                         uint32_t stride,
+                                         uint32_t layer_stride,
                                          struct drm_virtgpu_3d_box *box,
                                          struct virtio_gpu_object_array *objs,
                                          struct virtio_gpu_fence *fence)
@@ -1067,6 +1074,8 @@ void virtio_gpu_cmd_transfer_from_host_3d(struct virtio_gpu_device *vgdev,
        convert_to_hw_box(&cmd_p->box, box);
        cmd_p->offset = cpu_to_le64(offset);
        cmd_p->level = cpu_to_le32(level);
+       cmd_p->stride = cpu_to_le32(stride);
+       cmd_p->layer_stride = cpu_to_le32(layer_stride);
 
        virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, fence);
 }
@@ -1125,14 +1134,14 @@ static void virtio_gpu_cmd_resource_uuid_cb(struct virtio_gpu_device *vgdev,
        uint32_t resp_type = le32_to_cpu(resp->hdr.type);
 
        spin_lock(&vgdev->resource_export_lock);
-       WARN_ON(obj->uuid_state != UUID_INITIALIZING);
+       WARN_ON(obj->uuid_state != STATE_INITIALIZING);
 
        if (resp_type == VIRTIO_GPU_RESP_OK_RESOURCE_UUID &&
-           obj->uuid_state == UUID_INITIALIZING) {
-               memcpy(&obj->uuid.b, resp->uuid, sizeof(obj->uuid.b));
-               obj->uuid_state = UUID_INITIALIZED;
+           obj->uuid_state == STATE_INITIALIZING) {
+               import_uuid(&obj->uuid, resp->uuid);
+               obj->uuid_state = STATE_OK;
        } else {
-               obj->uuid_state = UUID_INITIALIZATION_FAILED;
+               obj->uuid_state = STATE_ERR;
        }
        spin_unlock(&vgdev->resource_export_lock);
 
@@ -1151,7 +1160,7 @@ virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
        resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
        if (!resp_buf) {
                spin_lock(&vgdev->resource_export_lock);
-               bo->uuid_state = UUID_INITIALIZATION_FAILED;
+               bo->uuid_state = STATE_ERR;
                spin_unlock(&vgdev->resource_export_lock);
                virtio_gpu_array_put_free(objs);
                return -ENOMEM;
@@ -1169,3 +1178,134 @@ virtio_gpu_cmd_resource_assign_uuid(struct virtio_gpu_device *vgdev,
        virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
        return 0;
 }
+
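+/*
+ * Completion callback for VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB: records the
+ * map_info returned by the host and wakes anyone sleeping on resp_wq.
+ */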
+static void virtio_gpu_cmd_resource_map_cb(struct virtio_gpu_device *vgdev,
+                                          struct virtio_gpu_vbuffer *vbuf)
+{
+       struct virtio_gpu_object *bo =
+               gem_to_virtio_gpu_obj(vbuf->objs->objs[0]);
+       struct virtio_gpu_resp_map_info *resp =
+               (struct virtio_gpu_resp_map_info *)vbuf->resp_buf;
+       struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
+       uint32_t resp_type = le32_to_cpu(resp->hdr.type);
+
+       spin_lock(&vgdev->host_visible_lock);
+
+       if (resp_type == VIRTIO_GPU_RESP_OK_MAP_INFO) {
+               vram->map_info = resp->map_info;
+               vram->map_state = STATE_OK;
+       } else {
+               vram->map_state = STATE_ERR;
+       }
+
+       spin_unlock(&vgdev->host_visible_lock);
+       wake_up_all(&vgdev->resp_wq);
+}
+
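+/*
+ * Ask the host to map a blob resource into its host-visible region at
+ * the given offset.  The reply arrives asynchronously; callers are
+ * expected to sleep on vgdev->resp_wq until vram->map_state leaves
+ * STATE_INITIALIZING.
+ */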
+int virtio_gpu_cmd_map(struct virtio_gpu_device *vgdev,
+                      struct virtio_gpu_object_array *objs, uint64_t offset)
+{
+       struct virtio_gpu_resource_map_blob *cmd_p;
+       struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(objs->objs[0]);
+       struct virtio_gpu_vbuffer *vbuf;
+       struct virtio_gpu_resp_map_info *resp_buf;
+
+       resp_buf = kzalloc(sizeof(*resp_buf), GFP_KERNEL);
+       if (!resp_buf) {
+               virtio_gpu_array_put_free(objs);
+               return -ENOMEM;
+       }
+
+       cmd_p = virtio_gpu_alloc_cmd_resp
+               (vgdev, virtio_gpu_cmd_resource_map_cb, &vbuf, sizeof(*cmd_p),
+                sizeof(struct virtio_gpu_resp_map_info), resp_buf);
+       memset(cmd_p, 0, sizeof(*cmd_p));
+
+       cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB);
+       cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+       cmd_p->offset = cpu_to_le64(offset);
+       vbuf->objs = objs;
+
+       virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+       return 0;
+}
+
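+/* Tear down a mapping previously established with virtio_gpu_cmd_map(). */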
+void virtio_gpu_cmd_unmap(struct virtio_gpu_device *vgdev,
+                         struct virtio_gpu_object *bo)
+{
+       struct virtio_gpu_resource_unmap_blob *cmd_p;
+       struct virtio_gpu_vbuffer *vbuf;
+
+       cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+       memset(cmd_p, 0, sizeof(*cmd_p));
+
+       cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_UNMAP_BLOB);
+       cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+
+       virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+}
+
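+/*
+ * Create a blob resource.  Ownership of @ents passes to the vbuffer;
+ * the array is freed once the command has been processed.
+ */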
+void
+virtio_gpu_cmd_resource_create_blob(struct virtio_gpu_device *vgdev,
+                                   struct virtio_gpu_object *bo,
+                                   struct virtio_gpu_object_params *params,
+                                   struct virtio_gpu_mem_entry *ents,
+                                   uint32_t nents)
+{
+       struct virtio_gpu_resource_create_blob *cmd_p;
+       struct virtio_gpu_vbuffer *vbuf;
+
+       cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+       memset(cmd_p, 0, sizeof(*cmd_p));
+
+       cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_CREATE_BLOB);
+       cmd_p->hdr.ctx_id = cpu_to_le32(params->ctx_id);
+       cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+       cmd_p->blob_mem = cpu_to_le32(params->blob_mem);
+       cmd_p->blob_flags = cpu_to_le32(params->blob_flags);
+       cmd_p->blob_id = cpu_to_le64(params->blob_id);
+       cmd_p->size = cpu_to_le64(params->size);
+       cmd_p->nr_entries = cpu_to_le32(nents);
+
+       vbuf->data_buf = ents;
+       vbuf->data_size = sizeof(*ents) * nents;
+
+       virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+       bo->created = true;
+}
+
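+/*
+ * Like SET_SCANOUT, but for blob resources: the framebuffer format,
+ * size and per-plane strides/offsets are passed along so the host can
+ * scan out the blob directly.
+ */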
+void virtio_gpu_cmd_set_scanout_blob(struct virtio_gpu_device *vgdev,
+                                    uint32_t scanout_id,
+                                    struct virtio_gpu_object *bo,
+                                    struct drm_framebuffer *fb,
+                                    uint32_t width, uint32_t height,
+                                    uint32_t x, uint32_t y)
+{
+       uint32_t i;
+       struct virtio_gpu_set_scanout_blob *cmd_p;
+       struct virtio_gpu_vbuffer *vbuf;
+       uint32_t format = virtio_gpu_translate_format(fb->format->format);
+
+       cmd_p = virtio_gpu_alloc_cmd(vgdev, &vbuf, sizeof(*cmd_p));
+       memset(cmd_p, 0, sizeof(*cmd_p));
+
+       cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_SET_SCANOUT_BLOB);
+       cmd_p->resource_id = cpu_to_le32(bo->hw_res_handle);
+       cmd_p->scanout_id = cpu_to_le32(scanout_id);
+
+       cmd_p->format = cpu_to_le32(format);
+       cmd_p->width  = cpu_to_le32(fb->width);
+       cmd_p->height = cpu_to_le32(fb->height);
+
+       for (i = 0; i < 4; i++) {
+               cmd_p->strides[i] = cpu_to_le32(fb->pitches[i]);
+               cmd_p->offsets[i] = cpu_to_le32(fb->offsets[i]);
+       }
+
+       cmd_p->r.width = cpu_to_le32(width);
+       cmd_p->r.height = cpu_to_le32(height);
+       cmd_p->r.x = cpu_to_le32(x);
+       cmd_p->r.y = cpu_to_le32(y);
+
+       virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+}
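
For reference, a minimal sketch of how a caller might drive the new map
command end to end.  It mirrors the wait pattern implied by
virtio_gpu_cmd_resource_map_cb() (which flips map_state and wakes
vgdev->resp_wq), but example_map_blob() itself is illustrative and not
part of this patch; it assumes the driver's existing
virtio_gpu_array_alloc()/virtio_gpu_array_add_obj() helpers and that
map_state starts out as STATE_INITIALIZING.

    /* Hypothetical caller: queue a map and wait for the host's reply. */
    static int example_map_blob(struct virtio_gpu_device *vgdev,
                                struct virtio_gpu_object *bo, uint64_t offset)
    {
            struct virtio_gpu_object_vram *vram = to_virtio_gpu_vram(bo);
            struct virtio_gpu_object_array *objs;
            int ret;

            objs = virtio_gpu_array_alloc(1);
            if (!objs)
                    return -ENOMEM;
            virtio_gpu_array_add_obj(objs, &bo->base.base);

            /* Queues VIRTIO_GPU_CMD_RESOURCE_MAP_BLOB; on completion,
             * virtio_gpu_cmd_resource_map_cb() updates map_state and
             * wakes resp_wq.  On failure the object array is released
             * by virtio_gpu_cmd_map() itself. */
            ret = virtio_gpu_cmd_map(vgdev, objs, offset);
            if (ret)
                    return ret;

            wait_event(vgdev->resp_wq, vram->map_state != STATE_INITIALIZING);

            return vram->map_state == STATE_OK ? 0 : -EIO;
    }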