/*
 * Copyright 2017 Intel Corporation. All rights reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Zhiyuan Lv <zhiyuan.lv@intel.com>
 *
 * Contributors:
 *    Xiaoguang Chen
 *    Tina Zhang <tina.zhang@intel.com>
 */
#include <linux/dma-buf.h>
#include <linux/vfio.h>

#include "i915_drv.h"
#include "gvt.h"

#define GEN8_DECODE_PTE(pte) (pte & GENMASK_ULL(63, 12))
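/*
 * On gen8+ a GGTT PTE carries the page address in bits 63:12, while the low
 * bits hold flags such as the present bit and cache attributes. Masking with
 * GENMASK_ULL(63, 12) therefore yields a page-aligned DMA address, e.g. a
 * PTE value of 0x12345f043 decodes to page address 0x12345f000.
 */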
static int vgpu_gem_get_pages(
		struct drm_i915_gem_object *obj)
{
	struct drm_i915_private *dev_priv = to_i915(obj->base.dev);
	struct sg_table *st;
	struct scatterlist *sg;
	int i, ret;
	gen8_pte_t __iomem *gtt_entries;
	struct intel_vgpu_fb_info *fb_info;

	fb_info = (struct intel_vgpu_fb_info *)obj->gvt_info;
	if (WARN_ON(!fb_info))
		return -ENODEV;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (unlikely(!st))
		return -ENOMEM;

	ret = sg_alloc_table(st, fb_info->size, GFP_KERNEL);
	if (ret) {
		kfree(st);
		return ret;
	}

	/* Walk the guest framebuffer's GGTT entries and turn each PTE into
	 * a one-page scatterlist segment.
	 */
	gtt_entries = (gen8_pte_t __iomem *)dev_priv->ggtt.gsm +
		(fb_info->start >> PAGE_SHIFT);
	for_each_sg(st->sgl, sg, fb_info->size, i) {
		sg->offset = 0;
		sg->length = PAGE_SIZE;
		sg_dma_address(sg) =
			GEN8_DECODE_PTE(readq(&gtt_entries[i]));
		sg_dma_len(sg) = PAGE_SIZE;
	}

	__i915_gem_object_set_pages(obj, st, PAGE_SIZE);

	return 0;
}
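/*
 * Nothing was allocated for the backing pages themselves -- they are guest
 * memory reached through the GGTT -- so dropping the pages only means
 * tearing down the scatterlist built in vgpu_gem_get_pages().
 */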
static void vgpu_gem_put_pages(struct drm_i915_gem_object *obj,
		struct sg_table *pages)
{
	sg_free_table(pages);
	kfree(pages);
}
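/*
 * kref release callback: if the vGPU is still alive, unhook the object from
 * the per-vGPU bookkeeping (list, IDR, vfio device reference) before freeing
 * it; otherwise it is an orphan left behind by intel_vgpu_dmabuf_cleanup()
 * and only its memory needs to be freed.
 */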
static void dmabuf_gem_object_free(struct kref *kref)
{
	struct intel_vgpu_dmabuf_obj *obj =
		container_of(kref, struct intel_vgpu_dmabuf_obj, kref);
	struct intel_vgpu *vgpu = obj->vgpu;
	struct list_head *pos;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;

	if (vgpu && vgpu->active && !list_empty(&vgpu->dmabuf_obj_list_head)) {
		list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
			dmabuf_obj = container_of(pos,
					struct intel_vgpu_dmabuf_obj, list);
			if (dmabuf_obj == obj) {
				intel_gvt_hypervisor_put_vfio_device(vgpu);
				idr_remove(&vgpu->object_idr,
					   dmabuf_obj->dmabuf_id);
				kfree(dmabuf_obj->info);
				kfree(dmabuf_obj);
				list_del(pos);
				break;
			}
		}
	} else {
		/* Free the orphan dmabuf_objs here */
		kfree(obj->info);
		kfree(obj);
	}
}
static inline void dmabuf_obj_get(struct intel_vgpu_dmabuf_obj *obj)
{
	kref_get(&obj->kref);
}

static inline void dmabuf_obj_put(struct intel_vgpu_dmabuf_obj *obj)
{
	kref_put(&obj->kref, dmabuf_gem_object_free);
}
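/*
 * GEM release hook: runs when the last reference on the exported object is
 * dropped, and returns the dmabuf_obj reference taken in
 * intel_vgpu_get_dmabuf(). dmabuf_lock is only taken while the vGPU still
 * exists; for an orphaned object (vgpu == NULL) there is nothing to lock.
 */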
static void vgpu_gem_release(struct drm_i915_gem_object *gem_obj)
{
	struct intel_vgpu_fb_info *fb_info = gem_obj->gvt_info;
	struct intel_vgpu_dmabuf_obj *obj = fb_info->obj;
	struct intel_vgpu *vgpu = obj->vgpu;

	if (vgpu) {
		mutex_lock(&vgpu->dmabuf_lock);
		gem_obj->base.dma_buf = NULL;
		dmabuf_obj_put(obj);
		mutex_unlock(&vgpu->dmabuf_lock);
	} else {
		/* vgpu is NULL, as it has been removed already */
		gem_obj->base.dma_buf = NULL;
		dmabuf_obj_put(obj);
	}
}
static const struct drm_i915_gem_object_ops intel_vgpu_gem_ops = {
	.flags = I915_GEM_OBJECT_IS_PROXY,
	.get_pages = vgpu_gem_get_pages,
	.put_pages = vgpu_gem_put_pages,
	.release = vgpu_gem_release,
};
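/*
 * Create a proxy GEM object for a guest framebuffer. The object has no
 * shmem backing store (I915_GEM_OBJECT_IS_PROXY); its pages are resolved
 * from the guest's GGTT entries by vgpu_gem_get_pages() above.
 */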
static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
		struct intel_vgpu_fb_info *info)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_alloc(dev_priv);
	if (obj == NULL)
		return NULL;

	drm_gem_private_object_init(dev, &obj->base,
		info->size << PAGE_SHIFT);
	i915_gem_object_init(obj, &intel_vgpu_gem_ops);

	obj->read_domains = I915_GEM_DOMAIN_GTT;
	obj->write_domain = 0;
	if (INTEL_GEN(dev_priv) >= 9) {
		unsigned int tiling_mode = 0;
		unsigned int stride = 0;

		switch (info->drm_format_mod) {
		case DRM_FORMAT_MOD_LINEAR:
			tiling_mode = I915_TILING_NONE;
			break;
		case I915_FORMAT_MOD_X_TILED:
			tiling_mode = I915_TILING_X;
			stride = info->stride;
			break;
		case I915_FORMAT_MOD_Y_TILED:
		case I915_FORMAT_MOD_Yf_TILED:
			tiling_mode = I915_TILING_Y;
			stride = info->stride;
			break;
		default:
			gvt_dbg_core("invalid drm_format_mod %llx for tiling\n",
				     info->drm_format_mod);
		}
		obj->tiling_and_stride = tiling_mode | stride;
	} else {
		obj->tiling_and_stride = info->drm_format_mod ?
					I915_TILING_X : 0;
	}

	return obj;
}
static bool validate_hotspot(struct intel_vgpu_cursor_plane_format *c)
{
	if (c && c->x_hot <= c->width && c->y_hot <= c->height)
		return true;

	return false;
}
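/*
 * Decode the vGPU's currently programmed primary or cursor plane into
 * *info and sanity-check the result: the framebuffer must have a non-zero
 * size, a page-aligned base, and lie entirely inside the GGTT.
 */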
static int vgpu_get_plane_info(struct drm_device *dev,
		struct intel_vgpu *vgpu,
		struct intel_vgpu_fb_info *info,
		int plane_id)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct intel_vgpu_primary_plane_format p;
	struct intel_vgpu_cursor_plane_format c;
	int ret;

	if (plane_id == DRM_PLANE_TYPE_PRIMARY) {
		ret = intel_vgpu_decode_primary_plane(vgpu, &p);
		if (ret)
			return ret;
		info->start = p.base;
		info->start_gpa = p.base_gpa;
		info->width = p.width;
		info->height = p.height;
		info->stride = p.stride;
		info->drm_format = p.drm_format;

		switch (p.tiled) {
		case PLANE_CTL_TILED_LINEAR:
			info->drm_format_mod = DRM_FORMAT_MOD_LINEAR;
			break;
		case PLANE_CTL_TILED_X:
			info->drm_format_mod = I915_FORMAT_MOD_X_TILED;
			break;
		case PLANE_CTL_TILED_Y:
			info->drm_format_mod = I915_FORMAT_MOD_Y_TILED;
			break;
		case PLANE_CTL_TILED_YF:
			info->drm_format_mod = I915_FORMAT_MOD_Yf_TILED;
			break;
		default:
			gvt_vgpu_err("invalid tiling mode: %x\n", p.tiled);
		}

		info->size = (((p.stride * p.height * p.bpp) / 8) +
			      (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	} else if (plane_id == DRM_PLANE_TYPE_CURSOR) {
		ret = intel_vgpu_decode_cursor_plane(vgpu, &c);
		if (ret)
			return ret;
		info->start = c.base;
		info->start_gpa = c.base_gpa;
		info->width = c.width;
		info->height = c.height;
		info->stride = c.width * (c.bpp / 8);
		info->drm_format = c.drm_format;
		info->drm_format_mod = 0;
		info->x_pos = c.x_pos;
		info->y_pos = c.y_pos;

		if (validate_hotspot(&c)) {
			info->x_hot = c.x_hot;
			info->y_hot = c.y_hot;
		} else {
			info->x_hot = UINT_MAX;
			info->y_hot = UINT_MAX;
		}

		info->size = (((info->stride * c.height * c.bpp) / 8)
				+ (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	} else {
		gvt_vgpu_err("invalid plane id:%d\n", plane_id);
		return -EINVAL;
	}

	if (info->size == 0) {
		gvt_vgpu_err("fb size is zero\n");
		return -EINVAL;
	}

	if (info->start & (PAGE_SIZE - 1)) {
		gvt_vgpu_err("Not aligned fb address:0x%llx\n", info->start);
		return -EFAULT;
	}

	if (((info->start >> PAGE_SHIFT) + info->size) >
		ggtt_total_entries(&dev_priv->ggtt)) {
		gvt_vgpu_err("Invalid GTT offset or size\n");
		return -EFAULT;
	}

	if (!intel_gvt_ggtt_validate_range(vgpu, info->start, info->size)) {
		gvt_vgpu_err("invalid gma addr\n");
		return -EFAULT;
	}

	return 0;
}
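/*
 * Look up an already-exposed dmabuf_obj whose decoded parameters match the
 * latest plane decode, so that an unchanged guest framebuffer is reported
 * with its existing dmabuf_id instead of triggering a fresh export.
 */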
static struct intel_vgpu_dmabuf_obj *
pick_dmabuf_by_info(struct intel_vgpu *vgpu,
		    struct intel_vgpu_fb_info *latest_info)
{
	struct list_head *pos;
	struct intel_vgpu_fb_info *fb_info;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
	struct intel_vgpu_dmabuf_obj *ret = NULL;

	list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
		dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
						list);
		if ((dmabuf_obj == NULL) ||
		    (dmabuf_obj->info == NULL))
			continue;

		fb_info = (struct intel_vgpu_fb_info *)dmabuf_obj->info;
		if ((fb_info->start == latest_info->start) &&
		    (fb_info->start_gpa == latest_info->start_gpa) &&
		    (fb_info->size == latest_info->size) &&
		    (fb_info->drm_format_mod == latest_info->drm_format_mod) &&
		    (fb_info->drm_format == latest_info->drm_format) &&
		    (fb_info->width == latest_info->width) &&
		    (fb_info->height == latest_info->height)) {
			ret = dmabuf_obj;
			break;
		}
	}

	return ret;
}
static struct intel_vgpu_dmabuf_obj *
pick_dmabuf_by_num(struct intel_vgpu *vgpu, u32 id)
{
	struct list_head *pos;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj = NULL;
	struct intel_vgpu_dmabuf_obj *ret = NULL;

	list_for_each(pos, &vgpu->dmabuf_obj_list_head) {
		dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
						list);
		if (!dmabuf_obj)
			continue;

		if (dmabuf_obj->dmabuf_id == id) {
			ret = dmabuf_obj;
			break;
		}
	}

	return ret;
}
static void update_fb_info(struct vfio_device_gfx_plane_info *gvt_dmabuf,
		      struct intel_vgpu_fb_info *fb_info)
{
	gvt_dmabuf->drm_format = fb_info->drm_format;
	gvt_dmabuf->drm_format_mod = fb_info->drm_format_mod;
	gvt_dmabuf->width = fb_info->width;
	gvt_dmabuf->height = fb_info->height;
	gvt_dmabuf->stride = fb_info->stride;
	gvt_dmabuf->size = fb_info->size;
	gvt_dmabuf->x_pos = fb_info->x_pos;
	gvt_dmabuf->y_pos = fb_info->y_pos;
	gvt_dmabuf->x_hot = fb_info->x_hot;
	gvt_dmabuf->y_hot = fb_info->y_hot;
}
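/*
 * intel_vgpu_query_plane() backs the VFIO_DEVICE_QUERY_GFX_PLANE ioctl.
 * A rough sketch of the intended userspace flow (illustrative only, error
 * handling omitted; see the VFIO uapi for the authoritative contract):
 *
 *	struct vfio_device_gfx_plane_info plane = {
 *		.argsz = sizeof(plane),
 *		.flags = VFIO_GFX_PLANE_TYPE_DMABUF,
 *		.drm_plane_type = DRM_PLANE_TYPE_PRIMARY,
 *	};
 *
 *	ioctl(device_fd, VFIO_DEVICE_QUERY_GFX_PLANE, &plane);
 *	int fd = ioctl(device_fd, VFIO_DEVICE_GET_GFX_DMABUF,
 *		       &plane.dmabuf_id);
 */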
int intel_vgpu_query_plane(struct intel_vgpu *vgpu, void *args)
{
	struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
	struct vfio_device_gfx_plane_info *gfx_plane_info = args;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;
	struct intel_vgpu_fb_info fb_info;
	int ret = 0;

	/* A probe for dma-buf support has nothing to do: report success */
	if (gfx_plane_info->flags == (VFIO_GFX_PLANE_TYPE_DMABUF |
				       VFIO_GFX_PLANE_TYPE_PROBE))
		return 0;
	else if ((gfx_plane_info->flags & ~VFIO_GFX_PLANE_TYPE_DMABUF) ||
			(!gfx_plane_info->flags))
		return -EINVAL;

	ret = vgpu_get_plane_info(dev, vgpu, &fb_info,
				  gfx_plane_info->drm_plane_type);
	if (ret != 0)
		goto out;

	mutex_lock(&vgpu->dmabuf_lock);
	/* If exists, pick up the exposed dmabuf_obj */
	dmabuf_obj = pick_dmabuf_by_info(vgpu, &fb_info);
	if (dmabuf_obj) {
		update_fb_info(gfx_plane_info, &fb_info);
		gfx_plane_info->dmabuf_id = dmabuf_obj->dmabuf_id;

		/* This buffer may be released between query_plane ioctl and
		 * get_dmabuf ioctl. Add the refcount to make sure it won't
		 * be released between the two ioctls.
		 */
		if (!dmabuf_obj->initref) {
			dmabuf_obj->initref = true;
			dmabuf_obj_get(dmabuf_obj);
		}

		gvt_dbg_dpy("vgpu%d: re-use dmabuf_obj ref %d, id %d\n",
			    vgpu->id, kref_read(&dmabuf_obj->kref),
			    gfx_plane_info->dmabuf_id);
		mutex_unlock(&vgpu->dmabuf_lock);
		goto out;
	}

	mutex_unlock(&vgpu->dmabuf_lock);

	/* Need to allocate a new one */
	dmabuf_obj = kmalloc(sizeof(struct intel_vgpu_dmabuf_obj), GFP_KERNEL);
	if (unlikely(!dmabuf_obj)) {
		gvt_vgpu_err("alloc dmabuf_obj failed\n");
		ret = -ENOMEM;
		goto out;
	}

	dmabuf_obj->info = kmalloc(sizeof(struct intel_vgpu_fb_info),
				   GFP_KERNEL);
	if (unlikely(!dmabuf_obj->info)) {
		gvt_vgpu_err("allocate intel vgpu fb info failed\n");
		ret = -ENOMEM;
		goto out_free_dmabuf;
	}
	memcpy(dmabuf_obj->info, &fb_info, sizeof(struct intel_vgpu_fb_info));

	((struct intel_vgpu_fb_info *)dmabuf_obj->info)->obj = dmabuf_obj;

	dmabuf_obj->vgpu = vgpu;

	ret = idr_alloc(&vgpu->object_idr, dmabuf_obj, 1, 0, GFP_NOWAIT);
	if (ret < 0)
		goto out_free_info;
	gfx_plane_info->dmabuf_id = ret;
	dmabuf_obj->dmabuf_id = ret;

	dmabuf_obj->initref = true;

	kref_init(&dmabuf_obj->kref);

	mutex_lock(&vgpu->dmabuf_lock);
	if (intel_gvt_hypervisor_get_vfio_device(vgpu)) {
		gvt_vgpu_err("get vfio device failed\n");
		mutex_unlock(&vgpu->dmabuf_lock);
		goto out_free_info;
	}
	mutex_unlock(&vgpu->dmabuf_lock);

	update_fb_info(gfx_plane_info, &fb_info);

	INIT_LIST_HEAD(&dmabuf_obj->list);
	mutex_lock(&vgpu->dmabuf_lock);
	list_add_tail(&dmabuf_obj->list, &vgpu->dmabuf_obj_list_head);
	mutex_unlock(&vgpu->dmabuf_lock);

	gvt_dbg_dpy("vgpu%d: %s new dmabuf_obj ref %d, id %d\n", vgpu->id,
		    __func__, kref_read(&dmabuf_obj->kref), ret);

	return 0;

out_free_info:
	kfree(dmabuf_obj->info);
out_free_dmabuf:
	kfree(dmabuf_obj);
out:
	/* ENODEV means plane isn't ready, which might be a normal case. */
	return (ret == -ENODEV) ? 0 : ret;
}
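/*
 * Reference lifecycle, as implemented above and below: query_plane holds an
 * extra "initref" on a freshly created dmabuf_obj so that it cannot vanish
 * before the follow-up get_dmabuf call; get_dmabuf then takes a reference
 * on behalf of the exported GEM object (dropped again in vgpu_gem_release())
 * and releases the initref.
 */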
/* To associate an exposed dmabuf with the dmabuf_obj */
int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
{
	struct drm_device *dev = &vgpu->gvt->dev_priv->drm;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;
	struct drm_i915_gem_object *obj;
	struct dma_buf *dmabuf;
	int dmabuf_fd;
	int ret = 0;

	mutex_lock(&vgpu->dmabuf_lock);

	dmabuf_obj = pick_dmabuf_by_num(vgpu, dmabuf_id);
	if (dmabuf_obj == NULL) {
		gvt_vgpu_err("invalid dmabuf id:%d\n", dmabuf_id);
		ret = -EINVAL;
		goto out;
	}

	obj = vgpu_create_gem(dev, dmabuf_obj->info);
	if (obj == NULL) {
		gvt_vgpu_err("create gvt gem obj failed\n");
		ret = -ENOMEM;
		goto out;
	}

	obj->gvt_info = dmabuf_obj->info;

	dmabuf = i915_gem_prime_export(dev, &obj->base, DRM_CLOEXEC | DRM_RDWR);
	if (IS_ERR(dmabuf)) {
		gvt_vgpu_err("export dma-buf failed\n");
		ret = PTR_ERR(dmabuf);
		goto out_free_gem;
	}

	/* The exported dma-buf now holds its own reference on the object */
	i915_gem_object_put(obj);

	ret = dma_buf_fd(dmabuf, DRM_CLOEXEC | DRM_RDWR);
	if (ret < 0) {
		gvt_vgpu_err("create dma-buf fd failed ret:%d\n", ret);
		goto out_free_dmabuf;
	}
	dmabuf_fd = ret;

	dmabuf_obj_get(dmabuf_obj);

	if (dmabuf_obj->initref) {
		dmabuf_obj->initref = false;
		dmabuf_obj_put(dmabuf_obj);
	}

	mutex_unlock(&vgpu->dmabuf_lock);

	gvt_dbg_dpy("vgpu%d: dmabuf:%d, dmabuf ref %d, fd:%d\n"
		    "        file count: %ld, GEM ref: %d\n",
		    vgpu->id, dmabuf_obj->dmabuf_id,
		    kref_read(&dmabuf_obj->kref),
		    dmabuf_fd,
		    file_count(dmabuf->file),
		    kref_read(&obj->base.refcount));

	return dmabuf_fd;

out_free_dmabuf:
	dma_buf_put(dmabuf);
out_free_gem:
	i915_gem_object_put(obj);
out:
	mutex_unlock(&vgpu->dmabuf_lock);
	return ret;
}
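/*
 * Called on vGPU teardown: detach every dmabuf_obj from the vGPU and drop
 * its initial reference. Objects still pinned by exported dma-bufs live on
 * as orphans (vgpu == NULL) until their GEM objects are finally released.
 */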
void intel_vgpu_dmabuf_cleanup(struct intel_vgpu *vgpu)
{
	struct list_head *pos, *n;
	struct intel_vgpu_dmabuf_obj *dmabuf_obj;

	mutex_lock(&vgpu->dmabuf_lock);
	list_for_each_safe(pos, n, &vgpu->dmabuf_obj_list_head) {
		dmabuf_obj = container_of(pos, struct intel_vgpu_dmabuf_obj,
						list);
		dmabuf_obj->vgpu = NULL;

		idr_remove(&vgpu->object_idr, dmabuf_obj->dmabuf_id);
		intel_gvt_hypervisor_put_vfio_device(vgpu);
		list_del(pos);

		/* dmabuf_obj might be freed in dmabuf_obj_put */
		if (dmabuf_obj->initref) {
			dmabuf_obj->initref = false;
			dmabuf_obj_put(dmabuf_obj);
		}
	}
	mutex_unlock(&vgpu->dmabuf_lock);
}