drm/i915/gem: Pull phys pread/pwrite implementations to the backend
author Chris Wilson <chris@chris-wilson.co.uk>
Thu, 5 Nov 2020 15:49:34 +0000 (15:49 +0000)
committer Rodrigo Vivi <rodrigo.vivi@intel.com>
Fri, 13 Nov 2020 00:47:30 +0000 (19:47 -0500)
Move the specialised interactions with the physical GEM object from the
pread/pwrite ioctl handlers into the phys backend.

Currently, if one is able to exhaust the entire aperture and then tries to
pwrite into an object not backed by struct page, we accidentally invoke
the phys pwrite handler on a non-phys object; calamitous.

Fixes: c6790dc22312 ("drm/i915: Wean off drm_pci_alloc/drm_pci_free")
Testcase: igt/gem_pwrite/exhaustion
Signed-off-by: Chris Wilson <chris@chris-wilson.co.uk>
Reviewed-by: Matthew Auld <matthew.auld@intel.com>
Cc: stable@vger.kernel.org
Link: https://patchwork.freedesktop.org/patch/msgid/20201105154934.16022-2-chris@chris-wilson.co.uk
(cherry picked from commit 852e1b3644817f071427b83859b889c788a0cf69)
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
drivers/gpu/drm/i915/gem/i915_gem_phys.c
drivers/gpu/drm/i915/i915_gem.c

index 28147aab47b9ae60e8cef05378524b6d73833193..3a4dfe2ef1da5b9d847105f49bfacd50524a3252 100644 (file)
@@ -134,6 +134,58 @@ i915_gem_object_put_pages_phys(struct drm_i915_gem_object *obj,
                          vaddr, dma);
 }
 
+static int
+phys_pwrite(struct drm_i915_gem_object *obj,
+           const struct drm_i915_gem_pwrite *args)
+{
+       void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
+       char __user *user_data = u64_to_user_ptr(args->data_ptr);
+       int err;
+
+       err = i915_gem_object_wait(obj,
+                                  I915_WAIT_INTERRUPTIBLE |
+                                  I915_WAIT_ALL,
+                                  MAX_SCHEDULE_TIMEOUT);
+       if (err)
+               return err;
+
+       /*
+        * We manually control the domain here and pretend that it
+        * remains coherent i.e. in the GTT domain, like shmem_pwrite.
+        */
+       i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
+
+       if (copy_from_user(vaddr, user_data, args->size))
+               return -EFAULT;
+
+       drm_clflush_virt_range(vaddr, args->size);
+       intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
+
+       i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
+       return 0;
+}
+
+static int
+phys_pread(struct drm_i915_gem_object *obj,
+          const struct drm_i915_gem_pread *args)
+{
+       void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
+       char __user *user_data = u64_to_user_ptr(args->data_ptr);
+       int err;
+
+       err = i915_gem_object_wait(obj,
+                                  I915_WAIT_INTERRUPTIBLE,
+                                  MAX_SCHEDULE_TIMEOUT);
+       if (err)
+               return err;
+
+       drm_clflush_virt_range(vaddr, args->size);
+       if (copy_to_user(user_data, vaddr, args->size))
+               return -EFAULT;
+
+       return 0;
+}
+
 static void phys_release(struct drm_i915_gem_object *obj)
 {
        fput(obj->base.filp);
@@ -144,6 +196,9 @@ static const struct drm_i915_gem_object_ops i915_gem_phys_ops = {
        .get_pages = i915_gem_object_get_pages_phys,
        .put_pages = i915_gem_object_put_pages_phys,
 
+       .pread  = phys_pread,
+       .pwrite = phys_pwrite,
+
        .release = phys_release,
 };
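
With .pread and .pwrite wired into i915_gem_phys_ops, the generic ioctl code
can hand the transfer to the backend instead of inferring the object type from
an aperture-path failure. The i915_gem.c hunks that add this dispatch are not
reproduced in this excerpt, so the helper below is only an illustrative sketch
of the pattern (the name dispatch_backend_pwrite is invented for illustration,
not taken from the patch):

static int dispatch_backend_pwrite(struct drm_i915_gem_object *obj,
				   const struct drm_i915_gem_pwrite *args)
{
	/* Ask the object's backend first; phys objects get phys_pwrite(). */
	if (obj->ops->pwrite)
		return obj->ops->pwrite(obj, args);

	/* No backend handler: caller falls back to the GTT/shmem paths. */
	return -ENODEV;
}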
 
index d58fe1ddc3e153a780726266e5f9aa0e19844fed..58276694c848d2e5e2e83e732dffec6538d4f4a7 100644 (file)
@@ -179,30 +179,6 @@ try_again:
        return ret;
 }
 
-static int
-i915_gem_phys_pwrite(struct drm_i915_gem_object *obj,
-                    struct drm_i915_gem_pwrite *args,
-                    struct drm_file *file)
-{
-       void *vaddr = sg_page(obj->mm.pages->sgl) + args->offset;
-       char __user *user_data = u64_to_user_ptr(args->data_ptr);
-
-       /*
-        * We manually control the domain here and pretend that it
-        * remains coherent i.e. in the GTT domain, like shmem_pwrite.
-        */
-       i915_gem_object_invalidate_frontbuffer(obj, ORIGIN_CPU);
-
-       if (copy_from_user(vaddr, user_data, args->size))
-               return -EFAULT;
-
-       drm_clflush_virt_range(vaddr, args->size);
-       intel_gt_chipset_flush(&to_i915(obj->base.dev)->gt);
-
-       i915_gem_object_flush_frontbuffer(obj, ORIGIN_CPU);
-       return 0;
-}
-
 static int
 i915_gem_create(struct drm_file *file,
                struct intel_memory_region *mr,
@@ -872,8 +848,6 @@ i915_gem_pwrite_ioctl(struct drm_device *dev, void *data,
        if (ret == -EFAULT || ret == -ENOSPC) {
                if (i915_gem_object_has_struct_page(obj))
                        ret = i915_gem_shmem_pwrite(obj, args);
-               else
-                       ret = i915_gem_phys_pwrite(obj, args, file);
        }
 
        i915_gem_object_unpin_pages(obj);
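
For reference, the path being fixed is reached from userspace through
DRM_IOCTL_I915_GEM_PWRITE; the testcase listed above
(igt/gem_pwrite/exhaustion) drives it after exhausting the aperture. A
minimal, hypothetical userspace call might look like the following (assumes an
open DRM fd and a valid GEM handle; the wrapper name and shape are
illustrative, not copied from the igt sources):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int gem_pwrite(int drm_fd, uint32_t handle, uint64_t offset,
		      const void *src, uint64_t size)
{
	struct drm_i915_gem_pwrite pwrite;

	memset(&pwrite, 0, sizeof(pwrite));
	pwrite.handle = handle;
	pwrite.offset = offset;
	pwrite.size = size;
	pwrite.data_ptr = (uintptr_t)src;

	/* With this patch, a phys-backed object is routed to phys_pwrite()
	 * via obj->ops->pwrite rather than the removed blind fallback. */
	return ioctl(drm_fd, DRM_IOCTL_I915_GEM_PWRITE, &pwrite);
}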