Merge branch 'vmwgfx-fixes-3.13' of git://people.freedesktop.org/~thomash/linux into...
author     Dave Airlie <airlied@redhat.com>
           Thu, 21 Nov 2013 08:46:26 +0000 (18:46 +1000)
committer  Dave Airlie <airlied@redhat.com>
           Thu, 21 Nov 2013 08:46:26 +0000 (18:46 +1000)
Below are a fix for a false lockdep warning and the vmwgfx prime implementation.

* 'vmwgfx-fixes-3.13' of git://people.freedesktop.org/~thomash/linux:
  drm/vmwgfx: Make vmwgfx dma buffers prime aware
  drm/vmwgfx: Make surfaces prime-aware
  drm/vmwgfx: Hook up the prime ioctls
  drm/ttm: Add a minimal prime implementation for ttm base objects
  drm/vmwgfx: Fix false lockdep warning
  drm/ttm: Allow execbuf util reserves without ticket

drivers/gpu/drm/ttm/ttm_execbuf_util.c
drivers/gpu/drm/ttm/ttm_object.c
drivers/gpu/drm/vmwgfx/Makefile
drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
drivers/gpu/drm/vmwgfx/vmwgfx_prime.c [new file with mode: 0644]
drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
include/drm/ttm/ttm_execbuf_util.h
include/drm/ttm/ttm_object.h

diff --git a/drivers/gpu/drm/ttm/ttm_execbuf_util.c b/drivers/gpu/drm/ttm/ttm_execbuf_util.c
index 6c911789ae5cc979d46e649f9e5f39287df63d71..479e9418e3d710a8c5f02fab2355f2e6991e933c 100644
@@ -32,8 +32,7 @@
 #include <linux/sched.h>
 #include <linux/module.h>
 
-static void ttm_eu_backoff_reservation_locked(struct list_head *list,
-                                             struct ww_acquire_ctx *ticket)
+static void ttm_eu_backoff_reservation_locked(struct list_head *list)
 {
        struct ttm_validate_buffer *entry;
 
@@ -93,8 +92,9 @@ void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
        entry = list_first_entry(list, struct ttm_validate_buffer, head);
        glob = entry->bo->glob;
        spin_lock(&glob->lru_lock);
-       ttm_eu_backoff_reservation_locked(list, ticket);
-       ww_acquire_fini(ticket);
+       ttm_eu_backoff_reservation_locked(list);
+       if (ticket)
+               ww_acquire_fini(ticket);
        spin_unlock(&glob->lru_lock);
 }
 EXPORT_SYMBOL(ttm_eu_backoff_reservation);
@@ -130,7 +130,8 @@ int ttm_eu_reserve_buffers(struct ww_acquire_ctx *ticket,
        entry = list_first_entry(list, struct ttm_validate_buffer, head);
        glob = entry->bo->glob;
 
-       ww_acquire_init(ticket, &reservation_ww_class);
+       if (ticket)
+               ww_acquire_init(ticket, &reservation_ww_class);
 retry:
        list_for_each_entry(entry, list, head) {
                struct ttm_buffer_object *bo = entry->bo;
@@ -139,16 +140,17 @@ retry:
                if (entry->reserved)
                        continue;
 
-
-               ret = ttm_bo_reserve_nolru(bo, true, false, true, ticket);
+               ret = ttm_bo_reserve_nolru(bo, true, (ticket == NULL), true,
+                                          ticket);
 
                if (ret == -EDEADLK) {
                        /* uh oh, we lost out, drop every reservation and try
                         * to only reserve this buffer, then start over if
                         * this succeeds.
                         */
+                       BUG_ON(ticket == NULL);
                        spin_lock(&glob->lru_lock);
-                       ttm_eu_backoff_reservation_locked(list, ticket);
+                       ttm_eu_backoff_reservation_locked(list);
                        spin_unlock(&glob->lru_lock);
                        ttm_eu_list_ref_sub(list);
                        ret = ww_mutex_lock_slow_interruptible(&bo->resv->lock,
@@ -175,7 +177,8 @@ retry:
                }
        }
 
-       ww_acquire_done(ticket);
+       if (ticket)
+               ww_acquire_done(ticket);
        spin_lock(&glob->lru_lock);
        ttm_eu_del_from_lru_locked(list);
        spin_unlock(&glob->lru_lock);
@@ -184,12 +187,14 @@ retry:
 
 err:
        spin_lock(&glob->lru_lock);
-       ttm_eu_backoff_reservation_locked(list, ticket);
+       ttm_eu_backoff_reservation_locked(list);
        spin_unlock(&glob->lru_lock);
        ttm_eu_list_ref_sub(list);
 err_fini:
-       ww_acquire_done(ticket);
-       ww_acquire_fini(ticket);
+       if (ticket) {
+               ww_acquire_done(ticket);
+               ww_acquire_fini(ticket);
+       }
        return ret;
 }
 EXPORT_SYMBOL(ttm_eu_reserve_buffers);
@@ -224,7 +229,8 @@ void ttm_eu_fence_buffer_objects(struct ww_acquire_ctx *ticket,
        }
        spin_unlock(&bdev->fence_lock);
        spin_unlock(&glob->lru_lock);
-       ww_acquire_fini(ticket);
+       if (ticket)
+               ww_acquire_fini(ticket);
 
        list_for_each_entry(entry, list, head) {
                if (entry->old_sync_obj)
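The net effect of this file's changes is that ttm_eu_reserve_buffers() now accepts a NULL ticket for trylock-only reservation. A minimal sketch of the new calling convention, mirroring what vmw_resource_check_buffer() switches to further down (the wrapper function name here is illustrative):

#include <drm/ttm/ttm_execbuf_util.h>

/*
 * With a NULL ticket, every buffer on the list is reserved with no_wait
 * semantics (see the ttm_bo_reserve_nolru() call above), so this never
 * blocks on a contended bo and can never reach the -EDEADLK backoff path.
 */
static int try_reserve_nonblocking(struct list_head *val_list)
{
	int ret;

	ret = ttm_eu_reserve_buffers(NULL, val_list);
	if (unlikely(ret != 0))
		return ret;	/* typically -EBUSY if a bo is already held */

	/* ... validate / use the reserved buffers ... */

	ttm_eu_backoff_reservation(NULL, val_list);
	return 0;
}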
diff --git a/drivers/gpu/drm/ttm/ttm_object.c b/drivers/gpu/drm/ttm/ttm_object.c
index a868176c258a95aee788253038637b83ec54a78f..6fe7b92a82d1f72f465a79d48dd0088337d713fe 100644
@@ -1,6 +1,6 @@
 /**************************************************************************
  *
- * Copyright (c) 2009 VMware, Inc., Palo Alto, CA., USA
+ * Copyright (c) 2009-2013 VMware, Inc., Palo Alto, CA., USA
  * All Rights Reserved.
  *
  * Permission is hereby granted, free of charge, to any person obtaining a
  **************************************************************************/
 /*
  * Authors: Thomas Hellstrom <thellstrom-at-vmware-dot-com>
+ *
+ * While no substantial code is shared, the prime code is inspired by
+ * drm_prime.c, with
+ * Authors:
+ *      Dave Airlie <airlied@redhat.com>
+ *      Rob Clark <rob.clark@linaro.org>
  */
 /** @file ttm_ref_object.c
  *
@@ -34,6 +40,7 @@
  * and release on file close.
  */
 
+
 /**
  * struct ttm_object_file
  *
@@ -84,6 +91,9 @@ struct ttm_object_device {
        struct drm_open_hash object_hash;
        atomic_t object_count;
        struct ttm_mem_global *mem_glob;
+       struct dma_buf_ops ops;
+       void (*dmabuf_release)(struct dma_buf *dma_buf);
+       size_t dma_buf_size;
 };
 
 /**
@@ -116,6 +126,8 @@ struct ttm_ref_object {
        struct ttm_object_file *tfile;
 };
 
+static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf);
+
 static inline struct ttm_object_file *
 ttm_object_file_ref(struct ttm_object_file *tfile)
 {
@@ -416,9 +428,10 @@ out_err:
 }
 EXPORT_SYMBOL(ttm_object_file_init);
 
-struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
-                                                *mem_glob,
-                                                unsigned int hash_order)
+struct ttm_object_device *
+ttm_object_device_init(struct ttm_mem_global *mem_glob,
+                      unsigned int hash_order,
+                      const struct dma_buf_ops *ops)
 {
        struct ttm_object_device *tdev = kmalloc(sizeof(*tdev), GFP_KERNEL);
        int ret;
@@ -430,10 +443,17 @@ struct ttm_object_device *ttm_object_device_init(struct ttm_mem_global
        spin_lock_init(&tdev->object_lock);
        atomic_set(&tdev->object_count, 0);
        ret = drm_ht_create(&tdev->object_hash, hash_order);
+       if (ret != 0)
+               goto out_no_object_hash;
 
-       if (likely(ret == 0))
-               return tdev;
+       tdev->ops = *ops;
+       tdev->dmabuf_release = tdev->ops.release;
+       tdev->ops.release = ttm_prime_dmabuf_release;
+       tdev->dma_buf_size = ttm_round_pot(sizeof(struct dma_buf)) +
+               ttm_round_pot(sizeof(struct file));
+       return tdev;
 
+out_no_object_hash:
        kfree(tdev);
        return NULL;
 }
@@ -452,3 +472,225 @@ void ttm_object_device_release(struct ttm_object_device **p_tdev)
        kfree(tdev);
 }
 EXPORT_SYMBOL(ttm_object_device_release);
+
+/**
+ * get_dma_buf_unless_doomed - get a dma_buf reference if possible.
+ *
+ * @dmabuf: Non-refcounted pointer to a struct dma-buf.
+ *
+ * Obtain a file reference from a lookup structure that doesn't refcount
+ * the file, but synchronizes with its release method to make sure it has
+ * not been freed yet. See for example kref_get_unless_zero documentation.
+ * Returns true if refcounting succeeds, false otherwise.
+ *
+ * Nobody really wants this as a public API yet, so let it mature here
+ * for some time...
+ */
+static bool __must_check get_dma_buf_unless_doomed(struct dma_buf *dmabuf)
+{
+       return atomic_long_inc_not_zero(&dmabuf->file->f_count) != 0L;
+}
+
+/**
+ * ttm_prime_refcount_release - refcount release method for a prime object.
+ *
+ * @p_base: Pointer to ttm_base_object pointer.
+ *
+ * This is a wrapper that calls the refcount_release function of the
+ * underlying object. At the same time it cleans up the prime object.
+ * This function is called when all references to the base object we
+ * derive from are gone.
+ */
+static void ttm_prime_refcount_release(struct ttm_base_object **p_base)
+{
+       struct ttm_base_object *base = *p_base;
+       struct ttm_prime_object *prime;
+
+       *p_base = NULL;
+       prime = container_of(base, struct ttm_prime_object, base);
+       BUG_ON(prime->dma_buf != NULL);
+       mutex_destroy(&prime->mutex);
+       if (prime->refcount_release)
+               prime->refcount_release(&base);
+}
+
+/**
+ * ttm_prime_dmabuf_release - Release method for the dma-bufs we export
+ *
+ * @dma_buf: The struct dma_buf being released.
+ *
+ * This function first calls the dma_buf release method the driver
+ * provides. Then it cleans up our dma_buf pointer used for lookup,
+ * and finally releases the reference the dma_buf has on our base
+ * object.
+ */
+static void ttm_prime_dmabuf_release(struct dma_buf *dma_buf)
+{
+       struct ttm_prime_object *prime =
+               (struct ttm_prime_object *) dma_buf->priv;
+       struct ttm_base_object *base = &prime->base;
+       struct ttm_object_device *tdev = base->tfile->tdev;
+
+       if (tdev->dmabuf_release)
+               tdev->dmabuf_release(dma_buf);
+       mutex_lock(&prime->mutex);
+       if (prime->dma_buf == dma_buf)
+               prime->dma_buf = NULL;
+       mutex_unlock(&prime->mutex);
+       ttm_mem_global_free(tdev->mem_glob, tdev->dma_buf_size);
+       ttm_base_object_unref(&base);
+}
+
+/**
+ * ttm_prime_fd_to_handle - Get a base object handle from a prime fd
+ *
+ * @tfile: A struct ttm_object_file identifying the caller.
+ * @fd: The prime / dmabuf fd.
+ * @handle: The returned handle.
+ *
+ * This function returns a handle to an object that previously exported
+ * a dma-buf. Note that we don't handle imports yet, because we simply
+ * have no consumers of that implementation.
+ */
+int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
+                          int fd, u32 *handle)
+{
+       struct ttm_object_device *tdev = tfile->tdev;
+       struct dma_buf *dma_buf;
+       struct ttm_prime_object *prime;
+       struct ttm_base_object *base;
+       int ret;
+
+       dma_buf = dma_buf_get(fd);
+       if (IS_ERR(dma_buf))
+               return PTR_ERR(dma_buf);
+
+       if (dma_buf->ops != &tdev->ops) {
+               dma_buf_put(dma_buf);
+               return -ENOSYS;
+       }
+
+       prime = (struct ttm_prime_object *) dma_buf->priv;
+       base = &prime->base;
+       *handle = base->hash.key;
+       ret = ttm_ref_object_add(tfile, base, TTM_REF_USAGE, NULL);
+
+       dma_buf_put(dma_buf);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(ttm_prime_fd_to_handle);
+
+/**
+ * ttm_prime_handle_to_fd - Return a dma_buf fd from a ttm prime object
+ *
+ * @tfile: Struct ttm_object_file identifying the caller.
+ * @handle: Handle to the object we're exporting from.
+ * @flags: flags for dma-buf creation. We just pass them on.
+ * @prime_fd: The returned file descriptor.
+ *
+ */
+int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
+                          uint32_t handle, uint32_t flags,
+                          int *prime_fd)
+{
+       struct ttm_object_device *tdev = tfile->tdev;
+       struct ttm_base_object *base;
+       struct dma_buf *dma_buf;
+       struct ttm_prime_object *prime;
+       int ret;
+
+       base = ttm_base_object_lookup(tfile, handle);
+       if (unlikely(base == NULL ||
+                    base->object_type != ttm_prime_type)) {
+               ret = -ENOENT;
+               goto out_unref;
+       }
+
+       prime = container_of(base, struct ttm_prime_object, base);
+       if (unlikely(!base->shareable)) {
+               ret = -EPERM;
+               goto out_unref;
+       }
+
+       ret = mutex_lock_interruptible(&prime->mutex);
+       if (unlikely(ret != 0)) {
+               ret = -ERESTARTSYS;
+               goto out_unref;
+       }
+
+       dma_buf = prime->dma_buf;
+       if (!dma_buf || !get_dma_buf_unless_doomed(dma_buf)) {
+
+               /*
+                * Need to create a new dma_buf, with memory accounting.
+                */
+               ret = ttm_mem_global_alloc(tdev->mem_glob, tdev->dma_buf_size,
+                                          false, true);
+               if (unlikely(ret != 0)) {
+                       mutex_unlock(&prime->mutex);
+                       goto out_unref;
+               }
+
+               dma_buf = dma_buf_export(prime, &tdev->ops,
+                                        prime->size, flags);
+               if (IS_ERR(dma_buf)) {
+                       ret = PTR_ERR(dma_buf);
+                       ttm_mem_global_free(tdev->mem_glob,
+                                           tdev->dma_buf_size);
+                       mutex_unlock(&prime->mutex);
+                       goto out_unref;
+               }
+
+               /*
+                * dma_buf has taken the base object reference
+                */
+               base = NULL;
+               prime->dma_buf = dma_buf;
+       }
+       mutex_unlock(&prime->mutex);
+
+       ret = dma_buf_fd(dma_buf, flags);
+       if (ret >= 0) {
+               *prime_fd = ret;
+               ret = 0;
+       } else
+               dma_buf_put(dma_buf);
+
+out_unref:
+       if (base)
+               ttm_base_object_unref(&base);
+       return ret;
+}
+EXPORT_SYMBOL_GPL(ttm_prime_handle_to_fd);
+
+/**
+ * ttm_prime_object_init - Initialize a ttm_prime_object
+ *
+ * @tfile: struct ttm_object_file identifying the caller
+ * @size: The size of the dma_bufs we export.
+ * @prime: The object to be initialized.
+ * @shareable: See ttm_base_object_init
+ * @type: See ttm_base_object_init
+ * @refcount_release: See ttm_base_object_init
+ * @ref_obj_release: See ttm_base_object_init
+ *
+ * Initializes an object which is compatible with the drm_prime model
+ * for data sharing between processes and devices.
+ */
+int ttm_prime_object_init(struct ttm_object_file *tfile, size_t size,
+                         struct ttm_prime_object *prime, bool shareable,
+                         enum ttm_object_type type,
+                         void (*refcount_release) (struct ttm_base_object **),
+                         void (*ref_obj_release) (struct ttm_base_object *,
+                                                  enum ttm_ref_type ref_type))
+{
+       mutex_init(&prime->mutex);
+       prime->size = PAGE_ALIGN(size);
+       prime->real_type = type;
+       prime->dma_buf = NULL;
+       prime->refcount_release = refcount_release;
+       return ttm_base_object_init(tfile, &prime->base, shareable,
+                                   ttm_prime_type,
+                                   ttm_prime_refcount_release,
+                                   ref_obj_release);
+}
+EXPORT_SYMBOL(ttm_prime_object_init);
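Taken together, the above gives drivers a drop-in replacement for ttm_base_object_init() when an object should be exportable. A sketch of the expected embedding pattern — the same one vmwgfx adopts for its dma buffers and surfaces below; the struct and function names here are hypothetical:

#include <drm/ttm/ttm_object.h>

struct my_user_object {
	struct ttm_prime_object prime;	/* embed the prime object, not base */
	/* ... driver-private payload ... */
};

static void my_refcount_release(struct ttm_base_object **p_base)
{
	/* free the driver object here; see ttm_prime_object_kfree() */
}

static int my_object_init(struct ttm_object_file *tfile,
			  struct my_user_object *obj, size_t size)
{
	/*
	 * ttm_prime_object_init() registers the object as ttm_prime_type
	 * and records the real type in prime.real_type, so lookups must
	 * use ttm_base_object_type() rather than base->object_type.
	 */
	return ttm_prime_object_init(tfile, size, &obj->prime,
				     true /* shareable */,
				     ttm_driver_type0 /* illustrative */,
				     &my_refcount_release, NULL);
}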
diff --git a/drivers/gpu/drm/vmwgfx/Makefile b/drivers/gpu/drm/vmwgfx/Makefile
index 2cc6cd91ac11da8829a349bbf7b523ec4f0905dc..9f8b690bcf52c97e24db7c6a515f9be185b30321 100644
@@ -6,6 +6,6 @@ vmwgfx-y := vmwgfx_execbuf.o vmwgfx_gmr.o vmwgfx_kms.o vmwgfx_drv.o \
            vmwgfx_fifo.o vmwgfx_irq.o vmwgfx_ldu.o vmwgfx_ttm_glue.o \
            vmwgfx_overlay.o vmwgfx_marker.o vmwgfx_gmrid_manager.o \
            vmwgfx_fence.o vmwgfx_dmabuf.o vmwgfx_scrn.o vmwgfx_context.o \
-           vmwgfx_surface.o
+           vmwgfx_surface.o vmwgfx_prime.o
 
 obj-$(CONFIG_DRM_VMWGFX) := vmwgfx.o
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.c
index 20d5485eaf98c570de5535e4299eabac3fc442a0..c7a549694e59fb2614562627dcb7f167680babcd 100644
@@ -677,7 +677,7 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
        }
 
        dev_priv->tdev = ttm_object_device_init
-           (dev_priv->mem_global_ref.object, 12);
+               (dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);
 
        if (unlikely(dev_priv->tdev == NULL)) {
                DRM_ERROR("Unable to initialize TTM object management.\n");
@@ -1210,7 +1210,7 @@ static const struct file_operations vmwgfx_driver_fops = {
 
 static struct drm_driver driver = {
        .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED |
-       DRIVER_MODESET,
+       DRIVER_MODESET | DRIVER_PRIME,
        .load = vmw_driver_load,
        .unload = vmw_driver_unload,
        .lastclose = vmw_lastclose,
@@ -1235,6 +1235,9 @@ static struct drm_driver driver = {
        .dumb_map_offset = vmw_dumb_map_offset,
        .dumb_destroy = vmw_dumb_destroy,
 
+       .prime_fd_to_handle = vmw_prime_fd_to_handle,
+       .prime_handle_to_fd = vmw_prime_handle_to_fd,
+
        .fops = &vmwgfx_driver_fops,
        .name = VMWGFX_DRIVER_NAME,
        .desc = VMWGFX_DRIVER_DESC,
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
index e401d5dbcb964eb59c13fe2c5923f72370155eef..db85985c7086f04648b6a4bb89b83f352c393ec2 100644
@@ -818,6 +818,20 @@ int vmw_overlay_num_free_overlays(struct vmw_private *dev_priv);
 
 extern const struct ttm_mem_type_manager_func vmw_gmrid_manager_func;
 
+/**
+ * Prime - vmwgfx_prime.c
+ */
+
+extern const struct dma_buf_ops vmw_prime_dmabuf_ops;
+extern int vmw_prime_fd_to_handle(struct drm_device *dev,
+                                 struct drm_file *file_priv,
+                                 int fd, u32 *handle);
+extern int vmw_prime_handle_to_fd(struct drm_device *dev,
+                                 struct drm_file *file_priv,
+                                 uint32_t handle, uint32_t flags,
+                                 int *prime_fd);
+
+
 /**
  * Inline helper functions
  */
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c b/drivers/gpu/drm/vmwgfx/vmwgfx_prime.c
new file mode 100644
index 0000000..31fe32d
--- /dev/null
@@ -0,0 +1,137 @@
+/**************************************************************************
+ *
+ * Copyright © 2013 VMware, Inc., Palo Alto, CA., USA
+ * All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
+ * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
+ * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
+ * USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ **************************************************************************/
+/*
+ * Authors:
+ *     Thomas Hellstrom <thellstrom@vmware.com>
+ *
+ */
+
+#include "vmwgfx_drv.h"
+#include <linux/dma-buf.h>
+#include <drm/ttm/ttm_object.h>
+
+/*
+ * DMA-BUF attach- and mapping methods. No need to implement
+ * these until we have other virtual devices use them.
+ */
+
+static int vmw_prime_map_attach(struct dma_buf *dma_buf,
+                               struct device *target_dev,
+                               struct dma_buf_attachment *attach)
+{
+       return -ENOSYS;
+}
+
+static void vmw_prime_map_detach(struct dma_buf *dma_buf,
+                                struct dma_buf_attachment *attach)
+{
+}
+
+static struct sg_table *vmw_prime_map_dma_buf(struct dma_buf_attachment *attach,
+                                             enum dma_data_direction dir)
+{
+       return ERR_PTR(-ENOSYS);
+}
+
+static void vmw_prime_unmap_dma_buf(struct dma_buf_attachment *attach,
+                                   struct sg_table *sgb,
+                                   enum dma_data_direction dir)
+{
+}
+
+static void *vmw_prime_dmabuf_vmap(struct dma_buf *dma_buf)
+{
+       return NULL;
+}
+
+static void vmw_prime_dmabuf_vunmap(struct dma_buf *dma_buf, void *vaddr)
+{
+}
+
+static void *vmw_prime_dmabuf_kmap_atomic(struct dma_buf *dma_buf,
+               unsigned long page_num)
+{
+       return NULL;
+}
+
+static void vmw_prime_dmabuf_kunmap_atomic(struct dma_buf *dma_buf,
+               unsigned long page_num, void *addr)
+{
+}
+
+static void *vmw_prime_dmabuf_kmap(struct dma_buf *dma_buf,
+               unsigned long page_num)
+{
+       return NULL;
+}
+
+static void vmw_prime_dmabuf_kunmap(struct dma_buf *dma_buf,
+               unsigned long page_num, void *addr)
+{
+
+}
+
+static int vmw_prime_dmabuf_mmap(struct dma_buf *dma_buf,
+                                struct vm_area_struct *vma)
+{
+       WARN_ONCE(true, "Attempted use of dmabuf mmap. Bad.\n");
+       return -ENOSYS;
+}
+
+const struct dma_buf_ops vmw_prime_dmabuf_ops =  {
+       .attach = vmw_prime_map_attach,
+       .detach = vmw_prime_map_detach,
+       .map_dma_buf = vmw_prime_map_dma_buf,
+       .unmap_dma_buf = vmw_prime_unmap_dma_buf,
+       .release = NULL,
+       .kmap = vmw_prime_dmabuf_kmap,
+       .kmap_atomic = vmw_prime_dmabuf_kmap_atomic,
+       .kunmap = vmw_prime_dmabuf_kunmap,
+       .kunmap_atomic = vmw_prime_dmabuf_kunmap_atomic,
+       .mmap = vmw_prime_dmabuf_mmap,
+       .vmap = vmw_prime_dmabuf_vmap,
+       .vunmap = vmw_prime_dmabuf_vunmap,
+};
+
+int vmw_prime_fd_to_handle(struct drm_device *dev,
+                          struct drm_file *file_priv,
+                          int fd, u32 *handle)
+{
+       struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+       return ttm_prime_fd_to_handle(tfile, fd, handle);
+}
+
+int vmw_prime_handle_to_fd(struct drm_device *dev,
+                          struct drm_file *file_priv,
+                          uint32_t handle, uint32_t flags,
+                          int *prime_fd)
+{
+       struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
+
+       return ttm_prime_handle_to_fd(tfile, handle, flags, prime_fd);
+}
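Once the driver advertises DRIVER_PRIME and these two hooks, userspace reaches ttm_prime_handle_to_fd() / ttm_prime_fd_to_handle() through the generic DRM prime ioctls. A hedged userspace sketch using libdrm's wrappers (nothing vmwgfx-specific is assumed):

#include <stdint.h>
#include <xf86drm.h>	/* libdrm: drmPrimeHandleToFD() / drmPrimeFDToHandle() */

/* Export a buffer handle as a dma-buf fd; lands in ttm_prime_handle_to_fd(). */
int export_bo(int drm_fd, uint32_t handle, int *dmabuf_fd)
{
	return drmPrimeHandleToFD(drm_fd, handle, DRM_CLOEXEC, dmabuf_fd);
}

/* Import a dma-buf fd back to a handle; only fds exported by the same
 * ttm_object_device are accepted (anything else gets -ENOSYS). */
int import_bo(int drm_fd, int dmabuf_fd, uint32_t *handle)
{
	return drmPrimeFDToHandle(drm_fd, dmabuf_fd, handle);
}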
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c b/drivers/gpu/drm/vmwgfx/vmwgfx_resource.c
index 252501a54def8e110d29884cf820b2471e4bf4a8..efe2b74c5eb17f1f86b12048be5884c9aabf50b2 100644
@@ -35,7 +35,7 @@
 #define VMW_RES_EVICT_ERR_COUNT 10
 
 struct vmw_user_dma_buffer {
-       struct ttm_base_object base;
+       struct ttm_prime_object prime;
        struct vmw_dma_buffer dma;
 };
 
@@ -297,7 +297,7 @@ int vmw_user_resource_lookup_handle(struct vmw_private *dev_priv,
        if (unlikely(base == NULL))
                return -EINVAL;
 
-       if (unlikely(base->object_type != converter->object_type))
+       if (unlikely(ttm_base_object_type(base) != converter->object_type))
                goto out_bad_resource;
 
        res = converter->base_obj_to_res(base);
@@ -387,7 +387,7 @@ static void vmw_user_dmabuf_destroy(struct ttm_buffer_object *bo)
 {
        struct vmw_user_dma_buffer *vmw_user_bo = vmw_user_dma_buffer(bo);
 
-       ttm_base_object_kfree(vmw_user_bo, base);
+       ttm_prime_object_kfree(vmw_user_bo, prime);
 }
 
 static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
@@ -401,7 +401,8 @@ static void vmw_user_dmabuf_release(struct ttm_base_object **p_base)
        if (unlikely(base == NULL))
                return;
 
-       vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
+       vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
+                                  prime.base);
        bo = &vmw_user_bo->dma.base;
        ttm_bo_unref(&bo);
 }
@@ -442,18 +443,19 @@ int vmw_user_dmabuf_alloc(struct vmw_private *dev_priv,
                return ret;
 
        tmp = ttm_bo_reference(&user_bo->dma.base);
-       ret = ttm_base_object_init(tfile,
-                                  &user_bo->base,
-                                  shareable,
-                                  ttm_buffer_type,
-                                  &vmw_user_dmabuf_release, NULL);
+       ret = ttm_prime_object_init(tfile,
+                                   size,
+                                   &user_bo->prime,
+                                   shareable,
+                                   ttm_buffer_type,
+                                   &vmw_user_dmabuf_release, NULL);
        if (unlikely(ret != 0)) {
                ttm_bo_unref(&tmp);
                goto out_no_base_object;
        }
 
        *p_dma_buf = &user_bo->dma;
-       *handle = user_bo->base.hash.key;
+       *handle = user_bo->prime.base.hash.key;
 
 out_no_base_object:
        return ret;
@@ -475,8 +477,8 @@ int vmw_user_dmabuf_verify_access(struct ttm_buffer_object *bo,
                return -EPERM;
 
        vmw_user_bo = vmw_user_dma_buffer(bo);
-       return (vmw_user_bo->base.tfile == tfile ||
-       vmw_user_bo->base.shareable) ? 0 : -EPERM;
+       return (vmw_user_bo->prime.base.tfile == tfile ||
+               vmw_user_bo->prime.base.shareable) ? 0 : -EPERM;
 }
 
 int vmw_dmabuf_alloc_ioctl(struct drm_device *dev, void *data,
@@ -538,14 +540,15 @@ int vmw_user_dmabuf_lookup(struct ttm_object_file *tfile,
                return -ESRCH;
        }
 
-       if (unlikely(base->object_type != ttm_buffer_type)) {
+       if (unlikely(ttm_base_object_type(base) != ttm_buffer_type)) {
                ttm_base_object_unref(&base);
                printk(KERN_ERR "Invalid buffer object handle 0x%08lx.\n",
                       (unsigned long)handle);
                return -EINVAL;
        }
 
-       vmw_user_bo = container_of(base, struct vmw_user_dma_buffer, base);
+       vmw_user_bo = container_of(base, struct vmw_user_dma_buffer,
+                                  prime.base);
        (void)ttm_bo_reference(&vmw_user_bo->dma.base);
        ttm_base_object_unref(&base);
        *out = &vmw_user_bo->dma;
@@ -562,7 +565,8 @@ int vmw_user_dmabuf_reference(struct ttm_object_file *tfile,
                return -EINVAL;
 
        user_bo = container_of(dma_buf, struct vmw_user_dma_buffer, dma);
-       return ttm_ref_object_add(tfile, &user_bo->base, TTM_REF_USAGE, NULL);
+       return ttm_ref_object_add(tfile, &user_bo->prime.base,
+                                 TTM_REF_USAGE, NULL);
 }
 
 /*
@@ -807,15 +811,16 @@ int vmw_dumb_create(struct drm_file *file_priv,
                goto out_no_dmabuf;
 
        tmp = ttm_bo_reference(&vmw_user_bo->dma.base);
-       ret = ttm_base_object_init(vmw_fpriv(file_priv)->tfile,
-                                  &vmw_user_bo->base,
-                                  false,
-                                  ttm_buffer_type,
-                                  &vmw_user_dmabuf_release, NULL);
+       ret = ttm_prime_object_init(vmw_fpriv(file_priv)->tfile,
+                                   args->size,
+                                   &vmw_user_bo->prime,
+                                   false,
+                                   ttm_buffer_type,
+                                   &vmw_user_dmabuf_release, NULL);
        if (unlikely(ret != 0))
                goto out_no_base_object;
 
-       args->handle = vmw_user_bo->base.hash.key;
+       args->handle = vmw_user_bo->prime.base.hash.key;
 
 out_no_base_object:
        ttm_bo_unref(&tmp);
@@ -994,7 +999,6 @@ void vmw_resource_unreserve(struct vmw_resource *res,
  */
 static int
 vmw_resource_check_buffer(struct vmw_resource *res,
-                         struct ww_acquire_ctx *ticket,
                          bool interruptible,
                          struct ttm_validate_buffer *val_buf)
 {
@@ -1011,7 +1015,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
        INIT_LIST_HEAD(&val_list);
        val_buf->bo = ttm_bo_reference(&res->backup->base);
        list_add_tail(&val_buf->head, &val_list);
-       ret = ttm_eu_reserve_buffers(ticket, &val_list);
+       ret = ttm_eu_reserve_buffers(NULL, &val_list);
        if (unlikely(ret != 0))
                goto out_no_reserve;
 
@@ -1029,7 +1033,7 @@ vmw_resource_check_buffer(struct vmw_resource *res,
        return 0;
 
 out_no_validate:
-       ttm_eu_backoff_reservation(ticket, &val_list);
+       ttm_eu_backoff_reservation(NULL, &val_list);
 out_no_reserve:
        ttm_bo_unref(&val_buf->bo);
        if (backup_dirty)
@@ -1074,8 +1078,7 @@ int vmw_resource_reserve(struct vmw_resource *res, bool no_backup)
  * @val_buf:        Backup buffer information.
  */
 static void
-vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
-                                struct ttm_validate_buffer *val_buf)
+vmw_resource_backoff_reservation(struct ttm_validate_buffer *val_buf)
 {
        struct list_head val_list;
 
@@ -1084,7 +1087,7 @@ vmw_resource_backoff_reservation(struct ww_acquire_ctx *ticket,
 
        INIT_LIST_HEAD(&val_list);
        list_add_tail(&val_buf->head, &val_list);
-       ttm_eu_backoff_reservation(ticket, &val_list);
+       ttm_eu_backoff_reservation(NULL, &val_list);
        ttm_bo_unref(&val_buf->bo);
 }
 
@@ -1099,14 +1102,12 @@ int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
 {
        struct ttm_validate_buffer val_buf;
        const struct vmw_res_func *func = res->func;
-       struct ww_acquire_ctx ticket;
        int ret;
 
        BUG_ON(!func->may_evict);
 
        val_buf.bo = NULL;
-       ret = vmw_resource_check_buffer(res, &ticket, interruptible,
-                                       &val_buf);
+       ret = vmw_resource_check_buffer(res, interruptible, &val_buf);
        if (unlikely(ret != 0))
                return ret;
 
@@ -1121,7 +1122,7 @@ int vmw_resource_do_evict(struct vmw_resource *res, bool interruptible)
        res->backup_dirty = true;
        res->res_dirty = false;
 out_no_unbind:
-       vmw_resource_backoff_reservation(&ticket, &val_buf);
+       vmw_resource_backoff_reservation(&val_buf);
 
        return ret;
 }
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c b/drivers/gpu/drm/vmwgfx/vmwgfx_surface.c
index 582814339748c2fa194921ab8034d7e629088367..7de2ea8bd55357561913384bfa23196fe3479ec2 100644
@@ -38,7 +38,7 @@
  * @size:           TTM accounting size for the surface.
  */
 struct vmw_user_surface {
-       struct ttm_base_object base;
+       struct ttm_prime_object prime;
        struct vmw_surface srf;
        uint32_t size;
        uint32_t backup_handle;
@@ -580,7 +580,8 @@ static int vmw_surface_init(struct vmw_private *dev_priv,
 static struct vmw_resource *
 vmw_user_surface_base_to_res(struct ttm_base_object *base)
 {
-       return &(container_of(base, struct vmw_user_surface, base)->srf.res);
+       return &(container_of(base, struct vmw_user_surface,
+                             prime.base)->srf.res);
 }
 
 /**
@@ -599,7 +600,7 @@ static void vmw_user_surface_free(struct vmw_resource *res)
        kfree(srf->offsets);
        kfree(srf->sizes);
        kfree(srf->snooper.image);
-       ttm_base_object_kfree(user_srf, base);
+       ttm_prime_object_kfree(user_srf, prime);
        ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
 }
 
@@ -616,7 +617,7 @@ static void vmw_user_surface_base_release(struct ttm_base_object **p_base)
 {
        struct ttm_base_object *base = *p_base;
        struct vmw_user_surface *user_srf =
-           container_of(base, struct vmw_user_surface, base);
+           container_of(base, struct vmw_user_surface, prime.base);
        struct vmw_resource *res = &user_srf->srf.res;
 
        *p_base = NULL;
@@ -790,8 +791,8 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
        }
        srf->snooper.crtc = NULL;
 
-       user_srf->base.shareable = false;
-       user_srf->base.tfile = NULL;
+       user_srf->prime.base.shareable = false;
+       user_srf->prime.base.tfile = NULL;
 
        /**
         * From this point, the generic resource management functions
@@ -803,9 +804,9 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
                goto out_unlock;
 
        tmp = vmw_resource_reference(&srf->res);
-       ret = ttm_base_object_init(tfile, &user_srf->base,
-                                  req->shareable, VMW_RES_SURFACE,
-                                  &vmw_user_surface_base_release, NULL);
+       ret = ttm_prime_object_init(tfile, res->backup_size, &user_srf->prime,
+                                   req->shareable, VMW_RES_SURFACE,
+                                   &vmw_user_surface_base_release, NULL);
 
        if (unlikely(ret != 0)) {
                vmw_resource_unreference(&tmp);
@@ -813,7 +814,7 @@ int vmw_surface_define_ioctl(struct drm_device *dev, void *data,
                goto out_unlock;
        }
 
-       rep->sid = user_srf->base.hash.key;
+       rep->sid = user_srf->prime.base.hash.key;
        vmw_resource_unreference(&res);
 
        ttm_read_unlock(&vmaster->lock);
@@ -823,7 +824,7 @@ out_no_copy:
 out_no_offsets:
        kfree(srf->sizes);
 out_no_sizes:
-       ttm_base_object_kfree(user_srf, base);
+       ttm_prime_object_kfree(user_srf, prime);
 out_no_user_srf:
        ttm_mem_global_free(vmw_mem_glob(dev_priv), size);
 out_unlock:
@@ -859,13 +860,14 @@ int vmw_surface_reference_ioctl(struct drm_device *dev, void *data,
                return -EINVAL;
        }
 
-       if (unlikely(base->object_type != VMW_RES_SURFACE))
+       if (unlikely(ttm_base_object_type(base) != VMW_RES_SURFACE))
                goto out_bad_resource;
 
-       user_srf = container_of(base, struct vmw_user_surface, base);
+       user_srf = container_of(base, struct vmw_user_surface, prime.base);
        srf = &user_srf->srf;
 
-       ret = ttm_ref_object_add(tfile, &user_srf->base, TTM_REF_USAGE, NULL);
+       ret = ttm_ref_object_add(tfile, &user_srf->prime.base,
+                                TTM_REF_USAGE, NULL);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Could not add a reference to a surface.\n");
                goto out_no_reference;
diff --git a/include/drm/ttm/ttm_execbuf_util.h b/include/drm/ttm/ttm_execbuf_util.h
index ec8a1d306510e1f5c92fc7193a777e6f4b73caf2..16db7d01a33668b576d9888fa8335bc7cc3346c9 100644
@@ -70,7 +70,8 @@ extern void ttm_eu_backoff_reservation(struct ww_acquire_ctx *ticket,
 /**
  * function ttm_eu_reserve_buffers
  *
- * @ticket:  [out] ww_acquire_ctx returned by call.
+ * @ticket:  [out] ww_acquire_ctx filled in by call, or NULL if only
+ *           non-blocking reserves should be tried.
  * @list:    thread private list of ttm_validate_buffer structs.
  *
  * Tries to reserve bos pointed to by the list entries for validation.
diff --git a/include/drm/ttm/ttm_object.h b/include/drm/ttm/ttm_object.h
index fc0cf064990196534a7e93097860277f8b21e7e6..58b029894eb33ea32dff2ba2977546c097f2b533 100644
@@ -41,6 +41,7 @@
 #include <drm/drm_hashtab.h>
 #include <linux/kref.h>
 #include <linux/rcupdate.h>
+#include <linux/dma-buf.h>
 #include <ttm/ttm_memory.h>
 
 /**
@@ -77,6 +78,7 @@ enum ttm_object_type {
        ttm_fence_type,
        ttm_buffer_type,
        ttm_lock_type,
+       ttm_prime_type,
        ttm_driver_type0 = 256,
        ttm_driver_type1,
        ttm_driver_type2,
@@ -132,6 +134,30 @@ struct ttm_base_object {
                                 enum ttm_ref_type ref_type);
 };
 
+
+/**
+ * struct ttm_prime_object - Modified base object that is prime-aware
+ *
+ * @base: struct ttm_base_object that we derive from
+ * @mutex: Mutex protecting the @dma_buf member.
+ * @size: Size of the dma_buf associated with this object
+ * @real_type: Type of the underlying object. Needed since we're setting
+ * the value of @base::object_type to ttm_prime_type
+ * @dma_buf: Non ref-counted pointer to a struct dma_buf created from this
+ * object.
+ * @refcount_release: The underlying object's release method. Needed since
+ * we set @base::refcount_release to our own release method.
+ */
+
+struct ttm_prime_object {
+       struct ttm_base_object base;
+       struct mutex mutex;
+       size_t size;
+       enum ttm_object_type real_type;
+       struct dma_buf *dma_buf;
+       void (*refcount_release) (struct ttm_base_object **);
+};
+
 /**
  * ttm_base_object_init
  *
@@ -248,14 +274,18 @@ extern void ttm_object_file_release(struct ttm_object_file **p_tfile);
 /**
  * ttm_object device init - initialize a struct ttm_object_device
  *
+ * @mem_glob: struct ttm_mem_global for memory accounting.
  * @hash_order: Order of hash table used to hash the base objects.
+ * @ops: DMA buf ops for prime objects of this device.
  *
  * This function is typically called on device initialization to prepare
  * data structures needed for ttm base and ref objects.
  */
 
-extern struct ttm_object_device *ttm_object_device_init
-    (struct ttm_mem_global *mem_glob, unsigned int hash_order);
+extern struct ttm_object_device *
+ttm_object_device_init(struct ttm_mem_global *mem_glob,
+                      unsigned int hash_order,
+                      const struct dma_buf_ops *ops);
 
 /**
  * ttm_object_device_release - release data held by a ttm_object_device
@@ -272,4 +302,31 @@ extern void ttm_object_device_release(struct ttm_object_device **p_tdev);
 
 #define ttm_base_object_kfree(__object, __base)\
        kfree_rcu(__object, __base.rhead)
+
+extern int ttm_prime_object_init(struct ttm_object_file *tfile,
+                                size_t size,
+                                struct ttm_prime_object *prime,
+                                bool shareable,
+                                enum ttm_object_type type,
+                                void (*refcount_release)
+                                (struct ttm_base_object **),
+                                void (*ref_obj_release)
+                                (struct ttm_base_object *,
+                                 enum ttm_ref_type ref_type));
+
+static inline enum ttm_object_type
+ttm_base_object_type(struct ttm_base_object *base)
+{
+       return (base->object_type == ttm_prime_type) ?
+               container_of(base, struct ttm_prime_object, base)->real_type :
+               base->object_type;
+}
+extern int ttm_prime_fd_to_handle(struct ttm_object_file *tfile,
+                                 int fd, u32 *handle);
+extern int ttm_prime_handle_to_fd(struct ttm_object_file *tfile,
+                                 uint32_t handle, uint32_t flags,
+                                 int *prime_fd);
+
+#define ttm_prime_object_kfree(__obj, __prime)         \
+       kfree_rcu(__obj, __prime.base.rhead)
 #endif
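The ttm_base_object_type() helper above is what keeps existing handle lookups working once objects are registered as ttm_prime_type: it dispatches to real_type for prime objects and falls back to object_type otherwise. A short sketch of the lookup pattern the vmwgfx hunks above switch to:

struct ttm_base_object *base = ttm_base_object_lookup(tfile, handle);

if (base != NULL) {
	if (ttm_base_object_type(base) == ttm_buffer_type) {
		/* Matches both a plain ttm_buffer_type base object and a
		 * ttm_prime_object whose real_type is ttm_buffer_type. */
		/* ... use the object ... */
	}
	ttm_base_object_unref(&base);
}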