Merge tag 'tilcdc-4.19' of https://github.com/jsarha/linux into drm-next
drivers/dma-buf/dma-buf.c
index 5394507138381d7e5da309a0d6497ed08df4f0a9..13884474d1588f7a086154d3b06dbd09ff28e881 100644 (file)
@@ -135,10 +135,10 @@ static loff_t dma_buf_llseek(struct file *file, loff_t offset, int whence)
  * Userspace can query the state of these implicitly tracked fences using poll()
  * and related system calls:
  *
- * - Checking for POLLIN, i.e. read access, can be use to query the state of the
+ * - Checking for EPOLLIN, i.e. read access, can be used to query the state of the
  *   most recent write or exclusive fence.
  *
- * - Checking for POLLOUT, i.e. write access, can be used to query the state of
+ * - Checking for EPOLLOUT, i.e. write access, can be used to query the state of
  *   all attached fences, shared and exclusive ones.
  *
  * Note that this only signals the completion of the respective fences, i.e. the
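As a rough illustration of the poll() semantics documented above, here is a minimal userspace sketch (not part of this patch): it waits until the most recent write, i.e. the exclusive fence, of a dma-buf has signalled. The file descriptor and the wait_for_write_done() helper name are hypothetical.

/* Hypothetical userspace sketch: block until the exclusive fence of a
 * dma-buf has signalled, per the EPOLLIN semantics documented above. */
#include <poll.h>

static int wait_for_write_done(int dmabuf_fd, int timeout_ms)
{
        struct pollfd pfd = {
                .fd     = dmabuf_fd,
                .events = POLLIN,       /* read access: most recent write/exclusive fence */
        };
        int ret = poll(&pfd, 1, timeout_ms);

        if (ret <= 0)
                return ret;             /* error (<0) or timeout (0) */
        return (pfd.revents & POLLIN) ? 1 : 0;  /* 1: exclusive fence signalled */
}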
@@ -168,13 +168,13 @@ static __poll_t dma_buf_poll(struct file *file, poll_table *poll)
 
        dmabuf = file->private_data;
        if (!dmabuf || !dmabuf->resv)
-               return POLLERR;
+               return EPOLLERR;
 
        resv = dmabuf->resv;
 
        poll_wait(file, &dmabuf->poll, poll);
 
-       events = poll_requested_events(poll) & (POLLIN | POLLOUT);
+       events = poll_requested_events(poll) & (EPOLLIN | EPOLLOUT);
        if (!events)
                return 0;
 
@@ -193,12 +193,12 @@ retry:
                goto retry;
        }
 
-       if (fence_excl && (!(events & POLLOUT) || shared_count == 0)) {
+       if (fence_excl && (!(events & EPOLLOUT) || shared_count == 0)) {
                struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_excl;
-               __poll_t pevents = POLLIN;
+               __poll_t pevents = EPOLLIN;
 
                if (shared_count == 0)
-                       pevents |= POLLOUT;
+                       pevents |= EPOLLOUT;
 
                spin_lock_irq(&dmabuf->poll.lock);
                if (dcb->active) {
@@ -228,19 +228,19 @@ retry:
                }
        }
 
-       if ((events & POLLOUT) && shared_count > 0) {
+       if ((events & EPOLLOUT) && shared_count > 0) {
                struct dma_buf_poll_cb_t *dcb = &dmabuf->cb_shared;
                int i;
 
                /* Only queue a new callback if no event has fired yet */
                spin_lock_irq(&dmabuf->poll.lock);
                if (dcb->active)
-                       events &= ~POLLOUT;
+                       events &= ~EPOLLOUT;
                else
-                       dcb->active = POLLOUT;
+                       dcb->active = EPOLLOUT;
                spin_unlock_irq(&dmabuf->poll.lock);
 
-               if (!(events & POLLOUT))
+               if (!(events & EPOLLOUT))
                        goto out;
 
                for (i = 0; i < shared_count; ++i) {
@@ -253,14 +253,14 @@ retry:
                                 *
                                 * call dma_buf_poll_cb and force a recheck!
                                 */
-                               events &= ~POLLOUT;
+                               events &= ~EPOLLOUT;
                                dma_buf_poll_cb(NULL, &dcb->cb);
                                break;
                        }
                        if (!dma_fence_add_callback(fence, &dcb->cb,
                                                    dma_buf_poll_cb)) {
                                dma_fence_put(fence);
-                               events &= ~POLLOUT;
+                               events &= ~EPOLLOUT;
                                break;
                        }
                        dma_fence_put(fence);
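The POLL* to EPOLL* renames in this hunk are part of the kernel-wide __poll_t annotation work: the EPOLL* constants are the typed (__poll_t, __bitwise) equivalents of the classic POLL* flags, so sparse can flag poll masks that are mixed with plain integers. A generic sketch of a ->poll() handler written against that convention follows; struct my_dev and its fields are hypothetical and not from this file.

/* Generic sketch (not from this file) of a ->poll() handler using the
 * __poll_t/EPOLL* convention this patch converts dma-buf to. */
#include <linux/fs.h>
#include <linux/poll.h>
#include <linux/wait.h>

struct my_dev {                         /* hypothetical example device */
        wait_queue_head_t waitq;
        bool data_ready;
};

static __poll_t my_dev_poll(struct file *file, poll_table *wait)
{
        struct my_dev *dev = file->private_data;
        __poll_t mask = 0;              /* __poll_t, not a plain unsigned int */

        poll_wait(file, &dev->waitq, wait);

        if (dev->data_ready)
                mask |= EPOLLIN | EPOLLRDNORM;

        return mask;
}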
@@ -405,7 +405,6 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
                          || !exp_info->ops->map_dma_buf
                          || !exp_info->ops->unmap_dma_buf
                          || !exp_info->ops->release
-                         || !exp_info->ops->map_atomic
                          || !exp_info->ops->map
                          || !exp_info->ops->mmap)) {
                return ERR_PTR(-EINVAL);
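With ->map_atomic dropped from the mandatory-callback check above, the following is a hedged sketch of the smallest dma_buf_ops table that would still pass the WARN_ON() validation; the my_* callbacks are hypothetical stubs, not working implementations.

/* Hedged sketch: a minimal dma_buf_ops table that satisfies the
 * WARN_ON() validation above now that ->map_atomic is not checked. */
#include <linux/dma-buf.h>

static struct sg_table *my_map_dma_buf(struct dma_buf_attachment *attach,
                                       enum dma_data_direction dir)
{
        return NULL;            /* a real exporter returns a mapped sg_table */
}

static void my_unmap_dma_buf(struct dma_buf_attachment *attach,
                             struct sg_table *sgt,
                             enum dma_data_direction dir)
{
}

static void my_release(struct dma_buf *dmabuf)
{
}

static void *my_kmap(struct dma_buf *dmabuf, unsigned long page_num)
{
        return NULL;
}

static int my_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma)
{
        return -EINVAL;
}

static const struct dma_buf_ops my_exporter_ops = {
        .map_dma_buf    = my_map_dma_buf,
        .unmap_dma_buf  = my_unmap_dma_buf,
        .release        = my_release,
        .map            = my_kmap,
        .mmap           = my_mmap,
        /* .map_atomic no longer needs to be provided */
};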
@@ -568,7 +567,7 @@ struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
        mutex_lock(&dmabuf->lock);
 
        if (dmabuf->ops->attach) {
-               ret = dmabuf->ops->attach(dmabuf, dev, attach);
+               ret = dmabuf->ops->attach(dmabuf, attach);
                if (ret)
                        goto err_attach;
        }
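The ->attach() call above reflects the updated callback signature, which no longer takes a struct device: the importing device is reachable as attach->dev. A hedged exporter-side sketch follows; struct my_attachment is a hypothetical per-attachment bookkeeping structure.

/* Hedged sketch of an exporter ->attach() callback using the new
 * two-argument signature called above. */
#include <linux/dma-buf.h>
#include <linux/slab.h>

struct my_attachment {                  /* hypothetical per-attachment state */
        struct device *dev;
};

static int my_attach(struct dma_buf *dmabuf, struct dma_buf_attachment *attach)
{
        struct my_attachment *a = kzalloc(sizeof(*a), GFP_KERNEL);

        if (!a)
                return -ENOMEM;

        /* The importing device used to arrive as a separate argument;
         * it is now taken from the attachment itself. */
        a->dev = attach->dev;
        attach->priv = a;

        return 0;
}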
@@ -687,26 +686,14 @@ EXPORT_SYMBOL_GPL(dma_buf_unmap_attachment);
  *      void \*dma_buf_kmap(struct dma_buf \*, unsigned long);
  *      void dma_buf_kunmap(struct dma_buf \*, unsigned long, void \*);
  *
- *   There are also atomic variants of these interfaces. Like for kmap they
- *   facilitate non-blocking fast-paths. Neither the importer nor the exporter
- *   (in the callback) is allowed to block when using these.
- *
- *   Interfaces::
- *      void \*dma_buf_kmap_atomic(struct dma_buf \*, unsigned long);
- *      void dma_buf_kunmap_atomic(struct dma_buf \*, unsigned long, void \*);
- *
- *   For importers all the restrictions of using kmap apply, like the limited
- *   supply of kmap_atomic slots. Hence an importer shall only hold onto at
- *   max 2 atomic dma_buf kmaps at the same time (in any given process context).
+ *   Implementing these functions is optional for exporters; for importers, all
+ *   the restrictions of using kmap apply.
  *
  *   dma_buf kmap calls outside of the range specified in begin_cpu_access are
  *   undefined. If the range is not PAGE_SIZE aligned, kmap needs to succeed on
  *   the partial chunks at the beginning and end but may return stale or bogus
  *   data outside of the range (in these partial chunks).
  *
- *   Note that these calls need to always succeed. The exporter needs to
- *   complete any preparations that might fail in begin_cpu_access.
- *
 *   For some cases the overhead of kmap can be too high; for these, a vmap
 *   interface is introduced. This interface should be used very carefully, as
 *   vmalloc space is a limited resource on many architectures.
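Putting the documented rules together, here is a hedged importer-side sketch of CPU access through the kmap interface. Since the kmap callbacks are optional for exporters, the NULL return from dma_buf_kmap() is checked; my_cpu_touch_page is a hypothetical helper.

/* Hedged importer-side sketch of CPU access through the kmap interface
 * described above. */
#include <linux/dma-buf.h>

static int my_cpu_touch_page(struct dma_buf *dmabuf, unsigned long page_num)
{
        void *vaddr;
        int ret;

        ret = dma_buf_begin_cpu_access(dmabuf, DMA_FROM_DEVICE);
        if (ret)
                return ret;

        vaddr = dma_buf_kmap(dmabuf, page_num);
        if (!vaddr) {
                /* exporter does not implement ->map */
                dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
                return -ENXIO;
        }

        /* ... read or modify the PAGE_SIZE chunk at vaddr ... */

        dma_buf_kunmap(dmabuf, page_num, vaddr);
        return dma_buf_end_cpu_access(dmabuf, DMA_FROM_DEVICE);
}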
@@ -859,41 +846,6 @@ int dma_buf_end_cpu_access(struct dma_buf *dmabuf,
 }
 EXPORT_SYMBOL_GPL(dma_buf_end_cpu_access);
 
-/**
- * dma_buf_kmap_atomic - Map a page of the buffer object into kernel address
- * space. The same restrictions as for kmap_atomic and friends apply.
- * @dmabuf:    [in]    buffer to map page from.
- * @page_num:  [in]    page in PAGE_SIZE units to map.
- *
- * This call must always succeed, any necessary preparations that might fail
- * need to be done in begin_cpu_access.
- */
-void *dma_buf_kmap_atomic(struct dma_buf *dmabuf, unsigned long page_num)
-{
-       WARN_ON(!dmabuf);
-
-       return dmabuf->ops->map_atomic(dmabuf, page_num);
-}
-EXPORT_SYMBOL_GPL(dma_buf_kmap_atomic);
-
-/**
- * dma_buf_kunmap_atomic - Unmap a page obtained by dma_buf_kmap_atomic.
- * @dmabuf:    [in]    buffer to unmap page from.
- * @page_num:  [in]    page in PAGE_SIZE units to unmap.
- * @vaddr:     [in]    kernel space pointer obtained from dma_buf_kmap_atomic.
- *
- * This call must always succeed.
- */
-void dma_buf_kunmap_atomic(struct dma_buf *dmabuf, unsigned long page_num,
-                          void *vaddr)
-{
-       WARN_ON(!dmabuf);
-
-       if (dmabuf->ops->unmap_atomic)
-               dmabuf->ops->unmap_atomic(dmabuf, page_num, vaddr);
-}
-EXPORT_SYMBOL_GPL(dma_buf_kunmap_atomic);
-
 /**
  * dma_buf_kmap - Map a page of the buffer object into kernel address space. The
  * same restrictions as for kmap and friends apply.
@@ -907,6 +859,8 @@ void *dma_buf_kmap(struct dma_buf *dmabuf, unsigned long page_num)
 {
        WARN_ON(!dmabuf);
 
+       if (!dmabuf->ops->map)
+               return NULL;
        return dmabuf->ops->map(dmabuf, page_num);
 }
 EXPORT_SYMBOL_GPL(dma_buf_kmap);