diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 606df7bea97bff05691b41993ee741088c3d86dd..0a3ea3034e39a65f6afefc99c2f0294aa906d94b 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
  * - 1.0.0 - initial interface
  * - 1.1.0 - adds madvise, and support for submits with > 4 cmd buffers
  * - 1.2.0 - adds explicit fence support for submit ioctl
+ * - 1.3.0 - adds GMEM_BASE + NR_RINGS params, SUBMITQUEUE_NEW +
+ *           SUBMITQUEUE_CLOSE ioctls, and MSM_INFO_IOVA flag for
+ *           MSM_GEM_INFO ioctl.
  */
 #define MSM_VERSION_MAJOR      1
-#define MSM_VERSION_MINOR      2
+#define MSM_VERSION_MINOR      3
 #define MSM_VERSION_PATCHLEVEL 0
 
 static void msm_fb_output_poll_changed(struct drm_device *dev)
@@ -44,7 +47,7 @@ static void msm_fb_output_poll_changed(struct drm_device *dev)
 static const struct drm_mode_config_funcs mode_config_funcs = {
        .fb_create = msm_framebuffer_create,
        .output_poll_changed = msm_fb_output_poll_changed,
-       .atomic_check = msm_atomic_check,
+       .atomic_check = drm_atomic_helper_check,
        .atomic_commit = msm_atomic_commit,
        .atomic_state_alloc = msm_atomic_state_alloc,
        .atomic_state_clear = msm_atomic_state_clear,
@@ -211,7 +214,6 @@ static int msm_drm_uninit(struct device *dev)
        struct drm_device *ddev = platform_get_drvdata(pdev);
        struct msm_drm_private *priv = ddev->dev_private;
        struct msm_kms *kms = priv->kms;
-       struct msm_gpu *gpu = priv->gpu;
        struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
        struct vblank_event *vbl_ev, *tmp;
 
@@ -253,15 +255,6 @@ static int msm_drm_uninit(struct device *dev)
        if (kms && kms->funcs)
                kms->funcs->destroy(kms);
 
-       if (gpu) {
-               mutex_lock(&ddev->struct_mutex);
-               // XXX what do we do here?
-               //pm_runtime_enable(&pdev->dev);
-               gpu->funcs->pm_suspend(gpu);
-               mutex_unlock(&ddev->struct_mutex);
-               gpu->funcs->destroy(gpu);
-       }
-
        if (priv->vram.paddr) {
                unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING;
                drm_mm_takedown(&priv->vram.mm);
@@ -514,24 +507,37 @@ static void load_gpu(struct drm_device *dev)
        mutex_unlock(&init_lock);
 }
 
-static int msm_open(struct drm_device *dev, struct drm_file *file)
+static int context_init(struct drm_device *dev, struct drm_file *file)
 {
        struct msm_file_private *ctx;
 
-       /* For now, load gpu on open.. to avoid the requirement of having
-        * firmware in the initrd.
-        */
-       load_gpu(dev);
-
        ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
        if (!ctx)
                return -ENOMEM;
 
+       msm_submitqueue_init(dev, ctx);
+
        file->driver_priv = ctx;
 
        return 0;
 }
 
+static int msm_open(struct drm_device *dev, struct drm_file *file)
+{
+       /* For now, load gpu on open.. to avoid the requirement of having
+        * firmware in the initrd.
+        */
+       load_gpu(dev);
+
+       return context_init(dev, file);
+}
+
+static void context_close(struct msm_file_private *ctx)
+{
+       msm_submitqueue_close(ctx);
+       kfree(ctx);
+}
+
 static void msm_postclose(struct drm_device *dev, struct drm_file *file)
 {
        struct msm_drm_private *priv = dev->dev_private;
@@ -542,7 +548,7 @@ static void msm_postclose(struct drm_device *dev, struct drm_file *file)
                priv->lastctx = NULL;
        mutex_unlock(&dev->struct_mutex);
 
-       kfree(ctx);
+       context_close(ctx);
 }
 
 static void msm_lastclose(struct drm_device *dev)
@@ -737,16 +743,27 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
        struct msm_drm_private *priv = dev->dev_private;
        struct drm_msm_wait_fence *args = data;
        ktime_t timeout = to_ktime(args->timeout);
+       struct msm_gpu_submitqueue *queue;
+       struct msm_gpu *gpu = priv->gpu;
+       int ret;
 
        if (args->pad) {
                DRM_ERROR("invalid pad: %08x\n", args->pad);
                return -EINVAL;
        }
 
-       if (!priv->gpu)
+       if (!gpu)
                return 0;
 
-       return msm_wait_fence(priv->gpu->fctx, args->fence, &timeout, true);
+       queue = msm_submitqueue_get(file->driver_priv, args->queueid);
+       if (!queue)
+               return -ENOENT;
+
+       ret = msm_wait_fence(gpu->rb[queue->prio]->fctx, args->fence, &timeout,
+               true);
+
+       msm_submitqueue_put(queue);
+       return ret;
 }
 
 static int msm_ioctl_gem_madvise(struct drm_device *dev, void *data,
@@ -787,6 +804,28 @@ unlock:
        return ret;
 }
 
+
+static int msm_ioctl_submitqueue_new(struct drm_device *dev, void *data,
+               struct drm_file *file)
+{
+       struct drm_msm_submitqueue *args = data;
+
+       if (args->flags & ~MSM_SUBMITQUEUE_FLAGS)
+               return -EINVAL;
+
+       return msm_submitqueue_create(dev, file->driver_priv, args->prio,
+               args->flags, &args->id);
+}
+
+
+static int msm_ioctl_submitqueue_close(struct drm_device *dev, void *data,
+               struct drm_file *file)
+{
+       u32 id = *(u32 *) data;
+
+       return msm_submitqueue_remove(file->driver_priv, id);
+}
+
 static const struct drm_ioctl_desc msm_ioctls[] = {
        DRM_IOCTL_DEF_DRV(MSM_GET_PARAM,    msm_ioctl_get_param,    DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(MSM_GEM_NEW,      msm_ioctl_gem_new,      DRM_AUTH|DRM_RENDER_ALLOW),
@@ -796,6 +835,8 @@ static const struct drm_ioctl_desc msm_ioctls[] = {
        DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT,   msm_ioctl_gem_submit,   DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE,   msm_ioctl_wait_fence,   DRM_AUTH|DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(MSM_GEM_MADVISE,  msm_ioctl_gem_madvise,  DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW,   msm_ioctl_submitqueue_new,   DRM_AUTH|DRM_RENDER_ALLOW),
+       DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_AUTH|DRM_RENDER_ALLOW),
 };
 
 static const struct vm_operations_struct vm_ops = {
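
For context, the following is a minimal userspace sketch (not part of the patch) of how the interface additions above might be exercised: creating a submit queue with MSM_SUBMITQUEUE_NEW, waiting on a per-queue fence via the extended MSM_WAIT_FENCE, and tearing the queue down with MSM_SUBMITQUEUE_CLOSE. It assumes the UAPI definitions this series adds to include/uapi/drm/msm_drm.h (struct drm_msm_submitqueue, the queueid field in struct drm_msm_wait_fence, and the DRM_IOCTL_MSM_SUBMITQUEUE_* wrappers), a render node at /dev/dri/renderD128, and a hypothetical fence number from an earlier MSM_GEM_SUBMIT.

/* build e.g.: gcc -I/usr/include/libdrm submitqueue_demo.c -o submitqueue_demo */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <time.h>
#include <unistd.h>

#include <drm/msm_drm.h>	/* assumed to carry the new submitqueue UAPI */

int main(void)
{
	int fd = open("/dev/dri/renderD128", O_RDWR);	/* assumed render node */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	/* Create a submit queue; prio selects one of the NR_RINGS rings. */
	struct drm_msm_submitqueue req;
	memset(&req, 0, sizeof(req));
	req.flags = 0;	/* no queue flags defined yet */
	req.prio = 0;	/* ring / priority level 0 */
	if (ioctl(fd, DRM_IOCTL_MSM_SUBMITQUEUE_NEW, &req)) {
		perror("SUBMITQUEUE_NEW");
		close(fd);
		return 1;
	}

	/*
	 * MSM_GEM_SUBMIT ioctls targeting req.id would go here; the fence
	 * below stands in for a value returned by such a submit.
	 */
	uint32_t fence = 1;

	/* Wait for that fence on this queue's ring (new queueid field). */
	struct drm_msm_wait_fence wait;
	struct timespec now;
	memset(&wait, 0, sizeof(wait));
	clock_gettime(CLOCK_MONOTONIC, &now);
	wait.fence = fence;
	wait.queueid = req.id;
	wait.timeout.tv_sec = now.tv_sec + 1;	/* absolute CLOCK_MONOTONIC deadline */
	wait.timeout.tv_nsec = now.tv_nsec;
	if (ioctl(fd, DRM_IOCTL_MSM_WAIT_FENCE, &wait))
		perror("WAIT_FENCE");

	/* Tear the queue down again. */
	if (ioctl(fd, DRM_IOCTL_MSM_SUBMITQUEUE_CLOSE, &req.id))
		perror("SUBMITQUEUE_CLOSE");

	close(fd);
	return 0;
}

Note that with multiple rings a fence number is only meaningful relative to the queue it was submitted on, which is why msm_ioctl_wait_fence above now looks up the queue and waits on gpu->rb[queue->prio]->fctx instead of a single global fence context.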