diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 75c933b1a4326532a76d023cbaa242b84bbed015..c184468e2b2b31cc196c9494e94ce31538091ae4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
  */
 
 #include <drm/drmP.h>
+#include <drm/drm_auth.h>
 #include "amdgpu.h"
+#include "amdgpu_sched.h"
 
-static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
+static int amdgpu_ctx_priority_permit(struct drm_file *filp,
+                                     enum amd_sched_priority priority)
+{
+       /* NORMAL and below are accessible by everyone */
+       if (priority <= AMD_SCHED_PRIORITY_NORMAL)
+               return 0;
+
+       if (capable(CAP_SYS_NICE))
+               return 0;
+
+       if (drm_is_current_master(filp))
+               return 0;
+
+       return -EACCES;
+}
+
+static int amdgpu_ctx_init(struct amdgpu_device *adev,
+                          enum amd_sched_priority priority,
+                          struct drm_file *filp,
+                          struct amdgpu_ctx *ctx)
 {
        unsigned i, j;
        int r;
 
+       if (priority < 0 || priority >= AMD_SCHED_PRIORITY_MAX)
+               return -EINVAL;
+
+       r = amdgpu_ctx_priority_permit(filp, priority);
+       if (r)
+               return r;
+
        memset(ctx, 0, sizeof(*ctx));
        ctx->adev = adev;
        kref_init(&ctx->refcount);
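
The two checks above lean on the scheduler's priority enum: everything at or below AMD_SCHED_PRIORITY_NORMAL is unprivileged, while the levels above it require CAP_SYS_NICE or DRM master status. For reference, the enum in drm/amd/scheduler/gpu_scheduler.h around this series looks roughly like the sketch below; treat the exact member list as an assumption rather than authoritative.

enum amd_sched_priority {
	AMD_SCHED_PRIORITY_MIN,
	AMD_SCHED_PRIORITY_LOW = AMD_SCHED_PRIORITY_MIN,
	AMD_SCHED_PRIORITY_NORMAL,
	AMD_SCHED_PRIORITY_HIGH_SW,	/* first privileged level */
	AMD_SCHED_PRIORITY_HIGH_HW,
	AMD_SCHED_PRIORITY_KERNEL,
	AMD_SCHED_PRIORITY_MAX,
	AMD_SCHED_PRIORITY_INVALID = -1,
	AMD_SCHED_PRIORITY_UNSET = -2
};

The negative sentinel values also explain the priority < 0 half of the range check in amdgpu_ctx_init().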
@@ -39,19 +67,24 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev, struct amdgpu_ctx *ctx)
        if (!ctx->fences)
                return -ENOMEM;
 
+       mutex_init(&ctx->lock);
+
        for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
                ctx->rings[i].sequence = 1;
                ctx->rings[i].fences = &ctx->fences[amdgpu_sched_jobs * i];
        }
 
        ctx->reset_counter = atomic_read(&adev->gpu_reset_counter);
+       ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
+       ctx->init_priority = priority;
+       ctx->override_priority = AMD_SCHED_PRIORITY_UNSET;
 
        /* create context entity for each ring */
        for (i = 0; i < adev->num_rings; i++) {
                struct amdgpu_ring *ring = adev->rings[i];
                struct amd_sched_rq *rq;
 
-               rq = &ring->sched.sched_rq[AMD_SCHED_PRIORITY_NORMAL];
+               rq = &ring->sched.sched_rq[priority];
 
                if (ring == &adev->gfx.kiq.ring)
                        continue;
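
Dropping the hardcoded AMD_SCHED_PRIORITY_NORMAL above is the heart of the change: the GPU scheduler keeps one run queue per priority level and always serves the highest non-empty queue, so which rq a context's entities land in is all that "context priority" means here. A toy model of that selection rule, as a plain-C sketch rather than the driver's actual amd_sched_select_entity():

/* Toy model only: one run queue per priority level; serve the highest
 * non-empty queue first. */
enum { TOY_PRIO_LEVELS = 5 };	/* stand-in for AMD_SCHED_PRIORITY_MAX */

struct toy_sched {
	int rq_len[TOY_PRIO_LEVELS];	/* runnable entities per level */
};

/* Returns the level to serve next, or -1 if everything is idle. */
static int toy_pick_rq(const struct toy_sched *s)
{
	int p;

	for (p = TOY_PRIO_LEVELS - 1; p >= 0; p--)
		if (s->rq_len[p] > 0)
			return p;
	return -1;
}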
@@ -96,10 +129,14 @@ static void amdgpu_ctx_fini(struct amdgpu_ctx *ctx)
                                      &ctx->rings[i].entity);
 
        amdgpu_queue_mgr_fini(adev, &ctx->queue_mgr);
+
+       mutex_destroy(&ctx->lock);
 }
 
 static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
                            struct amdgpu_fpriv *fpriv,
+                           struct drm_file *filp,
+                           enum amd_sched_priority priority,
                            uint32_t *id)
 {
        struct amdgpu_ctx_mgr *mgr = &fpriv->ctx_mgr;
@@ -117,8 +154,9 @@ static int amdgpu_ctx_alloc(struct amdgpu_device *adev,
                kfree(ctx);
                return r;
        }
+
        *id = (uint32_t)r;
-       r = amdgpu_ctx_init(adev, ctx);
+       r = amdgpu_ctx_init(adev, priority, filp, ctx);
        if (r) {
                idr_remove(&mgr->ctx_handles, *id);
                *id = 0;
@@ -193,6 +231,7 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 {
        int r;
        uint32_t id;
+       enum amd_sched_priority priority;
 
        union drm_amdgpu_ctx *args = data;
        struct amdgpu_device *adev = dev->dev_private;
@@ -200,10 +239,16 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
 
        r = 0;
        id = args->in.ctx_id;
+       priority = amdgpu_to_sched_priority(args->in.priority);
+
+       /* For backwards compatibility reasons, we need to accept
+        * ioctls with garbage in the priority field */
+       if (priority == AMD_SCHED_PRIORITY_INVALID)
+               priority = AMD_SCHED_PRIORITY_NORMAL;
 
        switch (args->in.op) {
        case AMDGPU_CTX_OP_ALLOC_CTX:
-               r = amdgpu_ctx_alloc(adev, fpriv, &id);
+               r = amdgpu_ctx_alloc(adev, fpriv, filp, priority, &id);
                args->out.alloc.ctx_id = id;
                break;
        case AMDGPU_CTX_OP_FREE_CTX:
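
From userspace, the new field rides along with the existing allocation ioctl. A minimal sketch, assuming libdrm's drmCommandWriteRead() and the AMDGPU_CTX_PRIORITY_* uapi constants that accompany this series; the helper name is made up for illustration:

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <amdgpu_drm.h>

/* Allocate a high-priority context on an already-open DRM fd. Expect
 * -EACCES unless the caller has CAP_SYS_NICE or is the current DRM
 * master (see amdgpu_ctx_priority_permit() above). */
static int alloc_high_prio_ctx(int fd, uint32_t *ctx_id)
{
	union drm_amdgpu_ctx args;
	int r;

	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_CTX_OP_ALLOC_CTX;
	args.in.priority = AMDGPU_CTX_PRIORITY_HIGH;

	r = drmCommandWriteRead(fd, DRM_AMDGPU_CTX, &args, sizeof(args));
	if (r)
		return r;

	*ctx_id = args.out.alloc.ctx_id;
	return 0;
}

Note that a priority value the kernel does not recognize is deliberately mapped to AMD_SCHED_PRIORITY_NORMAL rather than rejected, per the backwards-compatibility comment above.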
@@ -256,12 +301,8 @@ int amdgpu_ctx_add_fence(struct amdgpu_ctx *ctx, struct amdgpu_ring *ring,
 
        idx = seq & (amdgpu_sched_jobs - 1);
        other = cring->fences[idx];
-       if (other) {
-               signed long r;
-               r = dma_fence_wait_timeout(other, true, MAX_SCHEDULE_TIMEOUT);
-               if (r < 0)
-                       return r;
-       }
+       if (other)
+               BUG_ON(!dma_fence_is_signaled(other));
 
        dma_fence_get(fence);
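
The blocking wait removed from this function is not gone; it moves into amdgpu_ctx_wait_prev_fence() below, which the command-submission path now calls before building the job, so by the time a slot is overwritten here its old fence must already have signaled, and the BUG_ON() merely asserts that invariant. The slot arithmetic is why this holds: with amdgpu_sched_jobs entries (a power of two, 32 by default), sequence numbers exactly amdgpu_sched_jobs apart collide on a slot. A standalone toy calculation:

#include <stdio.h>

/* N stands in for amdgpu_sched_jobs: seq s and seq s + N share a slot,
 * so the fence overwritten by amdgpu_ctx_add_fence() is the one
 * amdgpu_ctx_wait_prev_fence() waited on N submissions earlier. */
int main(void)
{
	const unsigned N = 32;
	unsigned s;

	for (s = 1; s <= 4; s++)
		printf("seq %2u and seq %2u share slot %2u\n",
		       s, s + N, (s + N) & (N - 1));
	return 0;
}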
 
@@ -305,6 +346,51 @@ struct dma_fence *amdgpu_ctx_get_fence(struct amdgpu_ctx *ctx,
        return fence;
 }
 
+void amdgpu_ctx_priority_override(struct amdgpu_ctx *ctx,
+                                 enum amd_sched_priority priority)
+{
+       int i;
+       struct amdgpu_device *adev = ctx->adev;
+       struct amd_sched_rq *rq;
+       struct amd_sched_entity *entity;
+       struct amdgpu_ring *ring;
+       enum amd_sched_priority ctx_prio;
+
+       ctx->override_priority = priority;
+
+       ctx_prio = (ctx->override_priority == AMD_SCHED_PRIORITY_UNSET) ?
+                       ctx->init_priority : ctx->override_priority;
+
+       for (i = 0; i < adev->num_rings; i++) {
+               ring = adev->rings[i];
+               entity = &ctx->rings[i].entity;
+               rq = &ring->sched.sched_rq[ctx_prio];
+
+               if (ring->funcs->type == AMDGPU_RING_TYPE_KIQ)
+                       continue;
+
+               amd_sched_entity_set_rq(entity, rq);
+       }
+}
+
+int amdgpu_ctx_wait_prev_fence(struct amdgpu_ctx *ctx, unsigned ring_id)
+{
+       struct amdgpu_ctx_ring *cring = &ctx->rings[ring_id];
+       unsigned idx = cring->sequence & (amdgpu_sched_jobs - 1);
+       struct dma_fence *other = cring->fences[idx];
+
+       if (other) {
+               signed long r;
+               r = dma_fence_wait_timeout(other, false, MAX_SCHEDULE_TIMEOUT);
+               if (r < 0) {
+                       DRM_ERROR("Error (%ld) waiting for fence!\n", r);
+                       return r;
+               }
+       }
+
+       return 0;
+}
+
 void amdgpu_ctx_mgr_init(struct amdgpu_ctx_mgr *mgr)
 {
        mutex_init(&mgr->lock);
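
The override resolution in amdgpu_ctx_priority_override() is worth restating in isolation: an explicit override wins, otherwise the priority the context was allocated with applies. As a pure-function sketch (hypothetical helper, not part of the patch):

/* Effective priority: override_priority is set via the scheduler ioctl
 * that the new amdgpu_sched.h include wires up; UNSET means "fall back
 * to whatever the context was created with". */
static enum amd_sched_priority
amdgpu_ctx_effective_priority(const struct amdgpu_ctx *ctx)
{
	return ctx->override_priority == AMD_SCHED_PRIORITY_UNSET ?
			ctx->init_priority : ctx->override_priority;
}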