Merge tag 'drm-next-5.5-2019-11-01' of git://people.freedesktop.org/~agd5f/linux...
diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c
index f39b97ed4ade4db6730fb5a31029c16701fd1d7d..2af64459b3d77023c0554a89e950a57d0b526e6e 100644
--- a/drivers/gpu/drm/scheduler/sched_main.c
+++ b/drivers/gpu/drm/scheduler/sched_main.c
@@ -632,43 +632,41 @@ static void drm_sched_process_job(struct dma_fence *f, struct dma_fence_cb *cb)
 }
 
 /**
- * drm_sched_cleanup_jobs - destroy finished jobs
+ * drm_sched_get_cleanup_job - fetch the next finished job to be destroyed
  *
  * @sched: scheduler instance
  *
- * Remove all finished jobs from the mirror list and destroy them.
+ * Returns the next finished job from the mirror list (if there is one)
+ * ready to be destroyed.
  */
-static void drm_sched_cleanup_jobs(struct drm_gpu_scheduler *sched)
+static struct drm_sched_job *
+drm_sched_get_cleanup_job(struct drm_gpu_scheduler *sched)
 {
+       struct drm_sched_job *job;
        unsigned long flags;
 
        /* Don't destroy jobs while the timeout worker is running */
        if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
            !cancel_delayed_work(&sched->work_tdr))
-               return;
-
+               return NULL;
 
-       while (!list_empty(&sched->ring_mirror_list)) {
-               struct drm_sched_job *job;
+       spin_lock_irqsave(&sched->job_list_lock, flags);
 
-               job = list_first_entry(&sched->ring_mirror_list,
+       job = list_first_entry_or_null(&sched->ring_mirror_list,
                                       struct drm_sched_job, node);
-               if (!dma_fence_is_signaled(&job->s_fence->finished))
-                       break;
 
-               spin_lock_irqsave(&sched->job_list_lock, flags);
+       if (job && dma_fence_is_signaled(&job->s_fence->finished)) {
                /* remove job from ring_mirror_list */
                list_del_init(&job->node);
-               spin_unlock_irqrestore(&sched->job_list_lock, flags);
-
-               sched->ops->free_job(job);
+       } else {
+               job = NULL;
+               /* queue timeout for next job */
+               drm_sched_start_timeout(sched);
        }
 
-       /* queue timeout for next job */
-       spin_lock_irqsave(&sched->job_list_lock, flags);
-       drm_sched_start_timeout(sched);
        spin_unlock_irqrestore(&sched->job_list_lock, flags);
 
+       return job;
 }
 
 /**
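The hunk above replaces a loop that freed every finished job in place with a helper that detaches at most one finished job under the list lock and hands it back to the caller; the actual free now happens elsewhere. A minimal user-space sketch of that peek-and-detach pattern, using a pthread mutex and a hand-rolled circular list in place of the kernel's spinlock and list_head (all names here are illustrative, not from the driver, and the timeout re-arm is omitted):

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct job {
	struct job *prev, *next;
	bool done;			/* stands in for dma_fence_is_signaled() */
};

/* Circular list with a sentinel head, the way list_head works in the kernel. */
static struct job job_list = { &job_list, &job_list };
static pthread_mutex_t job_lock = PTHREAD_MUTEX_INITIALIZER;

/*
 * Detach and return the first job iff it has finished, NULL otherwise.
 * Only the peek and the unlink happen under the lock; the caller frees
 * the job later, outside any lock or wait condition.
 */
static struct job *get_cleanup_job(void)
{
	struct job *job = NULL;

	pthread_mutex_lock(&job_lock);
	if (job_list.next != &job_list && job_list.next->done) {
		job = job_list.next;
		job->prev->next = job->next;	/* open-coded list_del_init() */
		job->next->prev = job->prev;
		job->next = job->prev = job;
	}
	pthread_mutex_unlock(&job_lock);
	return job;
}

Returning the job instead of freeing it keeps the function cheap and side-effect free enough to be called from a wait condition, which is exactly what the second hunk below relies on.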
@@ -708,12 +706,19 @@ static int drm_sched_main(void *param)
                struct drm_sched_fence *s_fence;
                struct drm_sched_job *sched_job;
                struct dma_fence *fence;
+               struct drm_sched_job *cleanup_job = NULL;
 
                wait_event_interruptible(sched->wake_up_worker,
-                                        (drm_sched_cleanup_jobs(sched),
+                                        (cleanup_job = drm_sched_get_cleanup_job(sched)) ||
                                         (!drm_sched_blocked(sched) &&
                                          (entity = drm_sched_select_entity(sched))) ||
-                                        kthread_should_stop()));
+                                        kthread_should_stop());
+
+               if (cleanup_job) {
+                       sched->ops->free_job(cleanup_job);
+                       /* queue timeout for next job */
+                       drm_sched_start_timeout(sched);
+               }
 
                if (!entity)
                        continue;
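The second hunk moves the sched->ops->free_job() call out of the wait_event_interruptible() condition: the condition now merely fetches a finished job, and the potentially heavyweight free runs after the wait returns. A rough pthread analogue of the reworked loop, reusing the sketch above plus a hypothetical wake_up condition variable and should_stop flag (again illustrative names, not driver code):

#include <stdlib.h>

static pthread_cond_t wake_up = PTHREAD_COND_INITIALIZER;
static bool should_stop;

static void *scheduler_main(void *unused)
{
	(void)unused;
	while (!should_stop) {
		struct job *cleanup_job;

		/* Sleep until a finished job exists or we are told to stop;
		 * the predicate only inspects state, it frees nothing. */
		pthread_mutex_lock(&job_lock);
		while (!(job_list.next != &job_list && job_list.next->done) &&
		       !should_stop)
			pthread_cond_wait(&wake_up, &job_lock);
		pthread_mutex_unlock(&job_lock);

		/* The expensive work happens out here, just as free_job()
		 * now runs after wait_event_interruptible() completes. */
		cleanup_job = get_cleanup_job();
		if (cleanup_job)
			free(cleanup_job);
	}
	return NULL;
}

In the kernel patch the timeout re-arm (drm_sched_start_timeout()) also moves next to the free for the same reason; the sketch leaves it out to keep the control flow visible.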