From: Lucas Stach
Date: Fri, 25 May 2018 14:51:25 +0000 (+0200)
Subject: drm/etnaviv: protect sched job submission with fence mutex
X-Git-Tag: 4.19-rc-smb3~62^2~9^2~2
X-Git-Url: http://git.samba.org/samba.git/?p=sfrench%2Fcifs-2.6.git;a=commitdiff_plain;h=a0780bb1df60f00e4573db7bd53e7039e9eee1cb

drm/etnaviv: protect sched job submission with fence mutex

The documentation of drm_sched_job_init and drm_sched_entity_push_job
has been clarified. Both functions should be called under a shared lock,
to avoid jobs getting pushed into the scheduler queue in a different
order than their sched_fence seqnos, which will confuse checks that are
looking at the seqnos to infer information about completion order.

Signed-off-by: Lucas Stach
---
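Note, kept below the --- so it stays out of the commit message: the
sketch that follows shows the locking pattern this patch adopts, reduced
to its essentials. It is illustrative only, not etnaviv code;
struct foo_device, sched_lock and foo_push_job() are invented names,
while drm_sched_job_init() and drm_sched_entity_push_job() are called
with the same signatures used in the diff below.

#include <linux/mutex.h>
#include <drm/gpu_scheduler.h>

struct foo_device {                        /* hypothetical device struct */
	struct mutex sched_lock;           /* serializes job init + push */
	struct drm_gpu_scheduler sched;
};

static int foo_push_job(struct foo_device *foo,
			struct drm_sched_entity *entity,
			struct drm_sched_job *job, void *owner)
{
	int ret;

	/*
	 * drm_sched_job_init() allocates the job's sched_fence seqno;
	 * drm_sched_entity_push_job() queues the job on the entity.
	 * Holding one mutex across both calls keeps queue order and
	 * seqno order identical.
	 */
	mutex_lock(&foo->sched_lock);

	ret = drm_sched_job_init(job, &foo->sched, entity, owner);
	if (!ret)
		drm_sched_entity_push_job(job, entity);

	mutex_unlock(&foo->sched_lock);

	return ret;
}

In the actual patch the existing fence_idr_lock is reused and renamed
to fence_lock to reflect its wider scope, so the fence IDR allocation
ends up under the same mutex instead of needing a separate lock/unlock
pair.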
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 46ecd3e66ac9..983e67f19e45 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -388,9 +388,9 @@ static void submit_cleanup(struct kref *kref)
 	dma_fence_put(submit->in_fence);
 	if (submit->out_fence) {
 		/* first remove from IDR, so fence can not be found anymore */
-		mutex_lock(&submit->gpu->fence_idr_lock);
+		mutex_lock(&submit->gpu->fence_lock);
 		idr_remove(&submit->gpu->fence_idr, submit->out_fence_id);
-		mutex_unlock(&submit->gpu->fence_idr_lock);
+		mutex_unlock(&submit->gpu->fence_lock);
 		dma_fence_put(submit->out_fence);
 	}
 	kfree(submit->pmrs);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 686f6552db48..18c2224ba0b8 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -1733,7 +1733,7 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
 
 	gpu->dev = &pdev->dev;
 	mutex_init(&gpu->lock);
-	mutex_init(&gpu->fence_idr_lock);
+	mutex_init(&gpu->fence_lock);
 
 	/* Map registers: */
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index 90f17ff7888e..9a75a6937268 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -118,7 +118,7 @@ struct etnaviv_gpu {
 	u32 idle_mask;
 
 	/* Fencing support */
-	struct mutex fence_idr_lock;
+	struct mutex fence_lock;
 	struct idr fence_idr;
 	u32 next_fence;
 	u32 active_fence;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index 50d6b88cb7aa..b267d9c4d91c 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -140,28 +140,38 @@ static const struct drm_sched_backend_ops etnaviv_sched_ops = {
 int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
 			   struct etnaviv_gem_submit *submit)
 {
-	int ret;
+	int ret = 0;
+
+	/*
+	 * Hold the fence lock across the whole operation to avoid jobs being
+	 * pushed out of order with regard to their sched fence seqnos as
+	 * allocated in drm_sched_job_init.
+	 */
+	mutex_lock(&submit->gpu->fence_lock);
 
 	ret = drm_sched_job_init(&submit->sched_job, &submit->gpu->sched,
 				 sched_entity, submit->cmdbuf.ctx);
 	if (ret)
-		return ret;
+		goto out_unlock;
 
 	submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
-	mutex_lock(&submit->gpu->fence_idr_lock);
 	submit->out_fence_id = idr_alloc_cyclic(&submit->gpu->fence_idr,
 						submit->out_fence, 0,
 						INT_MAX, GFP_KERNEL);
-	mutex_unlock(&submit->gpu->fence_idr_lock);
-	if (submit->out_fence_id < 0)
-		return -ENOMEM;
+	if (submit->out_fence_id < 0) {
+		ret = -ENOMEM;
+		goto out_unlock;
+	}
 
 	/* the scheduler holds on to the job now */
 	kref_get(&submit->refcount);
 
 	drm_sched_entity_push_job(&submit->sched_job, sched_entity);
 
-	return 0;
+out_unlock:
+	mutex_unlock(&submit->gpu->fence_lock);
+
+	return ret;
 }
 
 int etnaviv_sched_init(struct etnaviv_gpu *gpu)