drm/msm: Show process names in gem_describe
author Rob Clark <robdclark@chromium.org>
Mon, 17 Aug 2020 22:01:45 +0000 (15:01 -0700)
committer Rob Clark <robdclark@chromium.org>
Sat, 12 Sep 2020 17:48:32 +0000 (10:48 -0700)
In $debugfs/gem we already show any vma(s) associated with an object.
Also show process names if the vma's address space is a per-process
address space.

Signed-off-by: Rob Clark <robdclark@chromium.org>
Reviewed-by: Jordan Crouse <jcrouse@codeaurora.org>
Reviewed-by: Bjorn Andersson <bjorn.andersson@linaro.org>
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_gem.h
drivers/gpu/drm/msm/msm_gem_vma.c
drivers/gpu/drm/msm/msm_gpu.c
drivers/gpu/drm/msm/msm_gpu.h

index e460c78e4d79745301c4dea6c54fe942120eff43..abf5799d9a22bbe1cc9c3fea57d3dddf96bcd05b 100644 (file)
@@ -589,7 +589,7 @@ static int context_init(struct drm_device *dev, struct drm_file *file)
        kref_init(&ctx->ref);
        msm_submitqueue_init(dev, ctx);
 
-       ctx->aspace = msm_gpu_create_private_address_space(priv->gpu);
+       ctx->aspace = msm_gpu_create_private_address_space(priv->gpu, current);
        file->driver_priv = ctx;
 
        return 0;
index 3cb7aeb93fd3b78ddd5e1c1de4ba8189012343e2..76a6c5271e578c36d0c5d56c06bc3f90f8486f7e 100644 (file)
@@ -842,11 +842,28 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
 
                seq_puts(m, "      vmas:");
 
-               list_for_each_entry(vma, &msm_obj->vmas, list)
-                       seq_printf(m, " [%s: %08llx,%s,inuse=%d]",
-                               vma->aspace != NULL ? vma->aspace->name : NULL,
-                               vma->iova, vma->mapped ? "mapped" : "unmapped",
+               list_for_each_entry(vma, &msm_obj->vmas, list) {
+                       const char *name, *comm;
+                       if (vma->aspace) {
+                               struct msm_gem_address_space *aspace = vma->aspace;
+                               struct task_struct *task =
+                                       get_pid_task(aspace->pid, PIDTYPE_PID);
+                               if (task) {
+                                       comm = kstrdup(task->comm, GFP_KERNEL);
+                               } else {
+                                       comm = NULL;
+                               }
+                               name = aspace->name;
+                       } else {
+                               name = comm = NULL;
+                       }
+                       seq_printf(m, " [%s%s%s: aspace=%p, %08llx,%s,inuse=%d]",
+                               name, comm ? ":" : "", comm ? comm : "",
+                               vma->aspace, vma->iova,
+                               vma->mapped ? "mapped" : "unmapped",
                                vma->inuse);
+                       kfree(comm);
+               }
 
                seq_puts(m, "\n");
        }
index 9c573c4269cb5f0a69d2acd84b34a4337ff3ce68..7b1c7a5f8eef4ab8144fb668b2b4ce7943051313 100644 (file)
@@ -24,6 +24,11 @@ struct msm_gem_address_space {
        spinlock_t lock; /* Protects drm_mm node allocation/removal */
        struct msm_mmu *mmu;
        struct kref kref;
+
+       /* For address spaces associated with a specific process, this
+        * will be non-NULL:
+        */
+       struct pid *pid;
 };
 
 struct msm_gem_vma {
index 29cc1305cf37de4951274962b3771dfc105c9c1d..80a8a266d68f95c8111f49cd931e29f5efc5f04f 100644 (file)
@@ -17,6 +17,7 @@ msm_gem_address_space_destroy(struct kref *kref)
        drm_mm_takedown(&aspace->mm);
        if (aspace->mmu)
                aspace->mmu->funcs->destroy(aspace->mmu);
+       put_pid(aspace->pid);
        kfree(aspace);
 }
 
index 9455c0b713e44a1ff858411f4795711f8db3b25d..29c8d73c9a039c5f43a80552fa4935e677c49e34 100644 (file)
@@ -829,10 +829,9 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
 
 /* Return a new address space for a msm_drm_private instance */
 struct msm_gem_address_space *
-msm_gpu_create_private_address_space(struct msm_gpu *gpu)
+msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task)
 {
        struct msm_gem_address_space *aspace = NULL;
-
        if (!gpu)
                return NULL;
 
@@ -840,8 +839,11 @@ msm_gpu_create_private_address_space(struct msm_gpu *gpu)
         * If the target doesn't support private address spaces then return
         * the global one
         */
-       if (gpu->funcs->create_private_address_space)
+       if (gpu->funcs->create_private_address_space) {
                aspace = gpu->funcs->create_private_address_space(gpu);
+               if (!IS_ERR(aspace))
+                       aspace->pid = get_pid(task_pid(task));
+       }
 
        if (IS_ERR_OR_NULL(aspace))
                aspace = msm_gem_address_space_get(gpu->aspace);
index 04a2f7539712fc83677389a3d2e528ca74d76993..5ee358b480e6e72f2fac467f618493beaf0ba0e3 100644 (file)
@@ -301,7 +301,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
                const char *name, struct msm_gpu_config *config);
 
 struct msm_gem_address_space *
-msm_gpu_create_private_address_space(struct msm_gpu *gpu);
+msm_gpu_create_private_address_space(struct msm_gpu *gpu, struct task_struct *task);
 
 void msm_gpu_cleanup(struct msm_gpu *gpu);